From cc6702019b435f6c6ad7740be364afe3b0cb4bb4 Mon Sep 17 00:00:00 2001 From: mattBrzezinski Date: Wed, 26 Jun 2024 06:15:17 +0000 Subject: [PATCH] AWS API Definitions Updated --- src/AWSServices.jl | 104 +- src/services/accessanalyzer.jl | 389 +- src/services/account.jl | 254 +- src/services/acm_pca.jl | 32 +- src/services/alexa_for_business.jl | 3489 ---------- src/services/amp.jl | 448 +- src/services/amplify.jl | 316 +- src/services/amplifyuibuilder.jl | 115 +- src/services/api_gateway.jl | 44 +- src/services/apigatewayv2.jl | 2 +- src/services/appconfig.jl | 64 +- src/services/appfabric.jl | 1212 ++++ src/services/appflow.jl | 65 + src/services/appintegrations.jl | 272 +- src/services/application_auto_scaling.jl | 72 +- src/services/application_discovery_service.jl | 206 +- src/services/application_insights.jl | 354 + src/services/application_signals.jl | 754 ++ src/services/apprunner.jl | 131 +- src/services/appstream.jl | 455 +- src/services/appsync.jl | 275 +- src/services/apptest.jl | 893 +++ src/services/arc_zonal_shift.jl | 351 +- src/services/artifact.jl | 210 + src/services/athena.jl | 103 +- src/services/auditmanager.jl | 166 +- src/services/auto_scaling.jl | 277 +- src/services/b2bi.jl | 1182 ++++ src/services/backup.jl | 1100 ++- src/services/backupstorage.jl | 403 -- src/services/batch.jl | 142 +- src/services/bcm_data_exports.jl | 452 ++ src/services/bedrock.jl | 1394 ++++ src/services/bedrock_agent.jl | 1895 +++++ src/services/bedrock_agent_runtime.jl | 154 + src/services/bedrock_runtime.jl | 292 + src/services/billingconductor.jl | 51 +- src/services/braket.jl | 10 +- src/services/budgets.jl | 144 +- src/services/chatbot.jl | 1210 ++++ src/services/chime.jl | 742 +- src/services/chime_sdk_identity.jl | 3 + src/services/chime_sdk_media_pipelines.jl | 514 +- src/services/chime_sdk_meetings.jl | 156 +- src/services/chime_sdk_messaging.jl | 53 +- src/services/chime_sdk_voice.jl | 30 +- src/services/cleanrooms.jl | 1271 +++- src/services/cleanroomsml.jl | 1126 +++ src/services/cloud9.jl | 43 +- src/services/cloudcontrol.jl | 18 +- src/services/cloudformation.jl | 818 ++- src/services/cloudfront.jl | 228 +- src/services/cloudhsm_v2.jl | 5 +- src/services/cloudtrail.jl | 473 +- src/services/cloudwatch.jl | 197 +- src/services/cloudwatch_events.jl | 10 +- src/services/cloudwatch_logs.jl | 1493 +++- src/services/codeartifact.jl | 712 +- src/services/codebuild.jl | 406 +- src/services/codecatalyst.jl | 504 +- src/services/codecommit.jl | 242 +- src/services/codeconnections.jl | 1175 ++++ src/services/codedeploy.jl | 183 +- src/services/codeguru_security.jl | 49 +- src/services/codepipeline.jl | 82 +- src/services/codestar_connections.jl | 741 +- src/services/cognito_identity_provider.jl | 1646 +++-- src/services/comprehend.jl | 133 +- src/services/comprehendmedical.jl | 55 +- src/services/compute_optimizer.jl | 320 +- src/services/config_service.jl | 137 +- src/services/connect.jl | 4221 +++++++++-- src/services/connectcases.jl | 227 +- src/services/connectparticipant.jl | 79 +- src/services/controlcatalog.jl | 95 + src/services/controltower.jl | 1004 ++- src/services/cost_and_usage_report_service.jl | 151 +- src/services/cost_explorer.jl | 203 +- src/services/cost_optimization_hub.jl | 233 + src/services/customer_profiles.jl | 167 +- src/services/database_migration_service.jl | 3183 +++++++-- src/services/dataexchange.jl | 53 + src/services/datasync.jl | 730 +- src/services/datazone.jl | 5411 +++++++++++++++ src/services/deadline.jl | 4565 ++++++++++++ src/services/detective.jl | 
289 +- src/services/devops_guru.jl | 16 +- src/services/dlm.jl | 99 +- src/services/docdb.jl | 96 +- src/services/docdb_elastic.jl | 321 +- src/services/drs.jl | 188 + src/services/dynamodb.jl | 711 +- src/services/ebs.jl | 29 +- src/services/ec2.jl | 4272 ++++++++---- src/services/ecr.jl | 125 +- src/services/ecs.jl | 537 +- src/services/efs.jl | 409 +- src/services/eks.jl | 1658 ++++- src/services/eks_auth.jl | 46 + src/services/elastic_load_balancing_v2.jl | 525 +- src/services/elasticache.jl | 843 ++- src/services/elasticsearch_service.jl | 38 + src/services/emr.jl | 200 +- src/services/emr_containers.jl | 141 + src/services/emr_serverless.jl | 75 +- src/services/entityresolution.jl | 1550 +++++ src/services/eventbridge.jl | 197 +- src/services/finspace.jl | 1028 ++- src/services/finspace_data.jl | 23 +- src/services/firehose.jl | 335 +- src/services/fis.jl | 320 +- src/services/fms.jl | 76 +- src/services/freetier.jl | 33 + src/services/fsx.jl | 407 +- src/services/gamelift.jl | 1115 ++- src/services/gamesparks.jl | 1257 ---- src/services/global_accelerator.jl | 362 +- src/services/glue.jl | 969 ++- src/services/grafana.jl | 357 +- src/services/greengrassv2.jl | 21 +- src/services/groundstation.jl | 8 +- src/services/guardduty.jl | 516 +- src/services/health.jl | 75 +- src/services/healthlake.jl | 72 +- src/services/honeycode.jl | 786 --- src/services/iam.jl | 293 +- src/services/identitystore.jl | 50 +- src/services/imagebuilder.jl | 1030 ++- src/services/inspector2.jl | 1077 ++- src/services/inspector_scan.jl | 42 + src/services/internetmonitor.jl | 411 +- src/services/iot.jl | 334 +- src/services/iot_roborunner.jl | 729 -- src/services/iot_wireless.jl | 184 +- src/services/iotfleethub.jl | 32 +- src/services/iotfleetwise.jl | 107 +- src/services/iotsitewise.jl | 833 ++- src/services/iottwinmaker.jl | 305 +- src/services/ivs.jl | 374 +- src/services/ivs_realtime.jl | 466 +- src/services/kafka.jl | 309 + src/services/kafkaconnect.jl | 156 +- src/services/kendra.jl | 211 +- src/services/keyspaces.jl | 127 +- src/services/kinesis.jl | 658 +- src/services/kinesis_analytics_v2.jl | 116 +- src/services/kinesis_video.jl | 142 +- src/services/kinesis_video_archived_media.jl | 218 +- src/services/kms.jl | 883 ++- src/services/lakeformation.jl | 368 +- src/services/lambda.jl | 362 +- src/services/launch_wizard.jl | 494 ++ src/services/lex_models_v2.jl | 1061 ++- src/services/lightsail.jl | 256 +- src/services/location.jl | 224 +- src/services/lookoutequipment.jl | 813 ++- src/services/m2.jl | 92 +- src/services/macie.jl | 281 - src/services/macie2.jl | 166 +- src/services/mailmanager.jl | 1858 +++++ src/services/managedblockchain.jl | 19 +- src/services/managedblockchain_query.jl | 440 ++ src/services/marketplace_agreement.jl | 136 + src/services/marketplace_catalog.jl | 74 +- .../marketplace_commerce_analytics.jl | 75 +- src/services/marketplace_deployment.jl | 185 + src/services/mediaconnect.jl | 33 + src/services/mediaconvert.jl | 46 +- src/services/medialive.jl | 1652 ++++- src/services/mediapackagev2.jl | 18 + src/services/mediatailor.jl | 14 + src/services/medical_imaging.jl | 751 ++ src/services/mgn.jl | 430 +- src/services/migrationhub_config.jl | 37 + src/services/migrationhuborchestrator.jl | 147 +- src/services/migrationhubstrategy.jl | 38 + src/services/mq.jl | 86 +- src/services/mwaa.jl | 58 +- src/services/neptune.jl | 48 +- src/services/neptunedata.jl | 1931 ++++++ src/services/network_firewall.jl | 70 +- src/services/networkmanager.jl | 65 +- src/services/networkmonitor.jl | 
489 ++ src/services/oam.jl | 23 +- src/services/omics.jl | 527 +- src/services/opensearch.jl | 378 +- src/services/opensearchserverless.jl | 285 +- src/services/opsworks.jl | 532 +- src/services/organizations.jl | 272 +- src/services/osis.jl | 11 +- src/services/outposts.jl | 222 +- src/services/payment_cryptography.jl | 380 +- src/services/payment_cryptography_data.jl | 157 +- src/services/pca_connector_ad.jl | 1067 +++ src/services/pca_connector_scep.jl | 491 ++ src/services/personalize.jl | 473 +- src/services/personalize_events.jl | 109 +- src/services/personalize_runtime.jl | 69 +- src/services/pi.jl | 417 +- src/services/pinpoint.jl | 14 +- src/services/pinpoint_sms_voice_v2.jl | 2004 +++++- src/services/pipes.jl | 27 +- src/services/polly.jl | 36 +- src/services/pricing.jl | 16 +- src/services/privatenetworks.jl | 37 +- src/services/proton.jl | 101 + src/services/qbusiness.jl | 2493 +++++++ src/services/qconnect.jl | 1944 ++++++ src/services/quicksight.jl | 1022 ++- src/services/ram.jl | 10 +- src/services/rds.jl | 4814 ++++++++----- src/services/rds_data.jl | 5 +- src/services/redshift.jl | 864 ++- src/services/redshift_serverless.jl | 815 ++- src/services/rekognition.jl | 685 +- src/services/repostspace.jl | 412 ++ src/services/resiliencehub.jl | 666 +- src/services/resource_explorer_2.jl | 99 +- src/services/resource_groups.jl | 19 +- src/services/rolesanywhere.jl | 115 +- src/services/route53_recovery_cluster.jl | 48 +- .../route53_recovery_control_config.jl | 32 + src/services/route53profiles.jl | 643 ++ src/services/route53resolver.jl | 316 +- src/services/route_53.jl | 132 +- src/services/route_53_domains.jl | 184 +- src/services/rum.jl | 31 +- src/services/s3.jl | 6151 ++++++++++------- src/services/s3_control.jl | 3784 +++++++--- src/services/sagemaker.jl | 1970 +++++- .../sagemaker_featurestore_runtime.jl | 68 +- src/services/sagemaker_runtime.jl | 98 +- src/services/savingsplans.jl | 80 +- src/services/scheduler.jl | 8 +- src/services/secrets_manager.jl | 324 +- src/services/securityhub.jl | 705 +- src/services/securitylake.jl | 264 +- src/services/service_catalog.jl | 58 +- src/services/service_catalog_appregistry.jl | 34 +- src/services/service_quotas.jl | 226 +- src/services/servicediscovery.jl | 128 +- src/services/ses.jl | 634 +- src/services/sesv2.jl | 235 +- src/services/sfn.jl | 882 ++- src/services/signer.jl | 61 +- src/services/snowball.jl | 82 +- src/services/sns.jl | 129 +- src/services/sqs.jl | 348 +- src/services/ssm.jl | 518 +- src/services/ssm_incidents.jl | 163 +- src/services/ssm_sap.jl | 178 +- src/services/sso_admin.jl | 2861 ++++++-- src/services/sso_oidc.jl | 151 +- src/services/storage_gateway.jl | 222 +- src/services/sts.jl | 10 +- src/services/supplychain.jl | 158 + src/services/swf.jl | 129 +- src/services/taxsettings.jl | 380 + src/services/textract.jl | 519 +- src/services/timestream_influxdb.jl | 459 ++ src/services/timestream_query.jl | 65 +- src/services/transcribe.jl | 324 +- src/services/transfer.jl | 426 +- src/services/translate.jl | 38 +- src/services/trustedadvisor.jl | 450 ++ src/services/verifiedpermissions.jl | 364 +- src/services/vpc_lattice.jl | 49 +- src/services/wafv2.jl | 329 +- src/services/wellarchitected.jl | 724 +- src/services/wisdom.jl | 552 +- src/services/workmail.jl | 407 +- src/services/workspaces.jl | 585 +- src/services/workspaces_thin_client.jl | 605 ++ src/services/workspaces_web.jl | 84 +- src/services/xray.jl | 4 +- 275 files changed, 118807 insertions(+), 26611 deletions(-) delete mode 100644 
src/services/alexa_for_business.jl create mode 100644 src/services/appfabric.jl create mode 100644 src/services/application_signals.jl create mode 100644 src/services/apptest.jl create mode 100644 src/services/artifact.jl create mode 100644 src/services/b2bi.jl delete mode 100644 src/services/backupstorage.jl create mode 100644 src/services/bcm_data_exports.jl create mode 100644 src/services/bedrock.jl create mode 100644 src/services/bedrock_agent.jl create mode 100644 src/services/bedrock_agent_runtime.jl create mode 100644 src/services/bedrock_runtime.jl create mode 100644 src/services/chatbot.jl create mode 100644 src/services/cleanroomsml.jl create mode 100644 src/services/codeconnections.jl create mode 100644 src/services/controlcatalog.jl create mode 100644 src/services/cost_optimization_hub.jl create mode 100644 src/services/datazone.jl create mode 100644 src/services/deadline.jl create mode 100644 src/services/eks_auth.jl create mode 100644 src/services/entityresolution.jl create mode 100644 src/services/freetier.jl delete mode 100644 src/services/gamesparks.jl delete mode 100644 src/services/honeycode.jl create mode 100644 src/services/inspector_scan.jl delete mode 100644 src/services/iot_roborunner.jl create mode 100644 src/services/launch_wizard.jl delete mode 100644 src/services/macie.jl create mode 100644 src/services/mailmanager.jl create mode 100644 src/services/managedblockchain_query.jl create mode 100644 src/services/marketplace_agreement.jl create mode 100644 src/services/marketplace_deployment.jl create mode 100644 src/services/medical_imaging.jl create mode 100644 src/services/neptunedata.jl create mode 100644 src/services/networkmonitor.jl create mode 100644 src/services/pca_connector_ad.jl create mode 100644 src/services/pca_connector_scep.jl create mode 100644 src/services/qbusiness.jl create mode 100644 src/services/qconnect.jl create mode 100644 src/services/repostspace.jl create mode 100644 src/services/route53profiles.jl create mode 100644 src/services/supplychain.jl create mode 100644 src/services/taxsettings.jl create mode 100644 src/services/timestream_influxdb.jl create mode 100644 src/services/trustedadvisor.jl create mode 100644 src/services/workspaces_thin_client.jl diff --git a/src/AWSServices.jl b/src/AWSServices.jl index 6b7f6819c8..259e8f8066 100644 --- a/src/AWSServices.jl +++ b/src/AWSServices.jl @@ -11,9 +11,6 @@ const accessanalyzer = AWS.RestJSONService( const account = AWS.RestJSONService("account", "account", "2021-02-01") const acm = AWS.JSONService("acm", "acm", "2015-12-08", "1.1", "CertificateManager") const acm_pca = AWS.JSONService("acm-pca", "acm-pca", "2017-08-22", "1.1", "ACMPrivateCA") -const alexa_for_business = AWS.JSONService( - "a4b", "a4b", "2017-11-09", "1.1", "AlexaForBusiness" -) const amp = AWS.RestJSONService("aps", "aps", "2020-08-01") const amplify = AWS.RestJSONService("amplify", "amplify", "2017-07-25") const amplifybackend = AWS.RestJSONService("amplifybackend", "amplifybackend", "2020-08-11") @@ -28,6 +25,7 @@ const apigatewayv2 = AWS.RestJSONService("apigateway", "apigateway", "2018-11-29 const app_mesh = AWS.RestJSONService("appmesh", "appmesh", "2019-01-25") const appconfig = AWS.RestJSONService("appconfig", "appconfig", "2019-10-09") const appconfigdata = AWS.RestJSONService("appconfig", "appconfigdata", "2021-11-11") +const appfabric = AWS.RestJSONService("appfabric", "appfabric", "2023-05-19") const appflow = AWS.RestJSONService("appflow", "appflow", "2020-08-23") const appintegrations = AWS.RestJSONService( 
"app-integrations", "app-integrations", "2020-07-29" @@ -49,6 +47,9 @@ const application_insights = AWS.JSONService( "1.1", "EC2WindowsBarleyService", ) +const application_signals = AWS.RestJSONService( + "application-signals", "application-signals", "2024-04-15" +) const applicationcostprofiler = AWS.RestJSONService( "application-cost-profiler", "application-cost-profiler", "2020-09-10" ) @@ -59,9 +60,11 @@ const appstream = AWS.JSONService( "appstream", "appstream2", "2016-12-01", "1.1", "PhotonAdminProxyService" ) const appsync = AWS.RestJSONService("appsync", "appsync", "2017-07-25") +const apptest = AWS.RestJSONService("apptest", "apptest", "2022-12-06") const arc_zonal_shift = AWS.RestJSONService( "arc-zonal-shift", "arc-zonal-shift", "2022-10-30" ) +const artifact = AWS.RestJSONService("artifact", "artifact", "2018-05-10") const athena = AWS.JSONService("athena", "athena", "2017-05-18", "1.1", "AmazonAthena") const auditmanager = AWS.RestJSONService("auditmanager", "auditmanager", "2017-07-25") const auto_scaling = AWS.QueryService("autoscaling", "autoscaling", "2011-01-01") @@ -72,12 +75,25 @@ const auto_scaling_plans = AWS.JSONService( "1.1", "AnyScaleScalingPlannerFrontendService", ) +const b2bi = AWS.JSONService("b2bi", "b2bi", "2022-06-23", "1.0", "B2BI") const backup = AWS.RestJSONService("backup", "backup", "2018-11-15") const backup_gateway = AWS.JSONService( "backup-gateway", "backup-gateway", "2021-01-01", "1.0", "BackupOnPremises_v20210101" ) -const backupstorage = AWS.RestJSONService("backup-storage", "backupstorage", "2018-04-10") const batch = AWS.RestJSONService("batch", "batch", "2016-08-10") +const bcm_data_exports = AWS.JSONService( + "bcm-data-exports", + "bcm-data-exports", + "2023-11-26", + "1.1", + "AWSBillingAndCostManagementDataExports", +) +const bedrock = AWS.RestJSONService("bedrock", "bedrock", "2023-04-20") +const bedrock_agent = AWS.RestJSONService("bedrock", "bedrock-agent", "2023-06-05") +const bedrock_agent_runtime = AWS.RestJSONService( + "bedrock", "bedrock-agent-runtime", "2023-07-26" +) +const bedrock_runtime = AWS.RestJSONService("bedrock", "bedrock-runtime", "2023-09-30") const billingconductor = AWS.RestJSONService( "billingconductor", "billingconductor", "2021-07-30" ) @@ -85,6 +101,7 @@ const braket = AWS.RestJSONService("braket", "braket", "2019-09-01") const budgets = AWS.JSONService( "budgets", "budgets", "2016-10-20", "1.1", "AWSBudgetServiceGateway" ) +const chatbot = AWS.RestJSONService("chatbot", "chatbot", "2017-10-11") const chime = AWS.RestJSONService("chime", "chime", "2018-05-01") const chime_sdk_identity = AWS.RestJSONService("chime", "identity-chime", "2021-04-20") const chime_sdk_media_pipelines = AWS.RestJSONService( @@ -94,6 +111,7 @@ const chime_sdk_meetings = AWS.RestJSONService("chime", "meetings-chime", "2021- const chime_sdk_messaging = AWS.RestJSONService("chime", "messaging-chime", "2021-05-15") const chime_sdk_voice = AWS.RestJSONService("chime", "voice-chime", "2022-08-03") const cleanrooms = AWS.RestJSONService("cleanrooms", "cleanrooms", "2022-02-17") +const cleanroomsml = AWS.RestJSONService("cleanrooms-ml", "cleanrooms-ml", "2023-09-06") const cloud9 = AWS.JSONService( "cloud9", "cloud9", "2017-09-23", "1.1", "AWSCloud9WorkspaceManagementService" ) @@ -138,6 +156,13 @@ const codecatalyst = AWS.RestJSONService("codecatalyst", "codecatalyst", "2022-0 const codecommit = AWS.JSONService( "codecommit", "codecommit", "2015-04-13", "1.1", "CodeCommit_20150413" ) +const codeconnections = AWS.JSONService( + 
"codeconnections", + "codeconnections", + "2023-12-01", + "1.0", + "com.amazonaws.codeconnections.CodeConnections_20231201", +) const codedeploy = AWS.JSONService( "codedeploy", "codedeploy", "2014-10-06", "1.1", "CodeDeploy_20141006" ) @@ -198,6 +223,7 @@ const connectcases = AWS.RestJSONService("cases", "cases", "2022-10-03") const connectparticipant = AWS.RestJSONService( "execute-api", "participant.connect", "2018-09-07" ) +const controlcatalog = AWS.RestJSONService("controlcatalog", "controlcatalog", "2018-05-10") const controltower = AWS.RestJSONService("controltower", "controltower", "2018-05-10") const cost_and_usage_report_service = AWS.JSONService( "cur", "cur", "2017-01-06", "1.1", "AWSOrigamiServiceGatewayService" @@ -205,6 +231,13 @@ const cost_and_usage_report_service = AWS.JSONService( const cost_explorer = AWS.JSONService( "ce", "ce", "2017-10-25", "1.1", "AWSInsightsIndexService" ) +const cost_optimization_hub = AWS.JSONService( + "cost-optimization-hub", + "cost-optimization-hub", + "2022-07-26", + "1.0", + "CostOptimizationHubService", +) const customer_profiles = AWS.RestJSONService("profile", "profile", "2020-08-15") const data_pipeline = AWS.JSONService( "datapipeline", "datapipeline", "2012-10-29", "1.1", "DataPipeline" @@ -215,7 +248,9 @@ const database_migration_service = AWS.JSONService( const databrew = AWS.RestJSONService("databrew", "databrew", "2017-07-25") const dataexchange = AWS.RestJSONService("dataexchange", "dataexchange", "2017-07-25") const datasync = AWS.JSONService("datasync", "datasync", "2018-11-09", "1.1", "FmrsService") +const datazone = AWS.RestJSONService("datazone", "datazone", "2018-05-10") const dax = AWS.JSONService("dax", "dax", "2017-04-19", "1.1", "AmazonDAXV3") +const deadline = AWS.RestJSONService("deadline", "deadline", "2023-10-12") const detective = AWS.RestJSONService("detective", "api.detective", "2018-10-26") const device_farm = AWS.JSONService( "devicefarm", "devicefarm", "2015-06-23", "1.1", "DeviceFarm_20150623" @@ -257,6 +292,7 @@ const ecs = AWS.JSONService( ) const efs = AWS.RestJSONService("elasticfilesystem", "elasticfilesystem", "2015-02-01") const eks = AWS.RestJSONService("eks", "eks", "2017-11-01") +const eks_auth = AWS.RestJSONService("eks-auth", "eks-auth", "2023-11-26") const elastic_beanstalk = AWS.QueryService( "elasticbeanstalk", "elasticbeanstalk", "2010-12-01" ) @@ -279,6 +315,9 @@ const emr = AWS.JSONService( ) const emr_containers = AWS.RestJSONService("emr-containers", "emr-containers", "2020-10-01") const emr_serverless = AWS.RestJSONService("emr-serverless", "emr-serverless", "2021-07-13") +const entityresolution = AWS.RestJSONService( + "entityresolution", "entityresolution", "2018-05-10" +) const eventbridge = AWS.JSONService("events", "events", "2015-10-07", "1.1", "AWSEvents") const evidently = AWS.RestJSONService("evidently", "evidently", "2021-02-01") const finspace = AWS.RestJSONService("finspace", "finspace", "2021-03-12") @@ -297,11 +336,13 @@ const forecastquery = AWS.JSONService( const frauddetector = AWS.JSONService( "frauddetector", "frauddetector", "2019-11-15", "1.1", "AWSHawksNestServiceFacade" ) +const freetier = AWS.JSONService( + "freetier", "freetier", "2023-09-07", "1.0", "AWSFreeTierService" +) const fsx = AWS.JSONService( "fsx", "fsx", "2018-03-01", "1.1", "AWSSimbaAPIService_v20180301" ) const gamelift = AWS.JSONService("gamelift", "gamelift", "2015-10-01", "1.1", "GameLift") -const gamesparks = AWS.RestJSONService("gamesparks", "gamesparks", "2021-08-17") const glacier = 
AWS.RestJSONService( "glacier", "glacier", "2012-06-01", LittleDict("x-amz-glacier-version" => "2012-06-01") ) @@ -324,7 +365,6 @@ const health = AWS.JSONService( const healthlake = AWS.JSONService( "healthlake", "healthlake", "2017-07-01", "1.0", "HealthLake" ) -const honeycode = AWS.RestJSONService("honeycode", "honeycode", "2020-03-01") const iam = AWS.QueryService("iam", "iam", "2010-05-08") const identitystore = AWS.JSONService( "identitystore", "identitystore", "2020-06-15", "1.1", "AWSIdentityStore" @@ -335,6 +375,7 @@ const inspector = AWS.JSONService( "inspector", "inspector", "2016-02-16", "1.1", "InspectorService" ) const inspector2 = AWS.RestJSONService("inspector2", "inspector2", "2020-06-08") +const inspector_scan = AWS.RestJSONService("inspector-scan", "inspector-scan", "2023-08-08") const internetmonitor = AWS.RestJSONService( "internetmonitor", "internetmonitor", "2021-06-03" ) @@ -351,7 +392,6 @@ const iot_events_data = AWS.RestJSONService("ioteventsdata", "data.iotevents", " const iot_jobs_data_plane = AWS.RestJSONService( "iot-jobs-data", "data.jobs.iot", "2017-09-29" ) -const iot_roborunner = AWS.RestJSONService("iotroborunner", "iotroborunner", "2018-05-10") const iot_wireless = AWS.RestJSONService("iotwireless", "api.iotwireless", "2020-11-22") const iotanalytics = AWS.RestJSONService("iotanalytics", "iotanalytics", "2017-11-27") const iotdeviceadvisor = AWS.RestJSONService( @@ -412,6 +452,7 @@ const kinesis_video_webrtc_storage = AWS.RestJSONService( const kms = AWS.JSONService("kms", "kms", "2014-11-01", "1.1", "TrentService") const lakeformation = AWS.RestJSONService("lakeformation", "lakeformation", "2017-03-31") const lambda = AWS.RestJSONService("lambda", "lambda", "2015-03-31") +const launch_wizard = AWS.RestJSONService("launchwizard", "launchwizard", "2018-05-10") const lex_model_building_service = AWS.RestJSONService("lex", "models.lex", "2017-04-19") const lex_models_v2 = AWS.RestJSONService("lex", "models-v2-lex", "2020-08-07") const lex_runtime_service = AWS.RestJSONService("lex", "runtime.lex", "2016-11-28") @@ -444,11 +485,23 @@ const m2 = AWS.RestJSONService("m2", "m2", "2021-04-28") const machine_learning = AWS.JSONService( "machinelearning", "machinelearning", "2014-12-12", "1.1", "AmazonML_20141212" ) -const macie = AWS.JSONService("macie", "macie", "2017-12-19", "1.1", "MacieService") const macie2 = AWS.RestJSONService("macie2", "macie2", "2020-01-01") +const mailmanager = AWS.JSONService( + "ses", "mail-manager", "2023-10-17", "1.0", "MailManagerSvc" +) const managedblockchain = AWS.RestJSONService( "managedblockchain", "managedblockchain", "2018-09-24" ) +const managedblockchain_query = AWS.RestJSONService( + "managedblockchain-query", "managedblockchain-query", "2023-05-04" +) +const marketplace_agreement = AWS.JSONService( + "aws-marketplace", + "agreement-marketplace", + "2020-03-01", + "1.0", + "AWSMPCommerceService_v20200301", +) const marketplace_catalog = AWS.RestJSONService( "aws-marketplace", "catalog.marketplace", "2018-09-17" ) @@ -459,6 +512,9 @@ const marketplace_commerce_analytics = AWS.JSONService( "1.1", "MarketplaceCommerceAnalytics20150701", ) +const marketplace_deployment = AWS.RestJSONService( + "aws-marketplace", "deployment-marketplace", "2023-01-25" +) const marketplace_entitlement_service = AWS.JSONService( "aws-marketplace", "entitlement.marketplace", @@ -482,6 +538,9 @@ const mediastore = AWS.JSONService( ) const mediastore_data = AWS.RestJSONService("mediastore", "data.mediastore", "2017-09-01") const mediatailor = 
AWS.RestJSONService("mediatailor", "api.mediatailor", "2018-04-23") +const medical_imaging = AWS.RestJSONService( + "medical-imaging", "medical-imaging", "2023-07-19" +) const memorydb = AWS.JSONService( "memorydb", "memory-db", "2021-01-01", "1.1", "AmazonMemoryDB" ) @@ -513,10 +572,12 @@ const mturk = AWS.JSONService( ) const mwaa = AWS.RestJSONService("airflow", "airflow", "2020-07-01") const neptune = AWS.QueryService("rds", "rds", "2014-10-31") +const neptunedata = AWS.RestJSONService("neptune-db", "neptune-db", "2023-08-01") const network_firewall = AWS.JSONService( "network-firewall", "network-firewall", "2020-11-12", "1.0", "NetworkFirewall_20201112" ) const networkmanager = AWS.RestJSONService("networkmanager", "networkmanager", "2019-07-05") +const networkmonitor = AWS.RestJSONService("networkmonitor", "networkmonitor", "2023-08-01") const nimble = AWS.RestJSONService("nimble", "nimble", "2020-08-01") const oam = AWS.RestJSONService("oam", "oam", "2022-06-10") const omics = AWS.RestJSONService("omics", "omics", "2022-11-28") @@ -546,6 +607,12 @@ const payment_cryptography = AWS.JSONService( const payment_cryptography_data = AWS.RestJSONService( "payment-cryptography", "dataplane.payment-cryptography", "2022-02-03" ) +const pca_connector_ad = AWS.RestJSONService( + "pca-connector-ad", "pca-connector-ad", "2018-05-10" +) +const pca_connector_scep = AWS.RestJSONService( + "pca-connector-scep", "pca-connector-scep", "2018-05-10" +) const personalize = AWS.JSONService( "personalize", "personalize", "2018-05-22", "1.1", "AmazonPersonalize" ) @@ -573,6 +640,8 @@ const privatenetworks = AWS.RestJSONService( "private-networks", "private-networks", "2021-12-03" ) const proton = AWS.JSONService("proton", "proton", "2020-07-20", "1.0", "AwsProton20200720") +const qbusiness = AWS.RestJSONService("qbusiness", "qbusiness", "2023-11-27") +const qconnect = AWS.RestJSONService("wisdom", "wisdom", "2020-10-19") const qldb = AWS.RestJSONService("qldb", "qldb", "2019-01-02") const qldb_session = AWS.JSONService( "qldb", "session.qldb", "2019-07-11", "1.0", "QLDBSession" @@ -592,6 +661,7 @@ const redshift_serverless = AWS.JSONService( const rekognition = AWS.JSONService( "rekognition", "rekognition", "2016-06-27", "1.1", "RekognitionService" ) +const repostspace = AWS.RestJSONService("repostspace", "repostspace", "2022-05-13") const resiliencehub = AWS.RestJSONService("resiliencehub", "resiliencehub", "2020-04-30") const resource_explorer_2 = AWS.RestJSONService( "resource-explorer-2", "resource-explorer-2", "2022-07-28" @@ -617,6 +687,9 @@ const route53_recovery_control_config = AWS.RestJSONService( const route53_recovery_readiness = AWS.RestJSONService( "route53-recovery-readiness", "route53-recovery-readiness", "2019-12-02" ) +const route53profiles = AWS.RestJSONService( + "route53profiles", "route53profiles", "2018-05-10" +) const route53resolver = AWS.JSONService( "route53resolver", "route53resolver", "2018-04-01", "1.1", "Route53Resolver" ) @@ -693,7 +766,7 @@ const snowball = AWS.JSONService( "snowball", "snowball", "2016-06-30", "1.1", "AWSIESnowballJobManagementService" ) const sns = AWS.QueryService("sns", "sns", "2010-03-31") -const sqs = AWS.QueryService("sqs", "sqs", "2012-11-05") +const sqs = AWS.JSONService("sqs", "sqs", "2012-11-05", "1.0", "AmazonSQS") const ssm = AWS.JSONService("ssm", "ssm", "2014-11-06", "1.1", "AmazonSSM") const ssm_contacts = AWS.JSONService( "ssm-contacts", "ssm-contacts", "2021-05-03", "1.1", "SSMContacts" @@ -702,18 +775,27 @@ const ssm_incidents = 
AWS.RestJSONService("ssm-incidents", "ssm-incidents", "201 const ssm_sap = AWS.RestJSONService("ssm-sap", "ssm-sap", "2018-05-10") const sso = AWS.RestJSONService("awsssoportal", "portal.sso", "2019-06-10") const sso_admin = AWS.JSONService("sso", "sso", "2020-07-20", "1.1", "SWBExternalService") -const sso_oidc = AWS.RestJSONService("awsssooidc", "oidc", "2019-06-10") +const sso_oidc = AWS.RestJSONService("sso-oauth", "oidc", "2019-06-10") const storage_gateway = AWS.JSONService( "storagegateway", "storagegateway", "2013-06-30", "1.1", "StorageGateway_20130630" ) const sts = AWS.QueryService("sts", "sts", "2011-06-15") +const supplychain = AWS.RestJSONService("scn", "scn", "2024-01-01") const support = AWS.JSONService( "support", "support", "2013-04-15", "1.1", "AWSSupport_20130415" ) const support_app = AWS.RestJSONService("supportapp", "supportapp", "2021-08-20") const swf = AWS.JSONService("swf", "swf", "2012-01-25", "1.0", "SimpleWorkflowService") const synthetics = AWS.RestJSONService("synthetics", "synthetics", "2017-10-11") +const taxsettings = AWS.RestJSONService("tax", "tax", "2018-05-10") const textract = AWS.JSONService("textract", "textract", "2018-06-27", "1.1", "Textract") +const timestream_influxdb = AWS.JSONService( + "timestream-influxdb", + "timestream-influxdb", + "2023-01-27", + "1.0", + "AmazonTimestreamInfluxDB", +) const timestream_query = AWS.JSONService( "timestream", "query.timestream", "2018-11-01", "1.0", "Timestream_20181101" ) @@ -730,6 +812,7 @@ const transfer = AWS.JSONService( const translate = AWS.JSONService( "translate", "translate", "2017-07-01", "1.1", "AWSShineFrontendService_20170701" ) +const trustedadvisor = AWS.RestJSONService("trustedadvisor", "trustedadvisor", "2022-09-15") const verifiedpermissions = AWS.JSONService( "verifiedpermissions", "verifiedpermissions", "2021-12-01", "1.0", "VerifiedPermissions" ) @@ -755,6 +838,7 @@ const workmailmessageflow = AWS.RestJSONService( const workspaces = AWS.JSONService( "workspaces", "workspaces", "2015-04-08", "1.1", "WorkspacesService" ) +const workspaces_thin_client = AWS.RestJSONService("thinclient", "thinclient", "2023-08-22") const workspaces_web = AWS.RestJSONService("workspaces-web", "workspaces-web", "2020-07-08") const xray = AWS.RestJSONService("xray", "xray", "2016-04-12") diff --git a/src/services/accessanalyzer.jl b/src/services/accessanalyzer.jl index 1a879d5ec7..71843a72a5 100644 --- a/src/services/accessanalyzer.jl +++ b/src/services/accessanalyzer.jl @@ -91,6 +91,186 @@ function cancel_policy_generation( ) end +""" + check_access_not_granted(access, policy_document, policy_type) + check_access_not_granted(access, policy_document, policy_type, params::Dict{String,<:Any}) + +Checks whether the specified access isn't allowed by a policy. + +# Arguments +- `access`: An access object containing the permissions that shouldn't be granted by the + specified policy. If only actions are specified, IAM Access Analyzer checks for access of + the actions on all resources in the policy. If only resources are specified, then IAM + Access Analyzer checks which actions have access to the specified resources. If both + actions and resources are specified, then IAM Access Analyzer checks which of the specified + actions have access to the specified resources. +- `policy_document`: The JSON policy document to use as the content for the policy. +- `policy_type`: The type of policy. Identity policies grant permissions to IAM principals. 
+ Identity policies include managed and inline policies for IAM roles, users, and groups. + Resource policies grant permissions on Amazon Web Services resources. Resource policies + include trust policies for IAM roles and bucket policies for Amazon S3 buckets. You can + provide a generic input such as identity policy or resource policy or a specific input such + as managed policy or Amazon S3 bucket policy. + +""" +function check_access_not_granted( + access, policyDocument, policyType; aws_config::AbstractAWSConfig=global_aws_config() +) + return accessanalyzer( + "POST", + "/policy/check-access-not-granted", + Dict{String,Any}( + "access" => access, + "policyDocument" => policyDocument, + "policyType" => policyType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function check_access_not_granted( + access, + policyDocument, + policyType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "POST", + "/policy/check-access-not-granted", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "access" => access, + "policyDocument" => policyDocument, + "policyType" => policyType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + check_no_new_access(existing_policy_document, new_policy_document, policy_type) + check_no_new_access(existing_policy_document, new_policy_document, policy_type, params::Dict{String,<:Any}) + +Checks whether new access is allowed for an updated policy when compared to the existing +policy. You can find examples for reference policies and learn how to set up and run a +custom policy check for new access in the IAM Access Analyzer custom policy checks samples +repository on GitHub. The reference policies in this repository are meant to be passed to +the existingPolicyDocument request parameter. + +# Arguments +- `existing_policy_document`: The JSON policy document to use as the content for the + existing policy. +- `new_policy_document`: The JSON policy document to use as the content for the updated + policy. +- `policy_type`: The type of policy to compare. Identity policies grant permissions to IAM + principals. Identity policies include managed and inline policies for IAM roles, users, and + groups. Resource policies grant permissions on Amazon Web Services resources. Resource + policies include trust policies for IAM roles and bucket policies for Amazon S3 buckets. + You can provide a generic input such as identity policy or resource policy or a specific + input such as managed policy or Amazon S3 bucket policy. 
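+
+# Example
+A minimal sketch (not part of the generated reference documentation) of calling this
+wrapper through AWS.jl's high-level `@service` interface; the file names and the
+`"IDENTITY_POLICY"` value are illustrative assumptions:
+
+```julia
+using AWS
+@service AccessAnalyzer
+
+existing = read("existing_policy.json", String)   # hypothetical reference policy
+proposed = read("proposed_policy.json", String)   # hypothetical updated policy
+
+# Compare the two documents; the response is expected to report PASS when the updated
+# policy grants no access beyond the existing one.
+resp = AccessAnalyzer.check_no_new_access(existing, proposed, "IDENTITY_POLICY")
+```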
+ +""" +function check_no_new_access( + existingPolicyDocument, + newPolicyDocument, + policyType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "POST", + "/policy/check-no-new-access", + Dict{String,Any}( + "existingPolicyDocument" => existingPolicyDocument, + "newPolicyDocument" => newPolicyDocument, + "policyType" => policyType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function check_no_new_access( + existingPolicyDocument, + newPolicyDocument, + policyType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "POST", + "/policy/check-no-new-access", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "existingPolicyDocument" => existingPolicyDocument, + "newPolicyDocument" => newPolicyDocument, + "policyType" => policyType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + check_no_public_access(policy_document, resource_type) + check_no_public_access(policy_document, resource_type, params::Dict{String,<:Any}) + +Checks whether a resource policy can grant public access to the specified resource type. + +# Arguments +- `policy_document`: The JSON policy document to evaluate for public access. +- `resource_type`: The type of resource to evaluate for public access. For example, to + check for public access to Amazon S3 buckets, you can choose AWS::S3::Bucket for the + resource type. For resource types not supported as valid values, IAM Access Analyzer will + return an error. + +""" +function check_no_public_access( + policyDocument, resourceType; aws_config::AbstractAWSConfig=global_aws_config() +) + return accessanalyzer( + "POST", + "/policy/check-no-public-access", + Dict{String,Any}( + "policyDocument" => policyDocument, "resourceType" => resourceType + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function check_no_public_access( + policyDocument, + resourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "POST", + "/policy/check-no-public-access", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "policyDocument" => policyDocument, "resourceType" => resourceType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_access_preview(analyzer_arn, configurations) create_access_preview(analyzer_arn, configurations, params::Dict{String,<:Any}) @@ -158,16 +338,20 @@ Creates an analyzer for your account. # Arguments - `analyzer_name`: The name of the analyzer to create. -- `type`: The type of analyzer to create. Only ACCOUNT and ORGANIZATION analyzers are - supported. You can create only one analyzer per account per Region. You can create up to 5 - analyzers per organization per Region. +- `type`: The type of analyzer to create. Only ACCOUNT, ORGANIZATION, + ACCOUNT_UNUSED_ACCESS, and ORGANIZATION_UNUSED_ACCESS analyzers are supported. You can + create only one analyzer per account per Region. You can create up to 5 analyzers per + organization per Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"archiveRules"`: Specifies the archive rules to add for the analyzer. Archive rules automatically archive findings that meet the criteria you define for the rule. - `"clientToken"`: A client token. -- `"tags"`: The tags to apply to the analyzer. 
+- `"configuration"`: Specifies the configuration of the analyzer. If the analyzer is an + unused access analyzer, the specified scope of unused access is used for the configuration. + If the analyzer is an external access analyzer, this field is not used. +- `"tags"`: An array of key-value pairs to apply to the analyzer. """ function create_analyzer( analyzerName, type; aws_config::AbstractAWSConfig=global_aws_config() @@ -346,6 +530,45 @@ function delete_archive_rule( ) end +""" + generate_finding_recommendation(analyzer_arn, id) + generate_finding_recommendation(analyzer_arn, id, params::Dict{String,<:Any}) + +Creates a recommendation for an unused permissions finding. + +# Arguments +- `analyzer_arn`: The ARN of the analyzer used to generate the finding recommendation. +- `id`: The unique ID for the finding recommendation. + +""" +function generate_finding_recommendation( + analyzerArn, id; aws_config::AbstractAWSConfig=global_aws_config() +) + return accessanalyzer( + "POST", + "/recommendation/$(id)", + Dict{String,Any}("analyzerArn" => analyzerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function generate_finding_recommendation( + analyzerArn, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "POST", + "/recommendation/$(id)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("analyzerArn" => analyzerArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_access_preview(access_preview_id, analyzer_arn) get_access_preview(access_preview_id, analyzer_arn, params::Dict{String,<:Any}) @@ -503,7 +726,9 @@ end get_finding(analyzer_arn, id) get_finding(analyzer_arn, id, params::Dict{String,<:Any}) -Retrieves information about the specified finding. +Retrieves information about the specified finding. GetFinding and GetFindingV2 both use +access-analyzer:GetFinding in the Action element of an IAM policy statement. You must have +permission to perform the access-analyzer:GetFinding action. # Arguments - `analyzer_arn`: The ARN of the analyzer that generated the finding. @@ -536,6 +761,92 @@ function get_finding( ) end +""" + get_finding_recommendation(analyzer_arn, id) + get_finding_recommendation(analyzer_arn, id, params::Dict{String,<:Any}) + +Retrieves information about a finding recommendation for the specified analyzer. + +# Arguments +- `analyzer_arn`: The ARN of the analyzer used to generate the finding recommendation. +- `id`: The unique ID for the finding recommendation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. +- `"nextToken"`: A token used for pagination of results returned. 
+""" +function get_finding_recommendation( + analyzerArn, id; aws_config::AbstractAWSConfig=global_aws_config() +) + return accessanalyzer( + "GET", + "/recommendation/$(id)", + Dict{String,Any}("analyzerArn" => analyzerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_finding_recommendation( + analyzerArn, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "GET", + "/recommendation/$(id)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("analyzerArn" => analyzerArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_finding_v2(analyzer_arn, id) + get_finding_v2(analyzer_arn, id, params::Dict{String,<:Any}) + +Retrieves information about the specified finding. GetFinding and GetFindingV2 both use +access-analyzer:GetFinding in the Action element of an IAM policy statement. You must have +permission to perform the access-analyzer:GetFinding action. + +# Arguments +- `analyzer_arn`: The ARN of the analyzer that generated the finding. +- `id`: The ID of the finding to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. +- `"nextToken"`: A token used for pagination of results returned. +""" +function get_finding_v2(analyzerArn, id; aws_config::AbstractAWSConfig=global_aws_config()) + return accessanalyzer( + "GET", + "/findingv2/$(id)", + Dict{String,Any}("analyzerArn" => analyzerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_finding_v2( + analyzerArn, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "GET", + "/findingv2/$(id)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("analyzerArn" => analyzerArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_generated_policy(job_id) get_generated_policy(job_id, params::Dict{String,<:Any}) @@ -669,7 +980,8 @@ end list_analyzed_resources(analyzer_arn, params::Dict{String,<:Any}) Retrieves a list of resources of the specified type that have been analyzed by the -specified analyzer.. +specified external access analyzer. This action is not supported for unused access +analyzers. # Arguments - `analyzer_arn`: The ARN of the analyzer to retrieve a list of analyzed resources from. @@ -772,9 +1084,11 @@ end list_findings(analyzer_arn) list_findings(analyzer_arn, params::Dict{String,<:Any}) -Retrieves a list of findings generated by the specified analyzer. To learn about filter -keys that you can use to retrieve a list of findings, see IAM Access Analyzer filter keys -in the IAM User Guide. +Retrieves a list of findings generated by the specified analyzer. ListFindings and +ListFindingsV2 both use access-analyzer:ListFindings in the Action element of an IAM policy +statement. You must have permission to perform the access-analyzer:ListFindings action. To +learn about filter keys that you can use to retrieve a list of findings, see IAM Access +Analyzer filter keys in the IAM User Guide. # Arguments - `analyzer_arn`: The ARN of the analyzer to retrieve findings from. @@ -811,6 +1125,51 @@ function list_findings( ) end +""" + list_findings_v2(analyzer_arn) + list_findings_v2(analyzer_arn, params::Dict{String,<:Any}) + +Retrieves a list of findings generated by the specified analyzer. 
ListFindings and +ListFindingsV2 both use access-analyzer:ListFindings in the Action element of an IAM policy +statement. You must have permission to perform the access-analyzer:ListFindings action. To +learn about filter keys that you can use to retrieve a list of findings, see IAM Access +Analyzer filter keys in the IAM User Guide. + +# Arguments +- `analyzer_arn`: The ARN of the analyzer to retrieve findings from. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: A filter to match for the findings to return. +- `"maxResults"`: The maximum number of results to return in the response. +- `"nextToken"`: A token used for pagination of results returned. +- `"sort"`: +""" +function list_findings_v2(analyzerArn; aws_config::AbstractAWSConfig=global_aws_config()) + return accessanalyzer( + "POST", + "/findingv2", + Dict{String,Any}("analyzerArn" => analyzerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_findings_v2( + analyzerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return accessanalyzer( + "POST", + "/findingv2", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("analyzerArn" => analyzerArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_policy_generations() list_policy_generations(params::Dict{String,<:Any}) @@ -1176,12 +1535,12 @@ to author functional policies that meet security best practices. - `policy_document`: The JSON policy document to use as the content for the policy. - `policy_type`: The type of policy to validate. Identity policies grant permissions to IAM principals. Identity policies include managed and inline policies for IAM roles, users, and - groups. They also include service-control policies (SCPs) that are attached to an Amazon - Web Services organization, organizational unit (OU), or an account. Resource policies grant - permissions on Amazon Web Services resources. Resource policies include trust policies for - IAM roles and bucket policies for Amazon S3 buckets. You can provide a generic input such - as identity policy or resource policy or a specific input such as managed policy or Amazon - S3 bucket policy. + groups. Resource policies grant permissions on Amazon Web Services resources. Resource + policies include trust policies for IAM roles and bucket policies for Amazon S3 buckets. + You can provide a generic input such as identity policy or resource policy or a specific + input such as managed policy or Amazon S3 bucket policy. Service control policies (SCPs) + are a type of organization policy attached to an Amazon Web Services organization, + organizational unit (OU), or an account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/account.jl b/src/services/account.jl index 16efb0b9f4..82a7d7add5 100644 --- a/src/services/account.jl +++ b/src/services/account.jl @@ -4,6 +4,66 @@ using AWS.AWSServices: account using AWS.Compat using AWS.UUIDs +""" + accept_primary_email_update(account_id, otp, primary_email) + accept_primary_email_update(account_id, otp, primary_email, params::Dict{String,<:Any}) + +Accepts the request that originated from StartPrimaryEmailUpdate to update the primary +email address (also known as the root user email address) for the specified account. 
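+The update is a two-step flow: StartPrimaryEmailUpdate sends a one-time password (OTP) to
+the new address, and this operation confirms it. A minimal sketch (not part of the
+generated reference documentation), assuming the high-level `@service` interface and
+placeholder values:
+
+```julia
+using AWS
+@service Account
+
+account_id = "111122223333"          # placeholder member account ID
+new_email = "new-root@example.com"   # placeholder primary email address
+
+# Step 1: request the change; an OTP is emailed to `new_email`.
+Account.start_primary_email_update(account_id, new_email)
+
+# Step 2: confirm the change with the OTP received at the new address.
+Account.accept_primary_email_update(account_id, "123456", new_email)
+```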
+ +# Arguments +- `account_id`: Specifies the 12-digit account ID number of the Amazon Web Services account + that you want to access or modify with this operation. To use this parameter, the caller + must be an identity in the organization's management account or a delegated administrator + account. The specified account ID must be a member account in the same organization. The + organization must have all features enabled, and the organization must have trusted access + enabled for the Account Management service, and optionally a delegated admin account + assigned. This operation can only be called from the management account or the delegated + administrator account of an organization for a member account. The management account + can't specify its own AccountId. +- `otp`: The OTP code sent to the PrimaryEmail specified on the StartPrimaryEmailUpdate API + call. +- `primary_email`: The new primary email address for use with the specified account. This + must match the PrimaryEmail from the StartPrimaryEmailUpdate API call. + +""" +function accept_primary_email_update( + AccountId, Otp, PrimaryEmail; aws_config::AbstractAWSConfig=global_aws_config() +) + return account( + "POST", + "/acceptPrimaryEmailUpdate", + Dict{String,Any}( + "AccountId" => AccountId, "Otp" => Otp, "PrimaryEmail" => PrimaryEmail + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function accept_primary_email_update( + AccountId, + Otp, + PrimaryEmail, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return account( + "POST", + "/acceptPrimaryEmailUpdate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AccountId" => AccountId, "Otp" => Otp, "PrimaryEmail" => PrimaryEmail + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_alternate_contact(alternate_contact_type) delete_alternate_contact(alternate_contact_type, params::Dict{String,<:Any}) @@ -68,7 +128,8 @@ end disable_region(region_name) disable_region(region_name, params::Dict{String,<:Any}) -Disables (opts-out) a particular Region for an account. +Disables (opts-out) a particular Region for an account. The act of disabling a Region will +remove all IAM access to any resources that reside in that Region. # Arguments - `region_name`: Specifies the Region-code for a given Region name (for example, @@ -83,14 +144,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's - management account or a delegated administrator account. The specified account ID must also - be a member account in the same organization. The organization must have all features - enabled, and the organization must have trusted access enabled for the Account Management - service, and optionally a delegated admin account assigned. The management account can't - specify its own AccountId. It must call the operation in standalone context by not - including the AccountId parameter. To call this operation on an account that is not a - member of an organization, don't specify this parameter. Instead, call the operation using - an identity belonging to the account whose contacts you wish to retrieve or modify. + management account or a delegated administrator account. 
The specified account ID must be a + member account in the same organization. The organization must have all features enabled, + and the organization must have trusted access enabled for the Account Management service, + and optionally a delegated admin account assigned. The management account can't specify + its own AccountId. It must call the operation in standalone context by not including the + AccountId parameter. To call this operation on an account that is not a member of an + organization, don't specify this parameter. Instead, call the operation using an identity + belonging to the account whose contacts you wish to retrieve or modify. """ function disable_region(RegionName; aws_config::AbstractAWSConfig=global_aws_config()) return account( @@ -137,14 +198,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's - management account or a delegated administrator account. The specified account ID must also - be a member account in the same organization. The organization must have all features - enabled, and the organization must have trusted access enabled for the Account Management - service, and optionally a delegated admin account assigned. The management account can't - specify its own AccountId. It must call the operation in standalone context by not - including the AccountId parameter. To call this operation on an account that is not a - member of an organization, don't specify this parameter. Instead, call the operation using - an identity belonging to the account whose contacts you wish to retrieve or modify. + management account or a delegated administrator account. The specified account ID must be a + member account in the same organization. The organization must have all features enabled, + and the organization must have trusted access enabled for the Account Management service, + and optionally a delegated admin account assigned. The management account can't specify + its own AccountId. It must call the operation in standalone context by not including the + AccountId parameter. To call this operation on an account that is not a member of an + organization, don't specify this parameter. Instead, call the operation using an identity + belonging to the account whose contacts you wish to retrieve or modify. """ function enable_region(RegionName; aws_config::AbstractAWSConfig=global_aws_config()) return account( @@ -245,14 +306,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's - management account or a delegated administrator account. The specified account ID must also - be a member account in the same organization. The organization must have all features - enabled, and the organization must have trusted access enabled for the Account Management - service, and optionally a delegated admin account assigned. The management account can't - specify its own AccountId. It must call the operation in standalone context by not - including the AccountId parameter. 
To call this operation on an account that is not a - member of an organization, don't specify this parameter. Instead, call the operation using - an identity belonging to the account whose contacts you wish to retrieve or modify. + management account or a delegated administrator account. The specified account ID must be a + member account in the same organization. The organization must have all features enabled, + and the organization must have trusted access enabled for the Account Management service, + and optionally a delegated admin account assigned. The management account can't specify + its own AccountId. It must call the operation in standalone context by not including the + AccountId parameter. To call this operation on an account that is not a member of an + organization, don't specify this parameter. Instead, call the operation using an identity + belonging to the account whose contacts you wish to retrieve or modify. """ function get_contact_information(; aws_config::AbstractAWSConfig=global_aws_config()) return account( @@ -274,6 +335,49 @@ function get_contact_information( ) end +""" + get_primary_email(account_id) + get_primary_email(account_id, params::Dict{String,<:Any}) + +Retrieves the primary email address for the specified account. + +# Arguments +- `account_id`: Specifies the 12-digit account ID number of the Amazon Web Services account + that you want to access or modify with this operation. To use this parameter, the caller + must be an identity in the organization's management account or a delegated administrator + account. The specified account ID must be a member account in the same organization. The + organization must have all features enabled, and the organization must have trusted access + enabled for the Account Management service, and optionally a delegated admin account + assigned. This operation can only be called from the management account or the delegated + administrator account of an organization for a member account. The management account + can't specify its own AccountId. + +""" +function get_primary_email(AccountId; aws_config::AbstractAWSConfig=global_aws_config()) + return account( + "POST", + "/getPrimaryEmail", + Dict{String,Any}("AccountId" => AccountId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_primary_email( + AccountId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return account( + "POST", + "/getPrimaryEmail", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AccountId" => AccountId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_region_opt_status(region_name) get_region_opt_status(region_name, params::Dict{String,<:Any}) @@ -291,14 +395,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's - management account or a delegated administrator account. The specified account ID must also - be a member account in the same organization. The organization must have all features - enabled, and the organization must have trusted access enabled for the Account Management - service, and optionally a delegated admin account assigned. The management account can't - specify its own AccountId. 
It must call the operation in standalone context by not - including the AccountId parameter. To call this operation on an account that is not a - member of an organization, don't specify this parameter. Instead, call the operation using - an identity belonging to the account whose contacts you wish to retrieve or modify. + management account or a delegated administrator account. The specified account ID must be a + member account in the same organization. The organization must have all features enabled, + and the organization must have trusted access enabled for the Account Management service, + and optionally a delegated admin account assigned. The management account can't specify + its own AccountId. It must call the operation in standalone context by not including the + AccountId parameter. To call this operation on an account that is not a member of an + organization, don't specify this parameter. Instead, call the operation using an identity + belonging to the account whose contacts you wish to retrieve or modify. """ function get_region_opt_status( RegionName; aws_config::AbstractAWSConfig=global_aws_config() @@ -340,14 +444,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's - management account or a delegated administrator account. The specified account ID must also - be a member account in the same organization. The organization must have all features - enabled, and the organization must have trusted access enabled for the Account Management - service, and optionally a delegated admin account assigned. The management account can't - specify its own AccountId. It must call the operation in standalone context by not - including the AccountId parameter. To call this operation on an account that is not a - member of an organization, don't specify this parameter. Instead, call the operation using - an identity belonging to the account whose contacts you wish to retrieve or modify. + management account or a delegated administrator account. The specified account ID must be a + member account in the same organization. The organization must have all features enabled, + and the organization must have trusted access enabled for the Account Management service, + and optionally a delegated admin account assigned. The management account can't specify + its own AccountId. It must call the operation in standalone context by not including the + AccountId parameter. To call this operation on an account that is not a member of an + organization, don't specify this parameter. Instead, call the operation using an identity + belonging to the account whose contacts you wish to retrieve or modify. - `"MaxResults"`: The total number of items to return in the command’s output. If the total number of items available is more than the value specified, a NextToken is provided in the command’s output. To resume pagination, provide the NextToken value in the @@ -482,14 +586,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. 
To use this parameter, the caller must be an identity in the organization's - management account or a delegated administrator account. The specified account ID must also - be a member account in the same organization. The organization must have all features - enabled, and the organization must have trusted access enabled for the Account Management - service, and optionally a delegated admin account assigned. The management account can't - specify its own AccountId. It must call the operation in standalone context by not - including the AccountId parameter. To call this operation on an account that is not a - member of an organization, don't specify this parameter. Instead, call the operation using - an identity belonging to the account whose contacts you wish to retrieve or modify. + management account or a delegated administrator account. The specified account ID must be a + member account in the same organization. The organization must have all features enabled, + and the organization must have trusted access enabled for the Account Management service, + and optionally a delegated admin account assigned. The management account can't specify + its own AccountId. It must call the operation in standalone context by not including the + AccountId parameter. To call this operation on an account that is not a member of an + organization, don't specify this parameter. Instead, call the operation using an identity + belonging to the account whose contacts you wish to retrieve or modify. """ function put_contact_information( ContactInformation; aws_config::AbstractAWSConfig=global_aws_config() @@ -519,3 +623,55 @@ function put_contact_information( feature_set=SERVICE_FEATURE_SET, ) end + +""" + start_primary_email_update(account_id, primary_email) + start_primary_email_update(account_id, primary_email, params::Dict{String,<:Any}) + +Starts the process to update the primary email address for the specified account. + +# Arguments +- `account_id`: Specifies the 12-digit account ID number of the Amazon Web Services account + that you want to access or modify with this operation. To use this parameter, the caller + must be an identity in the organization's management account or a delegated administrator + account. The specified account ID must be a member account in the same organization. The + organization must have all features enabled, and the organization must have trusted access + enabled for the Account Management service, and optionally a delegated admin account + assigned. This operation can only be called from the management account or the delegated + administrator account of an organization for a member account. The management account + can't specify its own AccountId. +- `primary_email`: The new primary email address (also known as the root user email + address) to use in the specified account. 
+ +""" +function start_primary_email_update( + AccountId, PrimaryEmail; aws_config::AbstractAWSConfig=global_aws_config() +) + return account( + "POST", + "/startPrimaryEmailUpdate", + Dict{String,Any}("AccountId" => AccountId, "PrimaryEmail" => PrimaryEmail); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_primary_email_update( + AccountId, + PrimaryEmail, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return account( + "POST", + "/startPrimaryEmailUpdate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AccountId" => AccountId, "PrimaryEmail" => PrimaryEmail), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/acm_pca.jl b/src/services/acm_pca.jl index c9340eb0e2..c039eea54b 100644 --- a/src/services/acm_pca.jl +++ b/src/services/acm_pca.jl @@ -754,14 +754,15 @@ certificate signed by the preceding subordinate CA must come next, and so on unt chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following -extensions to be marked critical in the imported CA certificate or chain. Basic -constraints (must be marked critical) Subject alternative names Key usage Extended -key usage Authority key identifier Subject key identifier Issuer alternative name -Subject directory attributes Subject information access Certificate policies Policy -mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following -extensions when they are marked critical in an imported CA certificate or chain. Name -constraints Policy constraints CRL distribution points Authority information access -Freshest CRL Any other extension +extensions to be marked critical in the imported CA certificate or chain. Authority key +identifier Basic constraints (must be marked critical) Certificate policies Extended +key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints +Policy mappings Subject alternative name Subject directory attributes Subject key +identifier Subject information access Amazon Web Services Private CA rejects the +following extensions when they are marked critical in an imported CA certificate or chain. + Authority information access CRL distribution points Freshest CRL Policy constraints + Amazon Web Services Private Certificate Authority will also reject any other extension +marked as critical not contained on the preceding list of allowed extensions. # Arguments - `certificate`: The PEM-encoded certificate for a private CA. This may be a self-signed @@ -860,12 +861,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys supplied during certificate issuance, Amazon Web Services Private CA applies order of operation rules to determine what information is used. - `"IdempotencyToken"`: Alphanumeric string that can be used to distinguish between calls - to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after one - minute. Therefore, if you call IssueCertificate multiple times with the same idempotency - token within one minute, Amazon Web Services Private CA recognizes that you are requesting - only one certificate and will issue only one. If you change the idempotency token for each - call, Amazon Web Services Private CA recognizes that you are requesting multiple - certificates. 
+ to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after five + minutes. Therefore, if you call IssueCertificate multiple times with the same idempotency + token within five minutes, Amazon Web Services Private CA recognizes that you are + requesting only one certificate and will issue only one. If you change the idempotency + token for each call, Amazon Web Services Private CA recognizes that you are requesting + multiple certificates. - `"TemplateArn"`: Specifies a custom configuration template to use when issuing a certificate. If this parameter is not provided, Amazon Web Services Private CA defaults to the EndEntityCertificate/V1 template. For CA certificates, you should choose the shortest @@ -942,7 +943,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MaxResults"`: Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value - in a subsequent request to retrieve additional items. + in a subsequent request to retrieve additional items. Although the maximum value is 1000, + the action only returns a maximum of 100 items. - `"NextToken"`: Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received. diff --git a/src/services/alexa_for_business.jl b/src/services/alexa_for_business.jl deleted file mode 100644 index b2bf2e1ce5..0000000000 --- a/src/services/alexa_for_business.jl +++ /dev/null @@ -1,3489 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: alexa_for_business -using AWS.Compat -using AWS.UUIDs - -""" - approve_skill(skill_id) - approve_skill(skill_id, params::Dict{String,<:Any}) - -Associates a skill with the organization under the customer's AWS account. If a skill is -private, the user implicitly accepts access to this skill during enablement. - -# Arguments -- `skill_id`: The unique identifier of the skill. - -""" -function approve_skill(SkillId; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ApproveSkill", - Dict{String,Any}("SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function approve_skill( - SkillId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ApproveSkill", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("SkillId" => SkillId), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - associate_contact_with_address_book(address_book_arn, contact_arn) - associate_contact_with_address_book(address_book_arn, contact_arn, params::Dict{String,<:Any}) - -Associates a contact with a given address book. - -# Arguments -- `address_book_arn`: The ARN of the address book with which to associate the contact. -- `contact_arn`: The ARN of the contact to associate with an address book. 
- -""" -function associate_contact_with_address_book( - AddressBookArn, ContactArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateContactWithAddressBook", - Dict{String,Any}("AddressBookArn" => AddressBookArn, "ContactArn" => ContactArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_contact_with_address_book( - AddressBookArn, - ContactArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "AssociateContactWithAddressBook", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "AddressBookArn" => AddressBookArn, "ContactArn" => ContactArn - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - associate_device_with_network_profile(device_arn, network_profile_arn) - associate_device_with_network_profile(device_arn, network_profile_arn, params::Dict{String,<:Any}) - -Associates a device with the specified network profile. - -# Arguments -- `device_arn`: The device ARN. -- `network_profile_arn`: The ARN of the network profile to associate with a device. - -""" -function associate_device_with_network_profile( - DeviceArn, NetworkProfileArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateDeviceWithNetworkProfile", - Dict{String,Any}( - "DeviceArn" => DeviceArn, "NetworkProfileArn" => NetworkProfileArn - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_device_with_network_profile( - DeviceArn, - NetworkProfileArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "AssociateDeviceWithNetworkProfile", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "DeviceArn" => DeviceArn, "NetworkProfileArn" => NetworkProfileArn - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - associate_device_with_room() - associate_device_with_room(params::Dict{String,<:Any}) - -Associates a device with a given room. This applies all the settings from the room profile -to the device, and all the skills in any skill groups added to that room. This operation -requires the device to be online, or else a manual sync is required. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeviceArn"`: The ARN of the device to associate to a room. Required. -- `"RoomArn"`: The ARN of the room with which to associate the device. Required. -""" -function associate_device_with_room(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "AssociateDeviceWithRoom"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function associate_device_with_room( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateDeviceWithRoom", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - associate_skill_group_with_room() - associate_skill_group_with_room(params::Dict{String,<:Any}) - -Associates a skill group with a given room. This enables all skills in the associated skill -group on all devices in the room. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The ARN of the room with which to associate the skill group. Required. 
-- `"SkillGroupArn"`: The ARN of the skill group to associate with a room. Required. -""" -function associate_skill_group_with_room(; - aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateSkillGroupWithRoom"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_skill_group_with_room( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateSkillGroupWithRoom", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - associate_skill_with_skill_group(skill_id) - associate_skill_with_skill_group(skill_id, params::Dict{String,<:Any}) - -Associates a skill with a skill group. - -# Arguments -- `skill_id`: The unique identifier of the skill. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"SkillGroupArn"`: The ARN of the skill group to associate the skill to. Required. -""" -function associate_skill_with_skill_group( - SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateSkillWithSkillGroup", - Dict{String,Any}("SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_skill_with_skill_group( - SkillId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateSkillWithSkillGroup", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("SkillId" => SkillId), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - associate_skill_with_users(skill_id) - associate_skill_with_users(skill_id, params::Dict{String,<:Any}) - -Makes a private skill available for enrolled users to enable on their devices. - -# Arguments -- `skill_id`: The private skill ID you want to make available to enrolled users. - -""" -function associate_skill_with_users( - SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateSkillWithUsers", - Dict{String,Any}("SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_skill_with_users( - SkillId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "AssociateSkillWithUsers", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("SkillId" => SkillId), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_address_book(name) - create_address_book(name, params::Dict{String,<:Any}) - -Creates an address book with the specified details. - -# Arguments -- `name`: The name of the address book. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: A unique, user-specified identifier for the request that ensures - idempotency. -- `"Description"`: The description of the address book. -- `"Tags"`: The tags to be added to the specified resource. Do not provide system tags. 
-""" -function create_address_book(Name; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "CreateAddressBook", - Dict{String,Any}("Name" => Name, "ClientRequestToken" => string(uuid4())); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_address_book( - Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "CreateAddressBook", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("Name" => Name, "ClientRequestToken" => string(uuid4())), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_business_report_schedule(content_range, format) - create_business_report_schedule(content_range, format, params::Dict{String,<:Any}) - -Creates a recurring schedule for usage reports to deliver to the specified S3 location with -a specified daily or weekly interval. - -# Arguments -- `content_range`: The content range of the reports. -- `format`: The format of the generated report (individual CSV files or zipped files of - individual files). - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: The client request token. -- `"Recurrence"`: The recurrence of the reports. If this isn't specified, the report will - only be delivered one time when the API is called. -- `"S3BucketName"`: The S3 bucket name of the output reports. If this isn't specified, the - report can be retrieved from a download link by calling ListBusinessReportSchedule. -- `"S3KeyPrefix"`: The S3 key where the report is delivered. -- `"ScheduleName"`: The name identifier of the schedule. -- `"Tags"`: The tags for the business report schedule. -""" -function create_business_report_schedule( - ContentRange, Format; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "CreateBusinessReportSchedule", - Dict{String,Any}( - "ContentRange" => ContentRange, - "Format" => Format, - "ClientRequestToken" => string(uuid4()), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_business_report_schedule( - ContentRange, - Format, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateBusinessReportSchedule", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ContentRange" => ContentRange, - "Format" => Format, - "ClientRequestToken" => string(uuid4()), - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_conference_provider(conference_provider_name, conference_provider_type, meeting_setting) - create_conference_provider(conference_provider_name, conference_provider_type, meeting_setting, params::Dict{String,<:Any}) - -Adds a new conference provider under the user's AWS account. - -# Arguments -- `conference_provider_name`: The name of the conference provider. -- `conference_provider_type`: Represents a type within a list of predefined types. -- `meeting_setting`: The meeting settings for the conference provider. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: The request token of the client. -- `"IPDialIn"`: The IP endpoint and protocol for calling. -- `"PSTNDialIn"`: The information for PSTN conferencing. -- `"Tags"`: The tags to be added to the specified resource. 
Do not provide system tags. -""" -function create_conference_provider( - ConferenceProviderName, - ConferenceProviderType, - MeetingSetting; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateConferenceProvider", - Dict{String,Any}( - "ConferenceProviderName" => ConferenceProviderName, - "ConferenceProviderType" => ConferenceProviderType, - "MeetingSetting" => MeetingSetting, - "ClientRequestToken" => string(uuid4()), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_conference_provider( - ConferenceProviderName, - ConferenceProviderType, - MeetingSetting, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateConferenceProvider", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ConferenceProviderName" => ConferenceProviderName, - "ConferenceProviderType" => ConferenceProviderType, - "MeetingSetting" => MeetingSetting, - "ClientRequestToken" => string(uuid4()), - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_contact(first_name) - create_contact(first_name, params::Dict{String,<:Any}) - -Creates a contact with the specified details. - -# Arguments -- `first_name`: The first name of the contact that is used to call the contact on the - device. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: A unique, user-specified identifier for this request that ensures - idempotency. -- `"DisplayName"`: The name of the contact to display on the console. -- `"LastName"`: The last name of the contact that is used to call the contact on the device. -- `"PhoneNumber"`: The phone number of the contact in E.164 format. The phone number type - defaults to WORK. You can specify PhoneNumber or PhoneNumbers. We recommend that you use - PhoneNumbers, which lets you specify the phone number type and multiple numbers. -- `"PhoneNumbers"`: The list of phone numbers for the contact. -- `"SipAddresses"`: The list of SIP addresses for the contact. -- `"Tags"`: The tags to be added to the specified resource. Do not provide system tags. -""" -function create_contact(FirstName; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "CreateContact", - Dict{String,Any}("FirstName" => FirstName, "ClientRequestToken" => string(uuid4())); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_contact( - FirstName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateContact", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "FirstName" => FirstName, "ClientRequestToken" => string(uuid4()) - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_gateway_group(client_request_token, name) - create_gateway_group(client_request_token, name, params::Dict{String,<:Any}) - -Creates a gateway group with the specified details. - -# Arguments -- `client_request_token`: A unique, user-specified identifier for the request that ensures - idempotency. -- `name`: The name of the gateway group. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the gateway group. -- `"Tags"`: The tags to be added to the specified resource. 
Do not provide system tags. -""" -function create_gateway_group( - ClientRequestToken, Name; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "CreateGatewayGroup", - Dict{String,Any}("ClientRequestToken" => ClientRequestToken, "Name" => Name); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_gateway_group( - ClientRequestToken, - Name, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateGatewayGroup", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ClientRequestToken" => ClientRequestToken, "Name" => Name - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_network_profile(client_request_token, network_profile_name, security_type, ssid) - create_network_profile(client_request_token, network_profile_name, security_type, ssid, params::Dict{String,<:Any}) - -Creates a network profile with the specified details. - -# Arguments -- `client_request_token`: -- `network_profile_name`: The name of the network profile associated with a device. -- `security_type`: The security type of the Wi-Fi network. This can be WPA2_ENTERPRISE, - WPA2_PSK, WPA_PSK, WEP, or OPEN. -- `ssid`: The SSID of the Wi-Fi network. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CertificateAuthorityArn"`: The ARN of the Private Certificate Authority (PCA) created - in AWS Certificate Manager (ACM). This is used to issue certificates to the devices. -- `"CurrentPassword"`: The current password of the Wi-Fi network. -- `"Description"`: Detailed information about a device's network profile. -- `"EapMethod"`: The authentication standard that is used in the EAP framework. Currently, - EAP_TLS is supported. -- `"NextPassword"`: The next, or subsequent, password of the Wi-Fi network. This password - is asynchronously transmitted to the device and is used when the password of the network - changes to NextPassword. -- `"Tags"`: The tags to be added to the specified resource. Do not provide system tags. -- `"TrustAnchors"`: The root certificates of your authentication server that is installed - on your devices and used to trust your authentication server during EAP negotiation. 
-""" -function create_network_profile( - ClientRequestToken, - NetworkProfileName, - SecurityType, - Ssid; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateNetworkProfile", - Dict{String,Any}( - "ClientRequestToken" => ClientRequestToken, - "NetworkProfileName" => NetworkProfileName, - "SecurityType" => SecurityType, - "Ssid" => Ssid, - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_network_profile( - ClientRequestToken, - NetworkProfileName, - SecurityType, - Ssid, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateNetworkProfile", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ClientRequestToken" => ClientRequestToken, - "NetworkProfileName" => NetworkProfileName, - "SecurityType" => SecurityType, - "Ssid" => Ssid, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_profile(address, distance_unit, profile_name, temperature_unit, timezone, wake_word) - create_profile(address, distance_unit, profile_name, temperature_unit, timezone, wake_word, params::Dict{String,<:Any}) - -Creates a new room profile with the specified details. - -# Arguments -- `address`: The valid address for the room. -- `distance_unit`: The distance unit to be used by devices in the profile. -- `profile_name`: The name of a room profile. -- `temperature_unit`: The temperature unit to be used by devices in the profile. -- `timezone`: The time zone used by a room profile. -- `wake_word`: A wake word for Alexa, Echo, Amazon, or a computer. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: The user-specified token that is used during the creation of a - profile. -- `"DataRetentionOptIn"`: Whether data retention of the profile is enabled. -- `"Locale"`: The locale of the room profile. (This is currently only available to a - limited preview audience.) -- `"MaxVolumeLimit"`: The maximum volume limit for a room profile. -- `"MeetingRoomConfiguration"`: The meeting room settings of a room profile. -- `"PSTNEnabled"`: Whether PSTN calling is enabled. -- `"SetupModeDisabled"`: Whether room profile setup is enabled. -- `"Tags"`: The tags for the profile. 
-""" -function create_profile( - Address, - DistanceUnit, - ProfileName, - TemperatureUnit, - Timezone, - WakeWord; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateProfile", - Dict{String,Any}( - "Address" => Address, - "DistanceUnit" => DistanceUnit, - "ProfileName" => ProfileName, - "TemperatureUnit" => TemperatureUnit, - "Timezone" => Timezone, - "WakeWord" => WakeWord, - "ClientRequestToken" => string(uuid4()), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_profile( - Address, - DistanceUnit, - ProfileName, - TemperatureUnit, - Timezone, - WakeWord, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateProfile", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "Address" => Address, - "DistanceUnit" => DistanceUnit, - "ProfileName" => ProfileName, - "TemperatureUnit" => TemperatureUnit, - "Timezone" => Timezone, - "WakeWord" => WakeWord, - "ClientRequestToken" => string(uuid4()), - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_room(room_name) - create_room(room_name, params::Dict{String,<:Any}) - -Creates a room with the specified details. - -# Arguments -- `room_name`: The name for the room. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: A unique, user-specified identifier for this request that ensures - idempotency. -- `"Description"`: The description for the room. -- `"ProfileArn"`: The profile ARN for the room. This is required. -- `"ProviderCalendarId"`: The calendar ARN for the room. -- `"Tags"`: The tags for the room. -""" -function create_room(RoomName; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "CreateRoom", - Dict{String,Any}("RoomName" => RoomName, "ClientRequestToken" => string(uuid4())); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_room( - RoomName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateRoom", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "RoomName" => RoomName, "ClientRequestToken" => string(uuid4()) - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_skill_group(skill_group_name) - create_skill_group(skill_group_name, params::Dict{String,<:Any}) - -Creates a skill group with a specified name and description. - -# Arguments -- `skill_group_name`: The name for the skill group. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: A unique, user-specified identifier for this request that ensures - idempotency. -- `"Description"`: The description for the skill group. -- `"Tags"`: The tags for the skill group. 
-""" -function create_skill_group( - SkillGroupName; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "CreateSkillGroup", - Dict{String,Any}( - "SkillGroupName" => SkillGroupName, "ClientRequestToken" => string(uuid4()) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_skill_group( - SkillGroupName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "CreateSkillGroup", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "SkillGroupName" => SkillGroupName, - "ClientRequestToken" => string(uuid4()), - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_user(user_id) - create_user(user_id, params::Dict{String,<:Any}) - -Creates a user. - -# Arguments -- `user_id`: The ARN for the user. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: A unique, user-specified identifier for this request that ensures - idempotency. -- `"Email"`: The email address for the user. -- `"FirstName"`: The first name for the user. -- `"LastName"`: The last name for the user. -- `"Tags"`: The tags for the user. -""" -function create_user(UserId; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "CreateUser", - Dict{String,Any}("UserId" => UserId, "ClientRequestToken" => string(uuid4())); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_user( - UserId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "CreateUser", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "UserId" => UserId, "ClientRequestToken" => string(uuid4()) - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_address_book(address_book_arn) - delete_address_book(address_book_arn, params::Dict{String,<:Any}) - -Deletes an address book by the address book ARN. - -# Arguments -- `address_book_arn`: The ARN of the address book to delete. - -""" -function delete_address_book( - AddressBookArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteAddressBook", - Dict{String,Any}("AddressBookArn" => AddressBookArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_address_book( - AddressBookArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteAddressBook", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("AddressBookArn" => AddressBookArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_business_report_schedule(schedule_arn) - delete_business_report_schedule(schedule_arn, params::Dict{String,<:Any}) - -Deletes the recurring report delivery schedule with the specified schedule ARN. - -# Arguments -- `schedule_arn`: The ARN of the business report schedule. 
- -""" -function delete_business_report_schedule( - ScheduleArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteBusinessReportSchedule", - Dict{String,Any}("ScheduleArn" => ScheduleArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_business_report_schedule( - ScheduleArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteBusinessReportSchedule", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("ScheduleArn" => ScheduleArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_conference_provider(conference_provider_arn) - delete_conference_provider(conference_provider_arn, params::Dict{String,<:Any}) - -Deletes a conference provider. - -# Arguments -- `conference_provider_arn`: The ARN of the conference provider. - -""" -function delete_conference_provider( - ConferenceProviderArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteConferenceProvider", - Dict{String,Any}("ConferenceProviderArn" => ConferenceProviderArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_conference_provider( - ConferenceProviderArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteConferenceProvider", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ConferenceProviderArn" => ConferenceProviderArn), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_contact(contact_arn) - delete_contact(contact_arn, params::Dict{String,<:Any}) - -Deletes a contact by the contact ARN. - -# Arguments -- `contact_arn`: The ARN of the contact to delete. - -""" -function delete_contact(ContactArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "DeleteContact", - Dict{String,Any}("ContactArn" => ContactArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_contact( - ContactArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteContact", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("ContactArn" => ContactArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_device(device_arn) - delete_device(device_arn, params::Dict{String,<:Any}) - -Removes a device from Alexa For Business. - -# Arguments -- `device_arn`: The ARN of the device for which to request details. 
- -""" -function delete_device(DeviceArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "DeleteDevice", - Dict{String,Any}("DeviceArn" => DeviceArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_device( - DeviceArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteDevice", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("DeviceArn" => DeviceArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_device_usage_data(device_arn, device_usage_type) - delete_device_usage_data(device_arn, device_usage_type, params::Dict{String,<:Any}) - -When this action is called for a specified shared device, it allows authorized users to -delete the device's entire previous history of voice input data and associated response -data. This action can be called once every 24 hours for a specific shared device. - -# Arguments -- `device_arn`: The ARN of the device. -- `device_usage_type`: The type of usage data to delete. - -""" -function delete_device_usage_data( - DeviceArn, DeviceUsageType; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteDeviceUsageData", - Dict{String,Any}("DeviceArn" => DeviceArn, "DeviceUsageType" => DeviceUsageType); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_device_usage_data( - DeviceArn, - DeviceUsageType, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteDeviceUsageData", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "DeviceArn" => DeviceArn, "DeviceUsageType" => DeviceUsageType - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_gateway_group(gateway_group_arn) - delete_gateway_group(gateway_group_arn, params::Dict{String,<:Any}) - -Deletes a gateway group. - -# Arguments -- `gateway_group_arn`: The ARN of the gateway group to delete. - -""" -function delete_gateway_group( - GatewayGroupArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteGatewayGroup", - Dict{String,Any}("GatewayGroupArn" => GatewayGroupArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_gateway_group( - GatewayGroupArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteGatewayGroup", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("GatewayGroupArn" => GatewayGroupArn), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_network_profile(network_profile_arn) - delete_network_profile(network_profile_arn, params::Dict{String,<:Any}) - -Deletes a network profile by the network profile ARN. - -# Arguments -- `network_profile_arn`: The ARN of the network profile associated with a device. 
- -""" -function delete_network_profile( - NetworkProfileArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteNetworkProfile", - Dict{String,Any}("NetworkProfileArn" => NetworkProfileArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_network_profile( - NetworkProfileArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteNetworkProfile", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("NetworkProfileArn" => NetworkProfileArn), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_profile() - delete_profile(params::Dict{String,<:Any}) - -Deletes a room profile by the profile ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ProfileArn"`: The ARN of the room profile to delete. Required. -""" -function delete_profile(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "DeleteProfile"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function delete_profile( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteProfile", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - delete_room() - delete_room(params::Dict{String,<:Any}) - -Deletes a room by the room ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The ARN of the room to delete. Required. -""" -function delete_room(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "DeleteRoom"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function delete_room( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteRoom", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - delete_room_skill_parameter(parameter_key, skill_id) - delete_room_skill_parameter(parameter_key, skill_id, params::Dict{String,<:Any}) - -Deletes room skill parameter details by room, skill, and parameter key ID. - -# Arguments -- `parameter_key`: The room skill parameter key for which to remove details. -- `skill_id`: The ID of the skill from which to remove the room skill parameter details. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The ARN of the room from which to remove the room skill parameter details. 
-""" -function delete_room_skill_parameter( - ParameterKey, SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteRoomSkillParameter", - Dict{String,Any}("ParameterKey" => ParameterKey, "SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_room_skill_parameter( - ParameterKey, - SkillId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteRoomSkillParameter", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ParameterKey" => ParameterKey, "SkillId" => SkillId), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_skill_authorization(skill_id) - delete_skill_authorization(skill_id, params::Dict{String,<:Any}) - -Unlinks a third-party account from a skill. - -# Arguments -- `skill_id`: The unique identifier of a skill. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The room that the skill is authorized for. -""" -function delete_skill_authorization( - SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteSkillAuthorization", - Dict{String,Any}("SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_skill_authorization( - SkillId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteSkillAuthorization", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("SkillId" => SkillId), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_skill_group() - delete_skill_group(params::Dict{String,<:Any}) - -Deletes a skill group by skill group ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"SkillGroupArn"`: The ARN of the skill group to delete. Required. -""" -function delete_skill_group(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "DeleteSkillGroup"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function delete_skill_group( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DeleteSkillGroup", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - delete_user(enrollment_id) - delete_user(enrollment_id, params::Dict{String,<:Any}) - -Deletes a specified user by user ARN and enrollment ARN. - -# Arguments -- `enrollment_id`: The ARN of the user's enrollment in the organization. Required. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"UserArn"`: The ARN of the user to delete in the organization. Required. 
-""" -function delete_user(EnrollmentId; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "DeleteUser", - Dict{String,Any}("EnrollmentId" => EnrollmentId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_user( - EnrollmentId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DeleteUser", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("EnrollmentId" => EnrollmentId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_contact_from_address_book(address_book_arn, contact_arn) - disassociate_contact_from_address_book(address_book_arn, contact_arn, params::Dict{String,<:Any}) - -Disassociates a contact from a given address book. - -# Arguments -- `address_book_arn`: The ARN of the address from which to disassociate the contact. -- `contact_arn`: The ARN of the contact to disassociate from an address book. - -""" -function disassociate_contact_from_address_book( - AddressBookArn, ContactArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateContactFromAddressBook", - Dict{String,Any}("AddressBookArn" => AddressBookArn, "ContactArn" => ContactArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disassociate_contact_from_address_book( - AddressBookArn, - ContactArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "DisassociateContactFromAddressBook", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "AddressBookArn" => AddressBookArn, "ContactArn" => ContactArn - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_device_from_room() - disassociate_device_from_room(params::Dict{String,<:Any}) - -Disassociates a device from its current room. The device continues to be connected to the -Wi-Fi network and is still registered to the account. The device settings and skills are -removed from the room. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeviceArn"`: The ARN of the device to disassociate from a room. Required. -""" -function disassociate_device_from_room(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "DisassociateDeviceFromRoom"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function disassociate_device_from_room( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateDeviceFromRoom", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_skill_from_skill_group(skill_id) - disassociate_skill_from_skill_group(skill_id, params::Dict{String,<:Any}) - -Disassociates a skill from a skill group. - -# Arguments -- `skill_id`: The ARN of a skill group to associate to a skill. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"SkillGroupArn"`: The unique identifier of a skill. Required. 
-""" -function disassociate_skill_from_skill_group( - SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateSkillFromSkillGroup", - Dict{String,Any}("SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disassociate_skill_from_skill_group( - SkillId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateSkillFromSkillGroup", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("SkillId" => SkillId), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_skill_from_users(skill_id) - disassociate_skill_from_users(skill_id, params::Dict{String,<:Any}) - -Makes a private skill unavailable for enrolled users and prevents them from enabling it on -their devices. - -# Arguments -- `skill_id`: The private skill ID you want to make unavailable for enrolled users. - -""" -function disassociate_skill_from_users( - SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateSkillFromUsers", - Dict{String,Any}("SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disassociate_skill_from_users( - SkillId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateSkillFromUsers", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("SkillId" => SkillId), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_skill_group_from_room() - disassociate_skill_group_from_room(params::Dict{String,<:Any}) - -Disassociates a skill group from a specified room. This disables all skills in the skill -group on all devices in the room. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The ARN of the room from which the skill group is to be disassociated. - Required. -- `"SkillGroupArn"`: The ARN of the skill group to disassociate from a room. Required. -""" -function disassociate_skill_group_from_room(; - aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateSkillGroupFromRoom"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disassociate_skill_group_from_room( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "DisassociateSkillGroupFromRoom", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - forget_smart_home_appliances(room_arn) - forget_smart_home_appliances(room_arn, params::Dict{String,<:Any}) - -Forgets smart home appliances associated to a room. - -# Arguments -- `room_arn`: The room that the appliances are associated with. 
- -""" -function forget_smart_home_appliances( - RoomArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ForgetSmartHomeAppliances", - Dict{String,Any}("RoomArn" => RoomArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function forget_smart_home_appliances( - RoomArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ForgetSmartHomeAppliances", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("RoomArn" => RoomArn), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_address_book(address_book_arn) - get_address_book(address_book_arn, params::Dict{String,<:Any}) - -Gets address the book details by the address book ARN. - -# Arguments -- `address_book_arn`: The ARN of the address book for which to request details. - -""" -function get_address_book(AddressBookArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetAddressBook", - Dict{String,Any}("AddressBookArn" => AddressBookArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_address_book( - AddressBookArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "GetAddressBook", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("AddressBookArn" => AddressBookArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_conference_preference() - get_conference_preference(params::Dict{String,<:Any}) - -Retrieves the existing conference preferences. - -""" -function get_conference_preference(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetConferencePreference"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function get_conference_preference( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetConferencePreference", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_conference_provider(conference_provider_arn) - get_conference_provider(conference_provider_arn, params::Dict{String,<:Any}) - -Gets details about a specific conference provider. - -# Arguments -- `conference_provider_arn`: The ARN of the newly created conference provider. - -""" -function get_conference_provider( - ConferenceProviderArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetConferenceProvider", - Dict{String,Any}("ConferenceProviderArn" => ConferenceProviderArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_conference_provider( - ConferenceProviderArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "GetConferenceProvider", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ConferenceProviderArn" => ConferenceProviderArn), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_contact(contact_arn) - get_contact(contact_arn, params::Dict{String,<:Any}) - -Gets the contact details by the contact ARN. - -# Arguments -- `contact_arn`: The ARN of the contact for which to request details. 
- -""" -function get_contact(ContactArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetContact", - Dict{String,Any}("ContactArn" => ContactArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_contact( - ContactArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "GetContact", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("ContactArn" => ContactArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_device() - get_device(params::Dict{String,<:Any}) - -Gets the details of a device by device ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeviceArn"`: The ARN of the device for which to request details. Required. -""" -function get_device(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetDevice"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function get_device( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetDevice", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - get_gateway(gateway_arn) - get_gateway(gateway_arn, params::Dict{String,<:Any}) - -Retrieves the details of a gateway. - -# Arguments -- `gateway_arn`: The ARN of the gateway to get. - -""" -function get_gateway(GatewayArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetGateway", - Dict{String,Any}("GatewayArn" => GatewayArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_gateway( - GatewayArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "GetGateway", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("GatewayArn" => GatewayArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_gateway_group(gateway_group_arn) - get_gateway_group(gateway_group_arn, params::Dict{String,<:Any}) - -Retrieves the details of a gateway group. - -# Arguments -- `gateway_group_arn`: The ARN of the gateway group to get. - -""" -function get_gateway_group( - GatewayGroupArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetGatewayGroup", - Dict{String,Any}("GatewayGroupArn" => GatewayGroupArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_gateway_group( - GatewayGroupArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "GetGatewayGroup", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("GatewayGroupArn" => GatewayGroupArn), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_invitation_configuration() - get_invitation_configuration(params::Dict{String,<:Any}) - -Retrieves the configured values for the user enrollment invitation email template. 
- -""" -function get_invitation_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetInvitationConfiguration"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function get_invitation_configuration( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetInvitationConfiguration", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_network_profile(network_profile_arn) - get_network_profile(network_profile_arn, params::Dict{String,<:Any}) - -Gets the network profile details by the network profile ARN. - -# Arguments -- `network_profile_arn`: The ARN of the network profile associated with a device. - -""" -function get_network_profile( - NetworkProfileArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetNetworkProfile", - Dict{String,Any}("NetworkProfileArn" => NetworkProfileArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_network_profile( - NetworkProfileArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "GetNetworkProfile", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("NetworkProfileArn" => NetworkProfileArn), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_profile() - get_profile(params::Dict{String,<:Any}) - -Gets the details of a room profile by profile ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ProfileArn"`: The ARN of the room profile for which to request details. Required. -""" -function get_profile(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetProfile"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function get_profile( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetProfile", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - get_room() - get_room(params::Dict{String,<:Any}) - -Gets room details by room ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The ARN of the room for which to request details. Required. -""" -function get_room(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetRoom"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function get_room( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetRoom", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - get_room_skill_parameter(parameter_key, skill_id) - get_room_skill_parameter(parameter_key, skill_id, params::Dict{String,<:Any}) - -Gets room skill parameter details by room, skill, and parameter key ARN. - -# Arguments -- `parameter_key`: The room skill parameter key for which to get details. Required. -- `skill_id`: The ARN of the skill from which to get the room skill parameter details. - Required. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The ARN of the room from which to get the room skill parameter details. 
-""" -function get_room_skill_parameter( - ParameterKey, SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetRoomSkillParameter", - Dict{String,Any}("ParameterKey" => ParameterKey, "SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_room_skill_parameter( - ParameterKey, - SkillId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "GetRoomSkillParameter", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ParameterKey" => ParameterKey, "SkillId" => SkillId), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_skill_group() - get_skill_group(params::Dict{String,<:Any}) - -Gets skill group details by skill group ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"SkillGroupArn"`: The ARN of the skill group for which to get details. Required. -""" -function get_skill_group(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "GetSkillGroup"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function get_skill_group( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "GetSkillGroup", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_business_report_schedules() - list_business_report_schedules(params::Dict{String,<:Any}) - -Lists the details of the schedules that a user configured. A download URL of the report -associated with each schedule is returned every time this action is called. A new download -URL is returned each time, and is valid for 24 hours. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of schedules listed in the call. -- `"NextToken"`: The token used to list the remaining schedules from the previous API call. -""" -function list_business_report_schedules(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListBusinessReportSchedules"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_business_report_schedules( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListBusinessReportSchedules", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_conference_providers() - list_conference_providers(params::Dict{String,<:Any}) - -Lists conference providers under a specific AWS account. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of conference providers to be returned, per paginated - calls. -- `"NextToken"`: The tokens used for pagination. 
-""" -function list_conference_providers(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListConferenceProviders"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_conference_providers( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListConferenceProviders", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_device_events(device_arn) - list_device_events(device_arn, params::Dict{String,<:Any}) - -Lists the device event history, including device connection status, for up to 30 days. - -# Arguments -- `device_arn`: The ARN of a device. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"EventType"`: The event type to filter device events. If EventType isn't specified, this - returns a list of all device events in reverse chronological order. If EventType is - specified, this returns a list of device events for that EventType in reverse chronological - order. -- `"MaxResults"`: The maximum number of results to include in the response. The default - value is 50. If more results exist than the specified MaxResults value, a token is included - in the response so that the remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response only - includes results beyond the token, up to the value specified by MaxResults. When the end of - results is reached, the response has a value of null. -""" -function list_device_events(DeviceArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListDeviceEvents", - Dict{String,Any}("DeviceArn" => DeviceArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_device_events( - DeviceArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "ListDeviceEvents", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("DeviceArn" => DeviceArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_gateway_groups() - list_gateway_groups(params::Dict{String,<:Any}) - -Retrieves a list of gateway group summaries. Use GetGatewayGroup to retrieve details of a -specific gateway group. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of gateway group summaries to return. The default is - 50. -- `"NextToken"`: The token used to paginate though multiple pages of gateway group - summaries. -""" -function list_gateway_groups(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListGatewayGroups"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_gateway_groups( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListGatewayGroups", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_gateways() - list_gateways(params::Dict{String,<:Any}) - -Retrieves a list of gateway summaries. Use GetGateway to retrieve details of a specific -gateway. 
An optional gateway group ARN can be provided to only retrieve gateway summaries -of gateways that are associated with that gateway group ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"GatewayGroupArn"`: The gateway group ARN for which to list gateways. -- `"MaxResults"`: The maximum number of gateway summaries to return. The default is 50. -- `"NextToken"`: The token used to paginate though multiple pages of gateway summaries. -""" -function list_gateways(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListGateways"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_gateways( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListGateways", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_skills() - list_skills(params::Dict{String,<:Any}) - -Lists all enabled skills in a specific skill group. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"EnablementType"`: Whether the skill is enabled under the user's account. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. -- `"SkillGroupArn"`: The ARN of the skill group for which to list enabled skills. -- `"SkillType"`: Whether the skill is publicly available or is a private skill. -""" -function list_skills(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListSkills"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_skills( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListSkills", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_skills_store_categories() - list_skills_store_categories(params::Dict{String,<:Any}) - -Lists all categories in the Alexa skill store. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of categories returned, per paginated calls. -- `"NextToken"`: The tokens used for pagination. -""" -function list_skills_store_categories(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListSkillsStoreCategories"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_skills_store_categories( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListSkillsStoreCategories", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_skills_store_skills_by_category(category_id) - list_skills_store_skills_by_category(category_id, params::Dict{String,<:Any}) - -Lists all skills in the Alexa skill store by category. - -# Arguments -- `category_id`: The category ID for which the skills are being retrieved from the skill - store. 
- -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of skills returned per paginated calls. -- `"NextToken"`: The tokens used for pagination. -""" -function list_skills_store_skills_by_category( - CategoryId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListSkillsStoreSkillsByCategory", - Dict{String,Any}("CategoryId" => CategoryId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_skills_store_skills_by_category( - CategoryId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "ListSkillsStoreSkillsByCategory", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("CategoryId" => CategoryId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_smart_home_appliances(room_arn) - list_smart_home_appliances(room_arn, params::Dict{String,<:Any}) - -Lists all of the smart home appliances associated with a room. - -# Arguments -- `room_arn`: The room that the appliances are associated with. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of appliances to be returned, per paginated calls. -- `"NextToken"`: The tokens used for pagination. -""" -function list_smart_home_appliances( - RoomArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListSmartHomeAppliances", - Dict{String,Any}("RoomArn" => RoomArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_smart_home_appliances( - RoomArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListSmartHomeAppliances", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("RoomArn" => RoomArn), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_tags(arn) - list_tags(arn, params::Dict{String,<:Any}) - -Lists all tags for the specified resource. - -# Arguments -- `arn`: The ARN of the specified resource for which to list tags. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. 
-""" -function list_tags(Arn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ListTags", - Dict{String,Any}("Arn" => Arn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_tags( - Arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "ListTags", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Arn" => Arn), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - put_conference_preference(conference_preference) - put_conference_preference(conference_preference, params::Dict{String,<:Any}) - -Sets the conference preferences on a specific conference provider at the account level. - -# Arguments -- `conference_preference`: The conference preference of a specific conference provider. - -""" -function put_conference_preference( - ConferencePreference; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "PutConferencePreference", - Dict{String,Any}("ConferencePreference" => ConferencePreference); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function put_conference_preference( - ConferencePreference, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "PutConferencePreference", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ConferencePreference" => ConferencePreference), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - put_invitation_configuration(organization_name) - put_invitation_configuration(organization_name, params::Dict{String,<:Any}) - -Configures the email template for the user enrollment invitation with the specified -attributes. - -# Arguments -- `organization_name`: The name of the organization sending the enrollment invite to a user. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ContactEmail"`: The email ID of the organization or individual contact that the - enrolled user can use. -- `"PrivateSkillIds"`: The list of private skill IDs that you want to recommend to the user - to enable in the invitation. -""" -function put_invitation_configuration( - OrganizationName; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "PutInvitationConfiguration", - Dict{String,Any}("OrganizationName" => OrganizationName); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function put_invitation_configuration( - OrganizationName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "PutInvitationConfiguration", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("OrganizationName" => OrganizationName), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - put_room_skill_parameter(room_skill_parameter, skill_id) - put_room_skill_parameter(room_skill_parameter, skill_id, params::Dict{String,<:Any}) - -Updates room skill parameter details by room, skill, and parameter key ID. Not all skills -have a room skill parameter. - -# Arguments -- `room_skill_parameter`: The updated room skill parameter. Required. -- `skill_id`: The ARN of the skill associated with the room skill parameter. Required. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"RoomArn"`: The ARN of the room associated with the room skill parameter. Required. -""" -function put_room_skill_parameter( - RoomSkillParameter, SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "PutRoomSkillParameter", - Dict{String,Any}("RoomSkillParameter" => RoomSkillParameter, "SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function put_room_skill_parameter( - RoomSkillParameter, - SkillId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "PutRoomSkillParameter", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "RoomSkillParameter" => RoomSkillParameter, "SkillId" => SkillId - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - put_skill_authorization(authorization_result, skill_id) - put_skill_authorization(authorization_result, skill_id, params::Dict{String,<:Any}) - -Links a user's account to a third-party skill provider. If this API operation is called by -an assumed IAM role, the skill being linked must be a private skill. Also, the skill must -be owned by the AWS account that assumed the IAM role. - -# Arguments -- `authorization_result`: The authorization result specific to OAUTH code grant output. - \"Code” must be populated in the AuthorizationResult map to establish the authorization. -- `skill_id`: The unique identifier of a skill. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RoomArn"`: The room that the skill is authorized for. -""" -function put_skill_authorization( - AuthorizationResult, SkillId; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "PutSkillAuthorization", - Dict{String,Any}( - "AuthorizationResult" => AuthorizationResult, "SkillId" => SkillId - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function put_skill_authorization( - AuthorizationResult, - SkillId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "PutSkillAuthorization", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "AuthorizationResult" => AuthorizationResult, "SkillId" => SkillId - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - register_avsdevice(amazon_id, client_id, product_id, user_code) - register_avsdevice(amazon_id, client_id, product_id, user_code, params::Dict{String,<:Any}) - -Registers an Alexa-enabled device built by an Original Equipment Manufacturer (OEM) using -Alexa Voice Service (AVS). - -# Arguments -- `amazon_id`: The device type ID for your AVS device generated by Amazon when the OEM - creates a new product on Amazon's Developer Console. -- `client_id`: The client ID of the OEM used for code-based linking authorization on an AVS - device. -- `product_id`: The product ID used to identify your AVS device during authorization. -- `user_code`: The code that is obtained after your AVS device has made a POST request to - LWA as a part of the Device Authorization Request component of the OAuth code-based linking - specification. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeviceSerialNumber"`: The key generated by the OEM that uniquely identifies a specified - instance of your AVS device. 
-- `"RoomArn"`: The Amazon Resource Name (ARN) of the room with which to associate your AVS - device. -- `"Tags"`: The tags to be added to the specified resource. Do not provide system tags. -""" -function register_avsdevice( - AmazonId, - ClientId, - ProductId, - UserCode; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "RegisterAVSDevice", - Dict{String,Any}( - "AmazonId" => AmazonId, - "ClientId" => ClientId, - "ProductId" => ProductId, - "UserCode" => UserCode, - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function register_avsdevice( - AmazonId, - ClientId, - ProductId, - UserCode, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "RegisterAVSDevice", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "AmazonId" => AmazonId, - "ClientId" => ClientId, - "ProductId" => ProductId, - "UserCode" => UserCode, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - reject_skill(skill_id) - reject_skill(skill_id, params::Dict{String,<:Any}) - -Disassociates a skill from the organization under a user's AWS account. If the skill is a -private skill, it moves to an AcceptStatus of PENDING. Any private or public skill that is -rejected can be added later by calling the ApproveSkill API. - -# Arguments -- `skill_id`: The unique identifier of the skill. - -""" -function reject_skill(SkillId; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "RejectSkill", - Dict{String,Any}("SkillId" => SkillId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function reject_skill( - SkillId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "RejectSkill", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("SkillId" => SkillId), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - resolve_room(skill_id, user_id) - resolve_room(skill_id, user_id, params::Dict{String,<:Any}) - -Determines the details for the room from which a skill request was invoked. This operation -is used by skill developers. To query ResolveRoom from an Alexa skill, the skill ID needs -to be authorized. When the skill is using an AWS Lambda function, the skill is -automatically authorized when you publish your skill as a private skill to your AWS -account. Skills that are hosted using a custom web service must be manually authorized. To -get your skill authorized, contact AWS Support with your AWS account ID that queries the -ResolveRoom API and skill ID. - -# Arguments -- `skill_id`: The ARN of the skill that was requested. Required. -- `user_id`: The ARN of the user. Required. 
- -""" -function resolve_room(SkillId, UserId; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "ResolveRoom", - Dict{String,Any}("SkillId" => SkillId, "UserId" => UserId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function resolve_room( - SkillId, - UserId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "ResolveRoom", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("SkillId" => SkillId, "UserId" => UserId), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - revoke_invitation() - revoke_invitation(params::Dict{String,<:Any}) - -Revokes an invitation and invalidates the enrollment URL. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"EnrollmentId"`: The ARN of the enrollment invitation to revoke. Required. -- `"UserArn"`: The ARN of the user for whom to revoke an enrollment invitation. Required. -""" -function revoke_invitation(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "RevokeInvitation"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function revoke_invitation( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "RevokeInvitation", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - search_address_books() - search_address_books(params::Dict{String,<:Any}) - -Searches address books and lists the ones that meet a set of filter and sort criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use to list a specified set of address books. The supported - filter key is AddressBookName. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response only - includes results beyond the token, up to the value specified by MaxResults. -- `"SortCriteria"`: The sort order to use in listing the specified set of address books. - The supported sort key is AddressBookName. -""" -function search_address_books(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchAddressBooks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_address_books( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchAddressBooks", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - search_contacts() - search_contacts(params::Dict{String,<:Any}) - -Searches contacts and lists the ones that meet a set of filter and sort criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use to list a specified set of address books. The supported - filter keys are DisplayName, FirstName, LastName, and AddressBookArns. -- `"MaxResults"`: The maximum number of results to include in the response. 
If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response only - includes results beyond the token, up to the value specified by MaxResults. -- `"SortCriteria"`: The sort order to use in listing the specified set of contacts. The - supported sort keys are DisplayName, FirstName, and LastName. -""" -function search_contacts(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchContacts"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_contacts( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchContacts", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - search_devices() - search_devices(params::Dict{String,<:Any}) - -Searches devices and lists the ones that meet a set of filter criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use to list a specified set of devices. Supported filter keys - are DeviceName, DeviceStatus, DeviceStatusDetailCode, RoomName, DeviceType, - DeviceSerialNumber, UnassociatedOnly, ConnectionStatus (ONLINE and OFFLINE), - NetworkProfileName, NetworkProfileArn, Feature, and FailureCode. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. -- `"SortCriteria"`: The sort order to use in listing the specified set of devices. - Supported sort keys are DeviceName, DeviceStatus, RoomName, DeviceType, DeviceSerialNumber, - ConnectionStatus, NetworkProfileName, NetworkProfileArn, Feature, and FailureCode. -""" -function search_devices(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchDevices"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_devices( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchDevices", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - search_network_profiles() - search_network_profiles(params::Dict{String,<:Any}) - -Searches network profiles and lists the ones that meet a set of filter and sort criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use to list a specified set of network profiles. Valid - filters are NetworkProfileName, Ssid, and SecurityType. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. 
If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. -- `"SortCriteria"`: The sort order to use to list the specified set of network profiles. - Valid sort criteria includes NetworkProfileName, Ssid, and SecurityType. -""" -function search_network_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchNetworkProfiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_network_profiles( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchNetworkProfiles", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - search_profiles() - search_profiles(params::Dict{String,<:Any}) - -Searches room profiles and lists the ones that meet a set of filter criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use to list a specified set of room profiles. Supported - filter keys are ProfileName and Address. Required. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. -- `"SortCriteria"`: The sort order to use in listing the specified set of room profiles. - Supported sort keys are ProfileName and Address. -""" -function search_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchProfiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_profiles( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchProfiles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - search_rooms() - search_rooms(params::Dict{String,<:Any}) - -Searches rooms and lists the ones that meet a set of filter and sort criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use to list a specified set of rooms. The supported filter - keys are RoomName and ProfileName. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. -- `"SortCriteria"`: The sort order to use in listing the specified set of rooms. The - supported sort keys are RoomName and ProfileName. 
-""" -function search_rooms(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchRooms"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_rooms( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchRooms", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - search_skill_groups() - search_skill_groups(params::Dict{String,<:Any}) - -Searches skill groups and lists the ones that meet a set of filter and sort criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use to list a specified set of skill groups. The supported - filter key is SkillGroupName. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. Required. -- `"SortCriteria"`: The sort order to use in listing the specified set of skill groups. The - supported sort key is SkillGroupName. -""" -function search_skill_groups(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchSkillGroups"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_skill_groups( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchSkillGroups", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - search_users() - search_users(params::Dict{String,<:Any}) - -Searches users and lists the ones that meet a set of filter and sort criteria. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: The filters to use for listing a specific set of users. Required. Supported - filter keys are UserId, FirstName, LastName, Email, and EnrollmentStatus. -- `"MaxResults"`: The maximum number of results to include in the response. If more results - exist than the specified MaxResults value, a token is included in the response so that the - remaining results can be retrieved. Required. -- `"NextToken"`: An optional token returned from a prior request. Use this token for - pagination of results from this action. If this parameter is specified, the response - includes only results beyond the token, up to the value specified by MaxResults. Required. -- `"SortCriteria"`: The sort order to use in listing the filtered set of users. Required. - Supported sort keys are UserId, FirstName, LastName, Email, and EnrollmentStatus. 
-""" -function search_users(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SearchUsers"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function search_users( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SearchUsers", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - send_announcement(client_request_token, content, room_filters) - send_announcement(client_request_token, content, room_filters, params::Dict{String,<:Any}) - -Triggers an asynchronous flow to send text, SSML, or audio announcements to rooms that are -identified by a search or filter. - -# Arguments -- `client_request_token`: The unique, user-specified identifier for the request that - ensures idempotency. -- `content`: The announcement content. This can contain only one of the three possible - announcement types (text, SSML or audio). -- `room_filters`: The filters to use to send an announcement to a specified list of rooms. - The supported filter keys are RoomName, ProfileName, RoomArn, and ProfileArn. To send to - all rooms, specify an empty RoomFilters list. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"TimeToLiveInSeconds"`: The time to live for an announcement. Default is 300. If - delivery doesn't occur within this time, the announcement is not delivered. -""" -function send_announcement( - ClientRequestToken, - Content, - RoomFilters; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "SendAnnouncement", - Dict{String,Any}( - "ClientRequestToken" => ClientRequestToken, - "Content" => Content, - "RoomFilters" => RoomFilters, - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function send_announcement( - ClientRequestToken, - Content, - RoomFilters, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "SendAnnouncement", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ClientRequestToken" => ClientRequestToken, - "Content" => Content, - "RoomFilters" => RoomFilters, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - send_invitation() - send_invitation(params::Dict{String,<:Any}) - -Sends an enrollment invitation email with a URL to a user. The URL is valid for 30 days or -until you call this operation again, whichever comes first. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"UserArn"`: The ARN of the user to whom to send an invitation. Required. -""" -function send_invitation(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "SendInvitation"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function send_invitation( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "SendInvitation", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - start_device_sync(features) - start_device_sync(features, params::Dict{String,<:Any}) - -Resets a device and its account to the known default settings. This clears all information -and settings set by previous users in the following ways: Bluetooth - This unpairs all -bluetooth devices paired with your echo device. 
Volume - This resets the echo device's -volume to the default value. Notifications - This clears all notifications from your echo -device. Lists - This clears all to-do items from your echo device. Settings - This -internally syncs the room's profile (if the device is assigned to a room), contacts, -address books, delegation access for account linking, and communications (if enabled on the -room profile). - -# Arguments -- `features`: Request structure to start the device sync. Required. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeviceArn"`: The ARN of the device to sync. Required. -- `"RoomArn"`: The ARN of the room with which the device to sync is associated. Required. -""" -function start_device_sync(Features; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "StartDeviceSync", - Dict{String,Any}("Features" => Features); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function start_device_sync( - Features, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "StartDeviceSync", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("Features" => Features), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - start_smart_home_appliance_discovery(room_arn) - start_smart_home_appliance_discovery(room_arn, params::Dict{String,<:Any}) - -Initiates the discovery of any smart home appliances associated with the room. - -# Arguments -- `room_arn`: The room where smart home appliance discovery was initiated. - -""" -function start_smart_home_appliance_discovery( - RoomArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "StartSmartHomeApplianceDiscovery", - Dict{String,Any}("RoomArn" => RoomArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function start_smart_home_appliance_discovery( - RoomArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "StartSmartHomeApplianceDiscovery", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("RoomArn" => RoomArn), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - tag_resource(arn, tags) - tag_resource(arn, tags, params::Dict{String,<:Any}) - -Adds metadata tags to a specified resource. - -# Arguments -- `arn`: The ARN of the resource to which to add metadata tags. Required. -- `tags`: The tags to be added to the specified resource. Do not provide system tags. - Required. - -""" -function tag_resource(Arn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "TagResource", - Dict{String,Any}("Arn" => Arn, "Tags" => Tags); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function tag_resource( - Arn, - Tags, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "TagResource", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("Arn" => Arn, "Tags" => Tags), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - untag_resource(arn, tag_keys) - untag_resource(arn, tag_keys, params::Dict{String,<:Any}) - -Removes metadata tags from a specified resource. - -# Arguments -- `arn`: The ARN of the resource from which to remove metadata tags. Required. 
-- `tag_keys`: The tags to be removed from the specified resource. Do not provide system - tags. Required. - -""" -function untag_resource(Arn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "UntagResource", - Dict{String,Any}("Arn" => Arn, "TagKeys" => TagKeys); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function untag_resource( - Arn, - TagKeys, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UntagResource", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("Arn" => Arn, "TagKeys" => TagKeys), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_address_book(address_book_arn) - update_address_book(address_book_arn, params::Dict{String,<:Any}) - -Updates address book details by the address book ARN. - -# Arguments -- `address_book_arn`: The ARN of the room to update. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The updated description of the room. -- `"Name"`: The updated name of the room. -""" -function update_address_book( - AddressBookArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateAddressBook", - Dict{String,Any}("AddressBookArn" => AddressBookArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_address_book( - AddressBookArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateAddressBook", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("AddressBookArn" => AddressBookArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_business_report_schedule(schedule_arn) - update_business_report_schedule(schedule_arn, params::Dict{String,<:Any}) - -Updates the configuration of the report delivery schedule with the specified schedule ARN. - -# Arguments -- `schedule_arn`: The ARN of the business report schedule. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Format"`: The format of the generated report (individual CSV files or zipped files of - individual files). -- `"Recurrence"`: The recurrence of the reports. -- `"S3BucketName"`: The S3 location of the output reports. -- `"S3KeyPrefix"`: The S3 key where the report is delivered. -- `"ScheduleName"`: The name identifier of the schedule. -""" -function update_business_report_schedule( - ScheduleArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateBusinessReportSchedule", - Dict{String,Any}("ScheduleArn" => ScheduleArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_business_report_schedule( - ScheduleArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateBusinessReportSchedule", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("ScheduleArn" => ScheduleArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_conference_provider(conference_provider_arn, conference_provider_type, meeting_setting) - update_conference_provider(conference_provider_arn, conference_provider_type, meeting_setting, params::Dict{String,<:Any}) - -Updates an existing conference provider's settings. 
- -# Arguments -- `conference_provider_arn`: The ARN of the conference provider. -- `conference_provider_type`: The type of the conference provider. -- `meeting_setting`: The meeting settings for the conference provider. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"IPDialIn"`: The IP endpoint and protocol for calling. -- `"PSTNDialIn"`: The information for PSTN conferencing. -""" -function update_conference_provider( - ConferenceProviderArn, - ConferenceProviderType, - MeetingSetting; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateConferenceProvider", - Dict{String,Any}( - "ConferenceProviderArn" => ConferenceProviderArn, - "ConferenceProviderType" => ConferenceProviderType, - "MeetingSetting" => MeetingSetting, - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_conference_provider( - ConferenceProviderArn, - ConferenceProviderType, - MeetingSetting, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateConferenceProvider", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ConferenceProviderArn" => ConferenceProviderArn, - "ConferenceProviderType" => ConferenceProviderType, - "MeetingSetting" => MeetingSetting, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_contact(contact_arn) - update_contact(contact_arn, params::Dict{String,<:Any}) - -Updates the contact details by the contact ARN. - -# Arguments -- `contact_arn`: The ARN of the contact to update. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DisplayName"`: The updated display name of the contact. -- `"FirstName"`: The updated first name of the contact. -- `"LastName"`: The updated last name of the contact. -- `"PhoneNumber"`: The updated phone number of the contact. The phone number type defaults - to WORK. You can either specify PhoneNumber or PhoneNumbers. We recommend that you use - PhoneNumbers, which lets you specify the phone number type and multiple numbers. -- `"PhoneNumbers"`: The list of phone numbers for the contact. -- `"SipAddresses"`: The list of SIP addresses for the contact. -""" -function update_contact(ContactArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "UpdateContact", - Dict{String,Any}("ContactArn" => ContactArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_contact( - ContactArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateContact", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("ContactArn" => ContactArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_device() - update_device(params::Dict{String,<:Any}) - -Updates the device name by device ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeviceArn"`: The ARN of the device to update. Required. -- `"DeviceName"`: The updated device name. Required. 
-""" -function update_device(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "UpdateDevice"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function update_device( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateDevice", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - update_gateway(gateway_arn) - update_gateway(gateway_arn, params::Dict{String,<:Any}) - -Updates the details of a gateway. If any optional field is not provided, the existing -corresponding value is left unmodified. - -# Arguments -- `gateway_arn`: The ARN of the gateway to update. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The updated description of the gateway. -- `"Name"`: The updated name of the gateway. -- `"SoftwareVersion"`: The updated software version of the gateway. The gateway - automatically updates its software version during normal operation. -""" -function update_gateway(GatewayArn; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "UpdateGateway", - Dict{String,Any}("GatewayArn" => GatewayArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_gateway( - GatewayArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateGateway", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("GatewayArn" => GatewayArn), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_gateway_group(gateway_group_arn) - update_gateway_group(gateway_group_arn, params::Dict{String,<:Any}) - -Updates the details of a gateway group. If any optional field is not provided, the existing -corresponding value is left unmodified. - -# Arguments -- `gateway_group_arn`: The ARN of the gateway group to update. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The updated description of the gateway group. -- `"Name"`: The updated name of the gateway group. -""" -function update_gateway_group( - GatewayGroupArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateGatewayGroup", - Dict{String,Any}("GatewayGroupArn" => GatewayGroupArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_gateway_group( - GatewayGroupArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateGatewayGroup", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("GatewayGroupArn" => GatewayGroupArn), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_network_profile(network_profile_arn) - update_network_profile(network_profile_arn, params::Dict{String,<:Any}) - -Updates a network profile by the network profile ARN. - -# Arguments -- `network_profile_arn`: The ARN of the network profile associated with a device. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CertificateAuthorityArn"`: The ARN of the Private Certificate Authority (PCA) created - in AWS Certificate Manager (ACM). This is used to issue certificates to the devices. -- `"CurrentPassword"`: The current password of the Wi-Fi network. 
-- `"Description"`: Detailed information about a device's network profile. -- `"NetworkProfileName"`: The name of the network profile associated with a device. -- `"NextPassword"`: The next, or subsequent, password of the Wi-Fi network. This password - is asynchronously transmitted to the device and is used when the password of the network - changes to NextPassword. -- `"TrustAnchors"`: The root certificate(s) of your authentication server that will be - installed on your devices and used to trust your authentication server during EAP - negotiation. -""" -function update_network_profile( - NetworkProfileArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateNetworkProfile", - Dict{String,Any}("NetworkProfileArn" => NetworkProfileArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_network_profile( - NetworkProfileArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return alexa_for_business( - "UpdateNetworkProfile", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("NetworkProfileArn" => NetworkProfileArn), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_profile() - update_profile(params::Dict{String,<:Any}) - -Updates an existing room profile by room profile ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Address"`: The updated address for the room profile. -- `"DataRetentionOptIn"`: Whether data retention of the profile is enabled. -- `"DistanceUnit"`: The updated distance unit for the room profile. -- `"IsDefault"`: Sets the profile as default if selected. If this is missing, no update is - done to the default status. -- `"Locale"`: The updated locale for the room profile. (This is currently only available to - a limited preview audience.) -- `"MaxVolumeLimit"`: The updated maximum volume limit for the room profile. -- `"MeetingRoomConfiguration"`: The updated meeting room settings of a room profile. -- `"PSTNEnabled"`: Whether the PSTN setting of the room profile is enabled. -- `"ProfileArn"`: The ARN of the room profile to update. Required. -- `"ProfileName"`: The updated name for the room profile. -- `"SetupModeDisabled"`: Whether the setup mode of the profile is enabled. -- `"TemperatureUnit"`: The updated temperature unit for the room profile. -- `"Timezone"`: The updated timezone for the room profile. -- `"WakeWord"`: The updated wake word for the room profile. -""" -function update_profile(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "UpdateProfile"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function update_profile( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateProfile", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - update_room() - update_room(params::Dict{String,<:Any}) - -Updates room details by room ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The updated description for the room. -- `"ProfileArn"`: The updated profile ARN for the room. -- `"ProviderCalendarId"`: The updated provider calendar ARN for the room. -- `"RoomArn"`: The ARN of the room to update. -- `"RoomName"`: The updated name for the room. 
-""" -function update_room(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "UpdateRoom"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function update_room( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateRoom", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - update_skill_group() - update_skill_group(params::Dict{String,<:Any}) - -Updates skill group details by skill group ARN. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The updated description for the skill group. -- `"SkillGroupArn"`: The ARN of the skill group to update. -- `"SkillGroupName"`: The updated name for the skill group. -""" -function update_skill_group(; aws_config::AbstractAWSConfig=global_aws_config()) - return alexa_for_business( - "UpdateSkillGroup"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function update_skill_group( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return alexa_for_business( - "UpdateSkillGroup", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end diff --git a/src/services/amp.jl b/src/services/amp.jl index 2782c83023..a1efe33b86 100644 --- a/src/services/amp.jl +++ b/src/services/amp.jl @@ -8,16 +8,20 @@ using AWS.UUIDs create_alert_manager_definition(data, workspace_id) create_alert_manager_definition(data, workspace_id, params::Dict{String,<:Any}) -Create an alert manager definition. +The CreateAlertManagerDefinition operation creates the alert manager definition in a +workspace. If a workspace already has an alert manager definition, don't use this operation +to update it. Instead, use PutAlertManagerDefinition. # Arguments -- `data`: The alert manager definition data. -- `workspace_id`: The ID of the workspace in which to create the alert manager definition. +- `data`: The alert manager definition to add. A base64-encoded version of the YAML alert + manager definition file. For details about the alert manager definition, see + AlertManagedDefinitionData. +- `workspace_id`: The ID of the workspace to add the alert manager definition to. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function create_alert_manager_definition( data, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -55,17 +59,18 @@ end create_logging_configuration(log_group_arn, workspace_id) create_logging_configuration(log_group_arn, workspace_id, params::Dict{String,<:Any}) -Create logging configuration. +The CreateLoggingConfiguration operation creates a logging configuration for the workspace. +Use this operation to set the CloudWatch log group to which the logs will be published to. # Arguments -- `log_group_arn`: The ARN of the CW log group to which the vended log data will be - published. -- `workspace_id`: The ID of the workspace to vend logs to. +- `log_group_arn`: The ARN of the CloudWatch log group to which the vended log data will be + published. This log group must exist prior to calling this API. 
+- `workspace_id`: The ID of the workspace to create the logging configuration for. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function create_logging_configuration( logGroupArn, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -105,18 +110,23 @@ end create_rule_groups_namespace(data, name, workspace_id) create_rule_groups_namespace(data, name, workspace_id, params::Dict{String,<:Any}) -Create a rule group namespace. +The CreateRuleGroupsNamespace operation creates a rule groups namespace within a workspace. +A rule groups namespace is associated with exactly one rules file. A workspace can have +multiple rule groups namespaces. Use this operation only to create new rule groups +namespaces. To update an existing rule groups namespace, use PutRuleGroupsNamespace. # Arguments -- `data`: The namespace data that define the rule groups. -- `name`: The rule groups namespace name. -- `workspace_id`: The ID of the workspace in which to create the rule group namespace. +- `data`: The rules file to use in the new namespace. Contains the base64-encoded version + of the YAML rules file. For details about the rule groups namespace structure, see + RuleGroupsNamespaceData. +- `name`: The name for the new rule groups namespace. +- `workspace_id`: The ID of the workspace to add the rule groups namespace. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. -- `"tags"`: Optional, user-provided tags for this rule groups namespace. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. +- `"tags"`: The list of tag keys and values to associate with the rule groups namespace. """ function create_rule_groups_namespace( data, name, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -153,19 +163,104 @@ function create_rule_groups_namespace( ) end +""" + create_scraper(destination, scrape_configuration, source) + create_scraper(destination, scrape_configuration, source, params::Dict{String,<:Any}) + +The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics +from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your +Amazon Managed Service for Prometheus workspace. You can configure the scraper to control +what metrics are collected, and what transformations are applied prior to sending them to +your workspace. If needed, an IAM role will be created for you that gives Amazon Managed +Service for Prometheus access to the metrics in your cluster. For more information, see +Using roles for scraping metrics from EKS in the Amazon Managed Service for Prometheus User +Guide. You cannot update a scraper. If you want to change the configuration of the scraper, +create a new scraper and delete the old one. The scrapeConfiguration parameter contains the +base64-encoded version of the YAML configuration file. 
For more information about +collectors, including what metrics are collected, and how to configure the scraper, see +Amazon Web Services managed collectors in the Amazon Managed Service for Prometheus User +Guide. + +# Arguments +- `destination`: The Amazon Managed Service for Prometheus workspace to send metrics to. +- `scrape_configuration`: The configuration file to use in the new scraper. For more + information, see Scraper configuration in the Amazon Managed Service for Prometheus User + Guide. +- `source`: The Amazon EKS cluster from which the scraper will collect metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"alias"`: (optional) a name to associate with the scraper. This is for your use, and + does not need to be unique. +- `"clientToken"`: (Optional) A unique, case-sensitive identifier that you can provide to + ensure the idempotency of the request. +- `"tags"`: (Optional) The list of tag keys and values to associate with the scraper. +""" +function create_scraper( + destination, + scrapeConfiguration, + source; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return amp( + "POST", + "/scrapers", + Dict{String,Any}( + "destination" => destination, + "scrapeConfiguration" => scrapeConfiguration, + "source" => source, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_scraper( + destination, + scrapeConfiguration, + source, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return amp( + "POST", + "/scrapers", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "destination" => destination, + "scrapeConfiguration" => scrapeConfiguration, + "source" => source, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_workspace() create_workspace(params::Dict{String,<:Any}) -Creates a new AMP workspace. +Creates a Prometheus workspace. A workspace is a logical space dedicated to the storage and +querying of Prometheus metrics. You can have one or more workspaces in each Region in your +account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"alias"`: An optional user-assigned alias for this workspace. This alias is for user - reference and does not need to be unique. -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. -- `"tags"`: Optional, user-provided tags for this workspace. +- `"alias"`: An alias that you assign to this workspace to help you identify it. It does + not need to be unique. Blank spaces at the beginning or end of the alias that you specify + will be trimmed from the value used. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. +- `"kmsKeyArn"`: (optional) The ARN for a customer managed KMS key to use for encrypting + data within your workspace. For more information about using your own key in your + workspace, see Encryption at rest in the Amazon Managed Service for Prometheus User Guide. +- `"tags"`: The list of tag keys and values to associate with the workspace. 
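A minimal usage sketch, assuming the package's documented `@service` high-level interface; the alias, tag, and KMS key ARN values are illustrative placeholders only:

```julia
using AWS
@service AMP

# Optional parameters are passed as a Dict{String,Any}; "clientToken" can be
# included explicitly if you manage idempotency yourself.
AMP.create_workspace(
    Dict{String,Any}(
        "alias" => "example-workspace",                                   # placeholder
        "tags" => Dict("team" => "observability"),                        # placeholder
        "kmsKeyArn" => "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE",  # placeholder
    ),
)
```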
""" function create_workspace(; aws_config::AbstractAWSConfig=global_aws_config()) return amp( @@ -194,15 +289,15 @@ end delete_alert_manager_definition(workspace_id) delete_alert_manager_definition(workspace_id, params::Dict{String,<:Any}) -Deletes an alert manager definition. +Deletes the alert manager definition from a workspace. # Arguments -- `workspace_id`: The ID of the workspace in which to delete the alert manager definition. +- `workspace_id`: The ID of the workspace to delete the alert manager definition from. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function delete_alert_manager_definition( workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -235,15 +330,15 @@ end delete_logging_configuration(workspace_id) delete_logging_configuration(workspace_id, params::Dict{String,<:Any}) -Delete logging configuration. +Deletes the logging configuration for a workspace. # Arguments -- `workspace_id`: The ID of the workspace to vend logs to. +- `workspace_id`: The ID of the workspace containing the logging configuration to delete. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function delete_logging_configuration( workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -276,16 +371,17 @@ end delete_rule_groups_namespace(name, workspace_id) delete_rule_groups_namespace(name, workspace_id, params::Dict{String,<:Any}) -Delete a rule groups namespace. +Deletes one rule groups namespace and its associated rule groups definition. # Arguments -- `name`: The rule groups namespace name. -- `workspace_id`: The ID of the workspace to delete rule group definition. +- `name`: The name of the rule groups namespace to delete. +- `workspace_id`: The ID of the workspace containing the rule groups namespace and + definition to delete. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function delete_rule_groups_namespace( name, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -315,19 +411,61 @@ function delete_rule_groups_namespace( ) end +""" + delete_scraper(scraper_id) + delete_scraper(scraper_id, params::Dict{String,<:Any}) + +The DeleteScraper operation deletes one scraper, and stops any metrics collection that the +scraper performs. + +# Arguments +- `scraper_id`: The ID of the scraper to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: (Optional) A unique, case-sensitive identifier that you can provide to + ensure the idempotency of the request. 
+""" +function delete_scraper(scraperId; aws_config::AbstractAWSConfig=global_aws_config()) + return amp( + "DELETE", + "/scrapers/$(scraperId)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_scraper( + scraperId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return amp( + "DELETE", + "/scrapers/$(scraperId)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_workspace(workspace_id) delete_workspace(workspace_id, params::Dict{String,<:Any}) -Deletes an AMP workspace. +Deletes an existing workspace. When you delete a workspace, the data that has been +ingested into it is not immediately deleted. It will be permanently deleted within one +month. # Arguments - `workspace_id`: The ID of the workspace to delete. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function delete_workspace(workspaceId; aws_config::AbstractAWSConfig=global_aws_config()) return amp( @@ -358,10 +496,10 @@ end describe_alert_manager_definition(workspace_id) describe_alert_manager_definition(workspace_id, params::Dict{String,<:Any}) -Describes an alert manager definition. +Retrieves the full information about the alert manager definition for a workspace. # Arguments -- `workspace_id`: The ID of the workspace to describe. +- `workspace_id`: The ID of the workspace to retrieve the alert manager definition from. """ function describe_alert_manager_definition( @@ -392,10 +530,10 @@ end describe_logging_configuration(workspace_id) describe_logging_configuration(workspace_id, params::Dict{String,<:Any}) -Describes logging configuration. +Returns complete information about the current logging configuration of the workspace. # Arguments -- `workspace_id`: The ID of the workspace to vend logs to. +- `workspace_id`: The ID of the workspace to describe the logging configuration for. """ function describe_logging_configuration( @@ -426,11 +564,12 @@ end describe_rule_groups_namespace(name, workspace_id) describe_rule_groups_namespace(name, workspace_id, params::Dict{String,<:Any}) -Describe a rule groups namespace. +Returns complete information about one rule groups namespace. To retrieve a list of rule +groups namespaces, use ListRuleGroupsNamespaces. # Arguments -- `name`: The rule groups namespace. -- `workspace_id`: The ID of the workspace to describe. +- `name`: The name of the rule groups namespace that you want information for. +- `workspace_id`: The ID of the workspace containing the rule groups namespace. """ function describe_rule_groups_namespace( @@ -458,11 +597,43 @@ function describe_rule_groups_namespace( ) end +""" + describe_scraper(scraper_id) + describe_scraper(scraper_id, params::Dict{String,<:Any}) + +The DescribeScraper operation displays information about an existing scraper. + +# Arguments +- `scraper_id`: The ID of the scraper to describe. 
+ +""" +function describe_scraper(scraperId; aws_config::AbstractAWSConfig=global_aws_config()) + return amp( + "GET", + "/scrapers/$(scraperId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_scraper( + scraperId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return amp( + "GET", + "/scrapers/$(scraperId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_workspace(workspace_id) describe_workspace(workspace_id, params::Dict{String,<:Any}) -Describes an existing AMP workspace. +Returns information about an existing workspace. # Arguments - `workspace_id`: The ID of the workspace to describe. @@ -490,22 +661,56 @@ function describe_workspace( ) end +""" + get_default_scraper_configuration() + get_default_scraper_configuration(params::Dict{String,<:Any}) + +The GetDefaultScraperConfiguration operation returns the default scraper configuration used +when Amazon EKS creates a scraper for you. + +""" +function get_default_scraper_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return amp( + "GET", + "/scraperconfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_default_scraper_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return amp( + "GET", + "/scraperconfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_rule_groups_namespaces(workspace_id) list_rule_groups_namespaces(workspace_id, params::Dict{String,<:Any}) -Lists rule groups namespaces. +Returns a list of rule groups namespaces in a workspace. # Arguments -- `workspace_id`: The ID of the workspace. +- `workspace_id`: The ID of the workspace containing the rule groups namespaces. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Maximum results to return in response (default=100, maximum=1000). -- `"name"`: Optional filter for rule groups namespace name. Only the rule groups namespace - that begin with this value will be returned. -- `"nextToken"`: Pagination token to request the next page in a paginated list. This token - is obtained from the output of the previous ListRuleGroupsNamespaces request. +- `"maxResults"`: The maximum number of results to return. The default is 100. +- `"name"`: Use this parameter to filter the rule groups namespaces that are returned. Only + the namespaces with names that begin with the value that you specify are returned. +- `"nextToken"`: The token for the next set of items to return. You receive this token from + a previous call, and use it to get the next page of results. The other parameters must be + the same as the initial call. For example, if your initial request has maxResults of 10, + and there are 12 rule groups namespaces to return, then your initial request will return 10 + and a nextToken. Using the next token in a subsequent call will return the remaining 2 + namespaces. """ function list_rule_groups_namespaces( workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -531,14 +736,52 @@ function list_rule_groups_namespaces( ) end +""" + list_scrapers() + list_scrapers(params::Dict{String,<:Any}) + +The ListScrapers operation lists all of the scrapers in your account. This includes +scrapers being created or deleted. You can optionally filter the returned list. 
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"filters"`: (Optional) A list of key-value pairs to filter the list of scrapers
+  returned. Keys include status, sourceArn, destinationArn, and alias. Filters on the same
+  key are OR'd together, and filters on different keys are AND'd together. For example,
+  status=ACTIVE&status=CREATING&alias=Test will return all scrapers that have the
+  alias Test, and are either in status ACTIVE or CREATING. To find all active scrapers that
+  are sending metrics to a specific Amazon Managed Service for Prometheus workspace, you
+  would use the ARN of the workspace in a query:
+  status=ACTIVE&destinationArn=arn:aws:aps:us-east-1:123456789012:workspace/ws-example1-1234-abcd-56ef-123456789012
+  If this is included, it filters the results to only the scrapers
+  that match the filter.
+- `"maxResults"`: (Optional) The maximum number of scrapers to return in one ListScrapers
+  operation. The range is 1-1000. If you omit this parameter, the default of 100 is used.
+- `"nextToken"`: (Optional) The token for the next set of items to return. (You received
+  this token from a previous call.)
+"""
+function list_scrapers(; aws_config::AbstractAWSConfig=global_aws_config())
+    return amp("GET", "/scrapers"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET)
+end
+function list_scrapers(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return amp(
+        "GET", "/scrapers", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
 """
     list_tags_for_resource(resource_arn)
     list_tags_for_resource(resource_arn, params::Dict{String,<:Any})
 
-Lists the tags you have assigned to the resource.
+The ListTagsForResource operation returns the tags that are associated with an Amazon
+Managed Service for Prometheus resource. Currently, the only resources that can be tagged
+are workspaces and rule groups namespaces.
 
 # Arguments
-- `resource_arn`: The ARN of the resource.
+- `resource_arn`: The ARN of the resource to list tags for. Must be a workspace or rule
+  groups namespace resource.
 
 """
 function list_tags_for_resource(
@@ -569,15 +812,22 @@ end
     list_workspaces()
     list_workspaces(params::Dict{String,<:Any})
 
-Lists all AMP workspaces, including workspaces being created or deleted.
+Lists all of the Amazon Managed Service for Prometheus workspaces in your account. This
+includes workspaces being created or deleted.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"alias"`: Optional filter for workspace alias. Only the workspaces with aliases that
-  begin with this value will be returned.
-- `"maxResults"`: Maximum results to return in response (default=100, maximum=1000).
-- `"nextToken"`: Pagination token to request the next page in a paginated list. This token
-  is obtained from the output of the previous ListWorkspaces request.
+- `"alias"`: If this is included, it filters the results to only the workspaces with names
+  that start with the value that you specify here. Amazon Managed Service for Prometheus will
+  automatically strip any blank spaces from the beginning and end of the alias that you
+  specify.
+- `"maxResults"`: The maximum number of workspaces to return per request. The default is
+  100.
+- `"nextToken"`: The token for the next set of items to return. You receive this token from
+  a previous call, and use it to get the next page of results.
The other parameters must be + the same as the initial call. For example, if your initial request has maxResults of 10, + and there are 12 workspaces to return, then your initial request will return 10 and a + nextToken. Using the next token in a subsequent call will return the remaining 2 workspaces. """ function list_workspaces(; aws_config::AbstractAWSConfig=global_aws_config()) return amp("GET", "/workspaces"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -594,16 +844,20 @@ end put_alert_manager_definition(data, workspace_id) put_alert_manager_definition(data, workspace_id, params::Dict{String,<:Any}) -Update an alert manager definition. +Updates an existing alert manager definition in a workspace. If the workspace does not +already have an alert manager definition, don't use this operation to create it. Instead, +use CreateAlertManagerDefinition. # Arguments -- `data`: The alert manager definition data. -- `workspace_id`: The ID of the workspace in which to update the alert manager definition. +- `data`: The alert manager definition to use. A base64-encoded version of the YAML alert + manager definition file. For details about the alert manager definition, see + AlertManagedDefinitionData. +- `workspace_id`: The ID of the workspace to update the alert manager definition in. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function put_alert_manager_definition( data, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -641,17 +895,23 @@ end put_rule_groups_namespace(data, name, workspace_id) put_rule_groups_namespace(data, name, workspace_id, params::Dict{String,<:Any}) -Update a rule groups namespace. +Updates an existing rule groups namespace within a workspace. A rule groups namespace is +associated with exactly one rules file. A workspace can have multiple rule groups +namespaces. Use this operation only to update existing rule groups namespaces. To create a +new rule groups namespace, use CreateRuleGroupsNamespace. You can't use this operation to +add tags to an existing rule groups namespace. Instead, use TagResource. # Arguments -- `data`: The namespace data that define the rule groups. -- `name`: The rule groups namespace name. -- `workspace_id`: The ID of the workspace in which to update the rule group namespace. +- `data`: The new rules file to use in the namespace. A base64-encoded version of the YAML + rule groups file. For details about the rule groups namespace structure, see + RuleGroupsNamespaceData. +- `name`: The name of the rule groups namespace that you are updating. +- `workspace_id`: The ID of the workspace where you are updating the rule groups namespace. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. 
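A sketch of updating a namespace from a local rules file; the workspace ID, namespace name, and file path are placeholders, and `Base64.base64encode` is used because the `data` argument described above is the base64-encoded YAML:

```julia
using AWS
using Base64
@service AMP

workspace_id = "ws-example"     # placeholder workspace ID
namespace    = "example-rules"  # placeholder namespace name

# Read the YAML rule groups file and base64-encode it, as the data
# argument documented above expects.
data = base64encode(read("rules.yaml", String))  # placeholder path

AMP.put_rule_groups_namespace(data, namespace, workspace_id)
```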
""" function put_rule_groups_namespace( data, name, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -690,11 +950,16 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Creates tags for the specified resource. +The TagResource operation associates tags with an Amazon Managed Service for Prometheus +resource. The only resources that can be tagged are workspaces and rule groups namespaces. +If you specify a new tag key for the resource, this tag is appended to the list of tags +associated with the resource. If you specify a tag key that is already associated with the +resource, the new tag value that you specify replaces the previous value for that tag. # Arguments -- `resource_arn`: The ARN of the resource. -- `tags`: +- `resource_arn`: The ARN of the workspace or rule groups namespace to apply tags to. +- `tags`: The list of tag keys and values to associate with the resource. Keys may not + begin with aws:. """ function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -725,11 +990,12 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Deletes tags from the specified resource. +Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only +resources that can be tagged are workspaces and rule groups namespaces. # Arguments -- `resource_arn`: The ARN of the resource. -- `tag_keys`: One or more tag keys +- `resource_arn`: The ARN of the workspace or rule groups namespace. +- `tag_keys`: The keys of the tags to remove. """ function untag_resource( @@ -762,17 +1028,17 @@ end update_logging_configuration(log_group_arn, workspace_id) update_logging_configuration(log_group_arn, workspace_id, params::Dict{String,<:Any}) -Update logging configuration. +Updates the log group ARN or the workspace ID of the current logging configuration. # Arguments -- `log_group_arn`: The ARN of the CW log group to which the vended log data will be +- `log_group_arn`: The ARN of the CloudWatch log group to which the vended log data will be published. -- `workspace_id`: The ID of the workspace to vend logs to. +- `workspace_id`: The ID of the workspace to update the logging configuration for. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function update_logging_configuration( logGroupArn, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -812,16 +1078,18 @@ end update_workspace_alias(workspace_id) update_workspace_alias(workspace_id, params::Dict{String,<:Any}) -Updates an AMP workspace alias. +Updates the alias of an existing workspace. # Arguments -- `workspace_id`: The ID of the workspace being updated. +- `workspace_id`: The ID of the workspace to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"alias"`: The new alias of the workspace. -- `"clientToken"`: Optional, unique, case-sensitive, user-provided identifier to ensure the - idempotency of the request. +- `"alias"`: The new alias for the workspace. It does not need to be unique. 
Amazon Managed + Service for Prometheus will automatically strip any blank spaces from the beginning and end + of the alias that you specify. +- `"clientToken"`: A unique identifier that you can provide to ensure the idempotency of + the request. Case-sensitive. """ function update_workspace_alias( workspaceId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/amplify.jl b/src/services/amplify.jl index 1f8cd6a58d..a586af0907 100644 --- a/src/services/amplify.jl +++ b/src/services/amplify.jl @@ -8,10 +8,10 @@ using AWS.UUIDs create_app(name) create_app(name, params::Dict{String,<:Any}) - Creates a new Amplify app. +Creates a new Amplify app. # Arguments -- `name`: The name for an Amplify app. +- `name`: The name of the Amplify app. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -24,25 +24,27 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the Amplify User Guide . -- `"autoBranchCreationConfig"`: The automated branch creation configuration for an Amplify +- `"autoBranchCreationConfig"`: The automated branch creation configuration for an Amplify app. -- `"autoBranchCreationPatterns"`: The automated branch creation glob patterns for an +- `"autoBranchCreationPatterns"`: The automated branch creation glob patterns for an Amplify app. -- `"basicAuthCredentials"`: The credentials for basic authorization for an Amplify app. - You must base64-encode the authorization credentials and provide them in the format +- `"basicAuthCredentials"`: The credentials for basic authorization for an Amplify app. You + must base64-encode the authorization credentials and provide them in the format user:password. -- `"buildSpec"`: The build specification (build spec) for an Amplify app. +- `"buildSpec"`: The build specification (build spec) for an Amplify app. - `"customHeaders"`: The custom HTTP headers for an Amplify app. -- `"customRules"`: The custom rewrite and redirect rules for an Amplify app. -- `"description"`: The description for an Amplify app. -- `"enableAutoBranchCreation"`: Enables automated branch creation for an Amplify app. -- `"enableBasicAuth"`: Enables basic authorization for an Amplify app. This will apply to +- `"customRules"`: The custom rewrite and redirect rules for an Amplify app. +- `"description"`: The description of the Amplify app. +- `"enableAutoBranchCreation"`: Enables automated branch creation for an Amplify app. +- `"enableBasicAuth"`: Enables basic authorization for an Amplify app. This will apply to all branches that are part of this app. -- `"enableBranchAutoBuild"`: Enables the auto building of branches for an Amplify app. -- `"enableBranchAutoDeletion"`: Automatically disconnects a branch in the Amplify Console +- `"enableBranchAutoBuild"`: Enables the auto building of branches for an Amplify app. +- `"enableBranchAutoDeletion"`: Automatically disconnects a branch in the Amplify console when you delete a branch from your Git repository. -- `"environmentVariables"`: The environment variables map for an Amplify app. -- `"iamServiceRoleArn"`: The AWS Identity and Access Management (IAM) service role for an +- `"environmentVariables"`: The environment variables map for an Amplify app. 
For a list + of the environment variables that are accessible to Amplify by default, see Amplify + Environment variables in the Amplify Hosting User Guide. +- `"iamServiceRoleArn"`: The AWS Identity and Access Management (IAM) service role for an Amplify app. - `"oauthToken"`: The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH @@ -53,12 +55,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the Amplify User Guide . -- `"platform"`: The platform for the Amplify app. For a static app, set the platform type +- `"platform"`: The platform for the Amplify app. For a static app, set the platform type to WEB. For a dynamic server-side rendered (SSR) app, set the platform type to WEB_COMPUTE. For an app requiring Amplify Hosting's original SSR support only, set the platform type to WEB_DYNAMIC. -- `"repository"`: The repository for an Amplify app. -- `"tags"`: The tag for an Amplify app. +- `"repository"`: The Git repository for the Amplify app. +- `"tags"`: The tag for an Amplify app. """ function create_app(name; aws_config::AbstractAWSConfig=global_aws_config()) return amplify( @@ -85,16 +87,20 @@ end create_backend_environment(app_id, environment_name) create_backend_environment(app_id, environment_name, params::Dict{String,<:Any}) - Creates a new backend environment for an Amplify app. +Creates a new backend environment for an Amplify app. This API is available only to +Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify +command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. +When you deploy an application with Amplify Gen 2, you provision the app's backend +infrastructure using Typescript code. # Arguments -- `app_id`: The unique ID for an Amplify app. -- `environment_name`: The name for the backend environment. +- `app_id`: The unique ID for an Amplify app. +- `environment_name`: The name for the backend environment. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"deploymentArtifacts"`: The name of deployment artifacts. -- `"stackName"`: The AWS CloudFormation stack name of a backend environment. +- `"deploymentArtifacts"`: The name of deployment artifacts. +- `"stackName"`: The AWS CloudFormation stack name of a backend environment. """ function create_backend_environment( appId, environmentName; aws_config::AbstractAWSConfig=global_aws_config() @@ -134,16 +140,21 @@ end # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch. +- `branch_name`: The name for the branch. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"backendEnvironmentArn"`: The Amazon Resource Name (ARN) for a backend environment that - is part of an Amplify app. +- `"backend"`: The backend for a Branch of an Amplify app. Use for a backend created from + an CloudFormation stack. This field is available to Amplify Gen 2 apps only. When you + deploy an application with Amplify Gen 2, you provision the app's backend infrastructure + using Typescript code. 
+- `"backendEnvironmentArn"`: The Amazon Resource Name (ARN) for a backend environment that + is part of a Gen 1 Amplify app. This field is available to Amplify Gen 1 apps only where + the backend is created using Amplify Studio or the Amplify command line interface (CLI). - `"basicAuthCredentials"`: The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password. - `"buildSpec"`: The build specification (build spec) for the branch. -- `"description"`: The description for the branch. +- `"description"`: The description for the branch. - `"displayName"`: The display name for a branch. This is used as the default domain prefix. - `"enableAutoBuild"`: Enables auto building for the branch. @@ -157,7 +168,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"environmentVariables"`: The environment variables for the branch. - `"framework"`: The framework for the branch. - `"pullRequestEnvironmentName"`: The Amplify environment name for the pull request. -- `"stage"`: Describes the current stage for the branch. +- `"stage"`: Describes the current stage for the branch. - `"tags"`: The tag for the branch. - `"ttl"`: The content Time To Live (TTL) for the website in seconds. """ @@ -191,12 +202,14 @@ end create_deployment(app_id, branch_name) create_deployment(app_id, branch_name, params::Dict{String,<:Any}) - Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not -connected to a repository. +Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not +connected to a repository. The maximum duration between the CreateDeployment call and the +StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the +StartDeployment call and the associated Job will fail. # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch, for the job. +- `branch_name`: The name of the branch to use for the job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -234,8 +247,8 @@ end create_domain_association(app_id, domain_name, sub_domain_settings) create_domain_association(app_id, domain_name, sub_domain_settings, params::Dict{String,<:Any}) - Creates a new domain association for an Amplify app. This action associates a custom -domain with the Amplify app +Creates a new domain association for an Amplify app. This action associates a custom domain +with the Amplify app # Arguments - `app_id`: The unique ID for an Amplify app. @@ -248,6 +261,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys creation. - `"autoSubDomainIAMRole"`: The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains. +- `"certificateSettings"`: The type of SSL/TLS certificate to use for your custom domain. + If you don't specify a certificate type, Amplify uses the default certificate that it + provisions and manages for you. - `"enableAutoSubDomain"`: Enables the automated creation of subdomains for branches. """ function create_domain_association( @@ -291,15 +307,15 @@ end create_webhook(app_id, branch_name) create_webhook(app_id, branch_name, params::Dict{String,<:Any}) - Creates a new webhook on an Amplify app. +Creates a new webhook on an Amplify app. # Arguments -- `app_id`: The unique ID for an Amplify app. 
-- `branch_name`: The name for a branch that is part of an Amplify app. +- `app_id`: The unique ID for an Amplify app. +- `branch_name`: The name for a branch that is part of an Amplify app. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"description"`: The description for a webhook. +- `"description"`: The description for a webhook. """ function create_webhook( appId, branchName; aws_config::AbstractAWSConfig=global_aws_config() @@ -333,10 +349,10 @@ end delete_app(app_id) delete_app(app_id, params::Dict{String,<:Any}) - Deletes an existing Amplify app specified by an app ID. +Deletes an existing Amplify app specified by an app ID. # Arguments -- `app_id`: The unique ID for an Amplify app. +- `app_id`: The unique ID for an Amplify app. """ function delete_app(appId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -360,11 +376,15 @@ end delete_backend_environment(app_id, environment_name) delete_backend_environment(app_id, environment_name, params::Dict{String,<:Any}) - Deletes a backend environment for an Amplify app. +Deletes a backend environment for an Amplify app. This API is available only to Amplify +Gen 1 applications where the backend is created using Amplify Studio or the Amplify command +line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you +deploy an application with Amplify Gen 2, you provision the app's backend infrastructure +using Typescript code. # Arguments -- `app_id`: The unique ID of an Amplify app. -- `environment_name`: The name of a backend environment of an Amplify app. +- `app_id`: The unique ID of an Amplify app. +- `environment_name`: The name of a backend environment of an Amplify app. """ function delete_backend_environment( @@ -400,7 +420,7 @@ end # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch. +- `branch_name`: The name of the branch. """ function delete_branch(appId, branchName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -430,7 +450,7 @@ end delete_domain_association(app_id, domain_name) delete_domain_association(app_id, domain_name, params::Dict{String,<:Any}) - Deletes a domain association for an Amplify app. +Deletes a domain association for an Amplify app. # Arguments - `app_id`: The unique id for an Amplify app. @@ -470,7 +490,7 @@ end # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch, for the job. +- `branch_name`: The name of the branch to use for the job. - `job_id`: The unique ID for the job. """ @@ -504,10 +524,10 @@ end delete_webhook(webhook_id) delete_webhook(webhook_id, params::Dict{String,<:Any}) - Deletes a webhook. +Deletes a webhook. # Arguments -- `webhook_id`: The unique ID for a webhook. +- `webhook_id`: The unique ID for a webhook. """ function delete_webhook(webhookId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -536,17 +556,17 @@ end generate_access_logs(app_id, domain_name) generate_access_logs(app_id, domain_name, params::Dict{String,<:Any}) - Returns the website access logs for a specific time range using a presigned URL. +Returns the website access logs for a specific time range using a presigned URL. # Arguments -- `app_id`: The unique ID for an Amplify app. -- `domain_name`: The name of the domain. +- `app_id`: The unique ID for an Amplify app. +- `domain_name`: The name of the domain. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"endTime"`: The time at which the logs should end. The time range specified is - inclusive of the end time. -- `"startTime"`: The time at which the logs should start. The time range specified is +- `"endTime"`: The time at which the logs should end. The time range specified is inclusive + of the end time. +- `"startTime"`: The time at which the logs should start. The time range specified is inclusive of the start time. """ function generate_access_logs( @@ -581,10 +601,10 @@ end get_app(app_id) get_app(app_id, params::Dict{String,<:Any}) - Returns an existing Amplify app by appID. +Returns an existing Amplify app specified by an app ID. # Arguments -- `app_id`: The unique ID for an Amplify app. +- `app_id`: The unique ID for an Amplify app. """ function get_app(appId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -608,10 +628,10 @@ end get_artifact_url(artifact_id) get_artifact_url(artifact_id, params::Dict{String,<:Any}) - Returns the artifact info that corresponds to an artifact id. +Returns the artifact info that corresponds to an artifact id. # Arguments -- `artifact_id`: The unique ID for an artifact. +- `artifact_id`: The unique ID for an artifact. """ function get_artifact_url(artifactId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -640,11 +660,15 @@ end get_backend_environment(app_id, environment_name) get_backend_environment(app_id, environment_name, params::Dict{String,<:Any}) - Returns a backend environment for an Amplify app. +Returns a backend environment for an Amplify app. This API is available only to Amplify +Gen 1 applications where the backend is created using Amplify Studio or the Amplify command +line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you +deploy an application with Amplify Gen 2, you provision the app's backend infrastructure +using Typescript code. # Arguments -- `app_id`: The unique id for an Amplify app. -- `environment_name`: The name for the backend environment. +- `app_id`: The unique id for an Amplify app. +- `environment_name`: The name for the backend environment. """ function get_backend_environment( @@ -680,7 +704,7 @@ end # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch. +- `branch_name`: The name of the branch. """ function get_branch(appId, branchName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -710,7 +734,7 @@ end get_domain_association(app_id, domain_name) get_domain_association(app_id, domain_name, params::Dict{String,<:Any}) - Returns the domain information for an Amplify app. +Returns the domain information for an Amplify app. # Arguments - `app_id`: The unique id for an Amplify app. @@ -749,9 +773,9 @@ end Returns a job for a branch of an Amplify app. # Arguments -- `app_id`: The unique ID for an Amplify app. -- `branch_name`: The branch name for the job. -- `job_id`: The unique ID for the job. +- `app_id`: The unique ID for an Amplify app. +- `branch_name`: The name of the branch to use for the job. +- `job_id`: The unique ID for the job. """ function get_job( @@ -784,10 +808,10 @@ end get_webhook(webhook_id) get_webhook(webhook_id, params::Dict{String,<:Any}) - Returns the webhook information that corresponds to a specified webhook ID. +Returns the webhook information that corresponds to a specified webhook ID. # Arguments -- `webhook_id`: The unique ID for a webhook. +- `webhook_id`: The unique ID for a webhook. 
""" function get_webhook(webhookId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -816,12 +840,12 @@ end list_apps() list_apps(params::Dict{String,<:Any}) - Returns a list of the existing Amplify apps. +Returns a list of the existing Amplify apps. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of records to list in a single response. -- `"nextToken"`: A pagination token. If non-null, the pagination token is returned in a +- `"maxResults"`: The maximum number of records to list in a single response. +- `"nextToken"`: A pagination token. If non-null, the pagination token is returned in a result. Pass its value in another request to retrieve more entries. """ function list_apps(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -839,17 +863,17 @@ end list_artifacts(app_id, branch_name, job_id) list_artifacts(app_id, branch_name, job_id, params::Dict{String,<:Any}) - Returns a list of artifacts for a specified app, branch, and job. +Returns a list of artifacts for a specified app, branch, and job. # Arguments -- `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name of a branch that is part of an Amplify app. -- `job_id`: The unique ID for a job. +- `app_id`: The unique ID for an Amplify app. +- `branch_name`: The name of a branch that is part of an Amplify app. +- `job_id`: The unique ID for a job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of records to list in a single response. -- `"nextToken"`: A pagination token. Set to null to start listing artifacts from start. If +- `"maxResults"`: The maximum number of records to list in a single response. +- `"nextToken"`: A pagination token. Set to null to start listing artifacts from start. If a non-null pagination token is returned in a result, pass its value in here to list more artifacts. """ @@ -883,18 +907,22 @@ end list_backend_environments(app_id) list_backend_environments(app_id, params::Dict{String,<:Any}) - Lists the backend environments for an Amplify app. +Lists the backend environments for an Amplify app. This API is available only to Amplify +Gen 1 applications where the backend is created using Amplify Studio or the Amplify command +line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you +deploy an application with Amplify Gen 2, you provision the app's backend infrastructure +using Typescript code. # Arguments -- `app_id`: The unique ID for an Amplify app. +- `app_id`: The unique ID for an Amplify app. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"environmentName"`: The name of the backend environment -- `"maxResults"`: The maximum number of records to list in a single response. -- `"nextToken"`: A pagination token. Set to null to start listing backend environments - from the start. If a non-null pagination token is returned in a result, pass its value in - here to list more backend environments. +- `"environmentName"`: The name of the backend environment +- `"maxResults"`: The maximum number of records to list in a single response. +- `"nextToken"`: A pagination token. Set to null to start listing backend environments from + the start. If a non-null pagination token is returned in a result, pass its value in here + to list more backend environments. 
""" function list_backend_environments(appId; aws_config::AbstractAWSConfig=global_aws_config()) return amplify( @@ -923,12 +951,12 @@ end Lists the branches of an Amplify app. # Arguments -- `app_id`: The unique ID for an Amplify app. +- `app_id`: The unique ID for an Amplify app. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of records to list in a single response. -- `"nextToken"`: A pagination token. Set to null to start listing branches from the start. +- `"nextToken"`: A pagination token. Set to null to start listing branches from the start. If a non-null pagination token is returned in a result, pass its value in here to list more branches. """ @@ -956,7 +984,7 @@ end list_domain_associations(app_id) list_domain_associations(app_id, params::Dict{String,<:Any}) - Returns the domain associations for an Amplify app. +Returns the domain associations for an Amplify app. # Arguments - `app_id`: The unique ID for an Amplify app. @@ -996,12 +1024,12 @@ end # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for a branch. +- `branch_name`: The name of the branch to use for the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of records to list in a single response. -- `"nextToken"`: A pagination token. Set to null to start listing steps from the start. If +- `"maxResults"`: The maximum number of records to list in a single response. +- `"nextToken"`: A pagination token. Set to null to start listing steps from the start. If a non-null pagination token is returned in a result, pass its value in here to list more steps. """ @@ -1032,10 +1060,10 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) - Returns a list of tags for a specified Amazon Resource Name (ARN). +Returns a list of tags for a specified Amazon Resource Name (ARN). # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) to use to list tags. +- `resource_arn`: The Amazon Resource Name (ARN) to use to list tags. """ function list_tags_for_resource( @@ -1066,15 +1094,15 @@ end list_webhooks(app_id) list_webhooks(app_id, params::Dict{String,<:Any}) - Returns a list of webhooks for an Amplify app. +Returns a list of webhooks for an Amplify app. # Arguments -- `app_id`: The unique ID for an Amplify app. +- `app_id`: The unique ID for an Amplify app. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of records to list in a single response. -- `"nextToken"`: A pagination token. Set to null to start listing webhooks from the start. +- `"maxResults"`: The maximum number of records to list in a single response. +- `"nextToken"`: A pagination token. Set to null to start listing webhooks from the start. If non-null,the pagination token is returned in a result. Pass its value in here to list more webhooks. """ @@ -1102,17 +1130,19 @@ end start_deployment(app_id, branch_name) start_deployment(app_id, branch_name, params::Dict{String,<:Any}) - Starts a deployment for a manually deployed app. Manually deployed apps are not connected -to a repository. +Starts a deployment for a manually deployed app. Manually deployed apps are not connected +to a repository. 
The maximum duration between the CreateDeployment call and the +StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the +StartDeployment call and the associated Job will fail. # Arguments -- `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch, for the job. +- `app_id`: The unique ID for an Amplify app. +- `branch_name`: The name of the branch to use for the job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"jobId"`: The job ID for this deployment, generated by the create deployment request. -- `"sourceUrl"`: The source URL for this deployment, used when calling start deployment +- `"jobId"`: The job ID for this deployment, generated by the create deployment request. +- `"sourceUrl"`: The source URL for this deployment, used when calling start deployment without create deployment. The source URL can be any HTTP GET URL that is publicly accessible and downloads a single .zip file. """ @@ -1148,11 +1178,11 @@ end Starts a new job for a branch of an Amplify app. # Arguments -- `app_id`: The unique ID for an Amplify app. -- `branch_name`: The branch name for the job. -- `job_type`: Describes the type for the job. The job type RELEASE starts a new job with +- `app_id`: The unique ID for an Amplify app. +- `branch_name`: The name of the branch to use for the job. +- `job_type`: Describes the type for the job. The job type RELEASE starts a new job with the latest change from the specified branch. This value is available only for apps that are - connected to a repository. The job type RETRY retries an existing job. If the job type + connected to a repository. The job type RETRY retries an existing job. If the job type value is RETRY, the jobId is also required. # Optional Parameters @@ -1161,9 +1191,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"commitMessage"`: The commit message from a third-party repository provider for the job. - `"commitTime"`: The commit date and time for the job. -- `"jobId"`: The unique ID for an existing job. This is required if the value of jobType - is RETRY. -- `"jobReason"`: A descriptive reason for starting this job. +- `"jobId"`: The unique ID for an existing job. This is required if the value of jobType is + RETRY. +- `"jobReason"`: A descriptive reason for starting the job. """ function start_job( appId, branchName, jobType; aws_config::AbstractAWSConfig=global_aws_config() @@ -1200,7 +1230,7 @@ end # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch, for the job. +- `branch_name`: The name of the branch to use for the stop job request. - `job_id`: The unique id for the job. """ @@ -1234,11 +1264,11 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) - Tags the resource with a tag key and value. +Tags the resource with a tag key and value. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) to use to tag a resource. -- `tags`: The tags used to tag the resource. +- `tags`: The tags used to tag the resource. """ function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1269,11 +1299,11 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) - Untags a resource with a specified Amazon Resource Name (ARN). +Untags a resource with a specified Amazon Resource Name (ARN). 
# Arguments -- `resource_arn`: The Amazon Resource Name (ARN) to use to untag a resource. -- `tag_keys`: The tag keys to use to untag a resource. +- `resource_arn`: The Amazon Resource Name (ARN) to use to untag a resource. +- `tag_keys`: The tag keys to use to untag a resource. """ function untag_resource( @@ -1306,10 +1336,10 @@ end update_app(app_id) update_app(app_id, params::Dict{String,<:Any}) - Updates an existing Amplify app. +Updates an existing Amplify app. # Arguments -- `app_id`: The unique ID for an Amplify app. +- `app_id`: The unique ID for an Amplify app. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1322,26 +1352,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the Amplify User Guide . -- `"autoBranchCreationConfig"`: The automated branch creation configuration for an Amplify +- `"autoBranchCreationConfig"`: The automated branch creation configuration for an Amplify app. -- `"autoBranchCreationPatterns"`: Describes the automated branch creation glob patterns - for an Amplify app. -- `"basicAuthCredentials"`: The basic authorization credentials for an Amplify app. You +- `"autoBranchCreationPatterns"`: Describes the automated branch creation glob patterns for + an Amplify app. +- `"basicAuthCredentials"`: The basic authorization credentials for an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password. -- `"buildSpec"`: The build specification (build spec) for an Amplify app. +- `"buildSpec"`: The build specification (build spec) for an Amplify app. - `"customHeaders"`: The custom HTTP headers for an Amplify app. -- `"customRules"`: The custom redirect and rewrite rules for an Amplify app. -- `"description"`: The description for an Amplify app. -- `"enableAutoBranchCreation"`: Enables automated branch creation for an Amplify app. -- `"enableBasicAuth"`: Enables basic authorization for an Amplify app. -- `"enableBranchAutoBuild"`: Enables branch auto-building for an Amplify app. -- `"enableBranchAutoDeletion"`: Automatically disconnects a branch in the Amplify Console +- `"customRules"`: The custom redirect and rewrite rules for an Amplify app. +- `"description"`: The description for an Amplify app. +- `"enableAutoBranchCreation"`: Enables automated branch creation for an Amplify app. +- `"enableBasicAuth"`: Enables basic authorization for an Amplify app. +- `"enableBranchAutoBuild"`: Enables branch auto-building for an Amplify app. +- `"enableBranchAutoDeletion"`: Automatically disconnects a branch in the Amplify console when you delete a branch from your Git repository. -- `"environmentVariables"`: The environment variables for an Amplify app. -- `"iamServiceRoleArn"`: The AWS Identity and Access Management (IAM) service role for an +- `"environmentVariables"`: The environment variables for an Amplify app. +- `"iamServiceRoleArn"`: The AWS Identity and Access Management (IAM) service role for an Amplify app. -- `"name"`: The name for an Amplify app. +- `"name"`: The name for an Amplify app. - `"oauthToken"`: The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored. 
Use oauthToken for repository providers other than @@ -1351,11 +1381,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the Amplify User Guide . -- `"platform"`: The platform for the Amplify app. For a static app, set the platform type +- `"platform"`: The platform for the Amplify app. For a static app, set the platform type to WEB. For a dynamic server-side rendered (SSR) app, set the platform type to WEB_COMPUTE. For an app requiring Amplify Hosting's original SSR support only, set the platform type to WEB_DYNAMIC. -- `"repository"`: The name of the repository for an Amplify app +- `"repository"`: The name of the Git repository for an Amplify app. """ function update_app(appId; aws_config::AbstractAWSConfig=global_aws_config()) return amplify( @@ -1382,12 +1412,17 @@ end # Arguments - `app_id`: The unique ID for an Amplify app. -- `branch_name`: The name for the branch. +- `branch_name`: The name of the branch. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"backendEnvironmentArn"`: The Amazon Resource Name (ARN) for a backend environment that - is part of an Amplify app. +- `"backend"`: The backend for a Branch of an Amplify app. Use for a backend created from + an CloudFormation stack. This field is available to Amplify Gen 2 apps only. When you + deploy an application with Amplify Gen 2, you provision the app's backend infrastructure + using Typescript code. +- `"backendEnvironmentArn"`: The Amazon Resource Name (ARN) for a backend environment that + is part of a Gen 1 Amplify app. This field is available to Amplify Gen 1 apps only where + the backend is created using Amplify Studio or the Amplify command line interface (CLI). - `"basicAuthCredentials"`: The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password. - `"buildSpec"`: The build specification (build spec) for the branch. @@ -1435,7 +1470,7 @@ end update_domain_association(app_id, domain_name) update_domain_association(app_id, domain_name, params::Dict{String,<:Any}) - Creates a new domain association for an Amplify app. +Creates a new domain association for an Amplify app. # Arguments - `app_id`: The unique ID for an Amplify app. @@ -1447,6 +1482,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys creation. - `"autoSubDomainIAMRole"`: The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains. +- `"certificateSettings"`: The type of SSL/TLS certificate to use for your custom domain. - `"enableAutoSubDomain"`: Enables the automated creation of subdomains for branches. - `"subDomainSettings"`: Describes the settings for the subdomain. """ @@ -1479,15 +1515,15 @@ end update_webhook(webhook_id) update_webhook(webhook_id, params::Dict{String,<:Any}) - Updates a webhook. +Updates a webhook. # Arguments -- `webhook_id`: The unique ID for a webhook. +- `webhook_id`: The unique ID for a webhook. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"branchName"`: The name for a branch that is part of an Amplify app. -- `"description"`: The description for a webhook. 
+- `"branchName"`: The name for a branch that is part of an Amplify app. +- `"description"`: The description for a webhook. """ function update_webhook(webhookId; aws_config::AbstractAWSConfig=global_aws_config()) return amplify( diff --git a/src/services/amplifyuibuilder.jl b/src/services/amplifyuibuilder.jl index b03c9dbd83..f9bc5efd3d 100644 --- a/src/services/amplifyuibuilder.jl +++ b/src/services/amplifyuibuilder.jl @@ -64,7 +64,7 @@ end create_form(app_id, environment_name, form_to_create) create_form(app_id, environment_name, form_to_create, params::Dict{String,<:Any}) -Creates a new form for an Amplify. +Creates a new form for an Amplify app. # Arguments - `app_id`: The unique ID of the Amplify app to associate with the form. @@ -280,7 +280,7 @@ end exchange_code_for_token(provider, request) exchange_code_for_token(provider, request, params::Dict{String,<:Any}) -Exchanges an access code for a token. + This is for internal use. Amplify uses this action to exchange an access code for a token. # Arguments - `provider`: The third-party provider for the token. The only valid value is figma. @@ -740,6 +740,40 @@ function list_forms( ) end +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns a list of tags for a specified Amazon Resource Name (ARN). + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) to use to list tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return amplifyuibuilder( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return amplifyuibuilder( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_themes(app_id, environment_name) list_themes(app_id, environment_name, params::Dict{String,<:Any}) @@ -829,7 +863,8 @@ end refresh_token(provider, refresh_token_body) refresh_token(provider, refresh_token_body, params::Dict{String,<:Any}) -Refreshes a previously issued access token that might have expired. + This is for internal use. Amplify uses this action to refresh a previously issued access +token that might have expired. # Arguments - `provider`: The third-party provider for the token. The only valid value is figma. @@ -870,7 +905,7 @@ end start_codegen_job(app_id, codegen_job_to_create, environment_name) start_codegen_job(app_id, codegen_job_to_create, environment_name, params::Dict{String,<:Any}) -Starts a code generation job for for a specified Amplify app and backend environment. +Starts a code generation job for a specified Amplify app and backend environment. # Arguments - `app_id`: The unique ID for the Amplify app. @@ -923,6 +958,78 @@ function start_codegen_job( ) end +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Tags the resource with a tag key and value. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) to use to tag a resource. +- `tags`: A list of tag key value pairs for a specified Amazon Resource Name (ARN). 
+ +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return amplifyuibuilder( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return amplifyuibuilder( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Untags a resource with a specified Amazon Resource Name (ARN). + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) to use to untag a resource. +- `tag_keys`: The tag keys to use to untag a resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return amplifyuibuilder( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return amplifyuibuilder( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_component(app_id, environment_name, id, updated_component) update_component(app_id, environment_name, id, updated_component, params::Dict{String,<:Any}) diff --git a/src/services/api_gateway.jl b/src/services/api_gateway.jl index c20684e5f8..8af11cd120 100644 --- a/src/services/api_gateway.jl +++ b/src/services/api_gateway.jl @@ -12,8 +12,8 @@ Create an ApiKey resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"customerId"`: An AWS Marketplace customer identifier , when integrating with the AWS - SaaS Marketplace. +- `"customerId"`: An Amazon Web Services Marketplace customer identifier, when integrating + with the Amazon Web Services SaaS Marketplace. - `"description"`: The description of the ApiKey. - `"enabled"`: Specifies whether the ApiKey can be used by callers. - `"generateDistinctId"`: Specifies whether (true) or not (false) the key identifier is @@ -330,9 +330,9 @@ Creates a new domain name. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"certificateArn"`: The reference to an AWS-managed certificate that will be used by - edge-optimized endpoint for this domain name. AWS Certificate Manager is the only supported - source. +- `"certificateArn"`: The reference to an Amazon Web Services-managed certificate that will + be used by edge-optimized endpoint for this domain name. Certificate Manager is the only + supported source. - `"certificateBody"`: [Deprecated] The body of the server certificate that will be used by edge-optimized endpoint for this domain name provided by your certificate authority. - `"certificateChain"`: [Deprecated] The intermediate certificates and optionally the root @@ -351,9 +351,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"ownershipVerificationCertificateArn"`: The ARN of the public certificate issued by ACM to validate ownership of your custom domain. Only required when configuring mutual TLS and using an ACM imported or private CA certificate ARN as the regionalCertificateArn. -- `"regionalCertificateArn"`: The reference to an AWS-managed certificate that will be used - by regional endpoint for this domain name. AWS Certificate Manager is the only supported - source. +- `"regionalCertificateArn"`: The reference to an Amazon Web Services-managed certificate + that will be used by regional endpoint for this domain name. Certificate Manager is the + only supported source. - `"regionalCertificateName"`: The user-friendly name of the certificate that will be used by regional endpoint for this domain name. - `"securityPolicy"`: The Transport Layer Security (TLS) version + cipher suite for this @@ -402,7 +402,7 @@ Adds a new Model resource to an existing RestApi resource. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: The description of the model. - `"schema"`: The schema for the model. For application/json models, this should be JSON - schema draft 4 model. + schema draft 4 model. The maximum size of the model is 400 KB. """ function create_model( contentType, name, restapi_id; aws_config::AbstractAWSConfig=global_aws_config() @@ -531,9 +531,8 @@ Creates a new RestApi resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"apiKeySource"`: The source of the API key for metering requests according to a usage - plan. Valid values are: >HEADER to read the API key from the X-API-Key header of a - request. AUTHORIZER to read the API key from the UsageIdentifierKey from a custom - authorizer. + plan. Valid values are: HEADER to read the API key from the X-API-Key header of a request. + AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer. - `"binaryMediaTypes"`: The list of binary media types supported by the RestApi. By default, the RestApi supports only UTF-8-encoded text payloads. - `"cloneFrom"`: The ID of the RestApi that you want to clone from. @@ -734,7 +733,8 @@ must have permissions to create and update VPC Endpoint services. # Arguments - `name`: The name used to label and identify the VPC link. - `target_arns`: The ARN of the network load balancer of the VPC targeted by the VPC link. - The network load balancer must be owned by the same AWS account of the API owner. + The network load balancer must be owned by the same Amazon Web Services account of the API + owner. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1694,8 +1694,8 @@ Gets information about the current ApiKeys resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"customerId"`: The identifier of a customer in AWS Marketplace or an external system, - such as a developer portal. +- `"customerId"`: The identifier of a customer in Amazon Web Services Marketplace or an + external system, such as a developer portal. - `"includeValues"`: A boolean flag to specify whether (true) or not (false) the result contains key values. - `"limit"`: The maximum number of returned results per page. The default value is 25 and @@ -3372,9 +3372,9 @@ Imports documentation parts Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"failonwarnings"`: A query parameter to specify whether to rollback the documentation importation (true) or not (false) when a warning is encountered. The default value is false. -- `"mode"`: A query parameter to indicate whether to overwrite (OVERWRITE) any existing - DocumentationParts definition or to merge (MERGE) the new definition into the existing one. - The default value is MERGE. +- `"mode"`: A query parameter to indicate whether to overwrite (overwrite) any existing + DocumentationParts definition or to merge (merge) the new definition into the existing one. + The default value is merge. """ function import_documentation_parts( body, restapi_id; aws_config::AbstractAWSConfig=global_aws_config() @@ -3424,9 +3424,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameters as ignore=documentation. To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE. To handle imported - basepath, set parameters as basepath=ignore, basepath=prepend or basepath=split. For - example, the AWS CLI command to exclude documentation from the imported API is: The AWS CLI - command to set the regional endpoint on the imported API is: + basepath, set parameters as basepath=ignore, basepath=prepend or basepath=split. """ function import_rest_api(body; aws_config::AbstractAWSConfig=global_aws_config()) return api_gateway( @@ -4286,7 +4284,7 @@ Updates a documentation version. # Arguments - `doc_version`: The version identifier of the to-be-updated documentation version. -- `restapi_id`: The string identifier of the associated RestApi.. +- `restapi_id`: The string identifier of the associated RestApi. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4578,7 +4576,7 @@ end update_model(model_name, restapi_id) update_model(model_name, restapi_id, params::Dict{String,<:Any}) -Changes information about a model. +Changes information about a model. The maximum size of the model is 400 KB. # Arguments - `model_name`: The name of the model to update. diff --git a/src/services/apigatewayv2.jl b/src/services/apigatewayv2.jl index 34c8801569..aacd7778c2 100644 --- a/src/services/apigatewayv2.jl +++ b/src/services/apigatewayv2.jl @@ -1149,7 +1149,7 @@ end delete_route_request_parameter(api_id, request_parameter_key, route_id) delete_route_request_parameter(api_id, request_parameter_key, route_id, params::Dict{String,<:Any}) -Deletes a route request parameter. +Deletes a route request parameter. Supported only for WebSocket APIs. # Arguments - `api_id`: The API identifier. diff --git a/src/services/appconfig.jl b/src/services/appconfig.jl index bbf1b1ec28..9cd18ee642 100644 --- a/src/services/appconfig.jl +++ b/src/services/appconfig.jl @@ -67,18 +67,25 @@ Guide. - `location_uri`: A URI to locate the configuration. You can specify the following: For the AppConfig hosted configuration store and for feature flags, specify hosted. For an Amazon Web Services Systems Manager Parameter Store parameter, specify either the parameter - name in the format ssm-parameter://<parameter name> or the ARN. For an Secrets - Manager secret, specify the URI in the following format: secrets-manager://<secret - name>. For an Amazon S3 object, specify the URI in the following format: - s3://<bucket>/<objectKey> . 
Here is an example: - s3://my-bucket/my-app/us-east-1/my-config.json For an SSM document, specify either the - document name in the format ssm-document://<document name> or the Amazon Resource - Name (ARN). + name in the format ssm-parameter://<parameter name> or the ARN. For an Amazon Web + Services CodePipeline pipeline, specify the URI in the following format: + codepipeline://<pipeline name>. For an Secrets Manager secret, specify the URI in + the following format: secretsmanager://<secret name>. For an Amazon S3 object, + specify the URI in the following format: s3://<bucket>/<objectKey> . Here is an + example: s3://my-bucket/my-app/us-east-1/my-config.json For an SSM document, specify + either the document name in the format ssm-document://<document name> or the Amazon + Resource Name (ARN). - `name`: A name for the configuration profile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: A description of the configuration profile. +- `"KmsKeyIdentifier"`: The identifier for an Key Management Service key to encrypt new + configuration data versions in the AppConfig hosted configuration store. This attribute is + only used for hosted configuration types. The identifier can be an KMS key ID, alias, or + the Amazon Resource Name (ARN) of the key ID or alias. To encrypt data managed in other + configuration stores, see the documentation for how to specify an KMS key for that + particular service. - `"RetrievalRoleArn"`: The ARN of an IAM role with permission to access the configuration at the specified LocationUri. A retrieval role ARN is not required for configurations stored in the AppConfig hosted configuration store. It is required for all other sources @@ -269,10 +276,16 @@ end Creates an AppConfig extension. An extension augments your ability to inject logic or behavior at different points during the AppConfig workflow of creating or deploying a configuration. You can create your own extensions or use the Amazon Web Services authored -extensions provided by AppConfig. For most use cases, to create your own extension, you -must create an Lambda function to perform any computation and processing defined in the -extension. For more information about extensions, see Working with AppConfig extensions in -the AppConfig User Guide. +extensions provided by AppConfig. For an AppConfig extension that uses Lambda, you must +create a Lambda function to perform any computation and processing defined in the +extension. If you plan to create custom versions of the Amazon Web Services authored +notification extensions, you only need to specify an Amazon Resource Name (ARN) in the Uri +field for the new extension version. For a custom EventBridge notification extension, +enter the ARN of the EventBridge default events in the Uri field. For a custom Amazon SNS +notification extension, enter the ARN of an Amazon SNS topic in the Uri field. For a +custom Amazon SQS notification extension, enter the ARN of an Amazon SQS message queue in +the Uri field. For more information about extensions, see Extending workflows in the +AppConfig User Guide. # Arguments - `actions`: The actions defined in the extension. @@ -333,8 +346,7 @@ anytime a configuration deployment is started for a specific application. Defini extension to associate with an AppConfig resource is called an extension association. 
An extension association is a specified relationship between an extension and an AppConfig resource, such as an application or a configuration profile. For more information about -extensions and associations, see Working with AppConfig extensions in the AppConfig User -Guide. +extensions and associations, see Extending workflows in the AppConfig User Guide. # Arguments - `extension_identifier`: The name, the ID, or the Amazon Resource Name (ARN) of the @@ -1022,8 +1034,7 @@ end get_extension_association(extension_association_id, params::Dict{String,<:Any}) Returns information about an AppConfig extension association. For more information about -extensions and associations, see Working with AppConfig extensions in the AppConfig User -Guide. +extensions and associations, see Extending workflows in the AppConfig User Guide. # Arguments - `extension_association_id`: The extension association ID to get. @@ -1286,8 +1297,7 @@ end list_extension_associations(params::Dict{String,<:Any}) Lists all AppConfig extension associations in the account. For more information about -extensions and associations, see Working with AppConfig extensions in the AppConfig User -Guide. +extensions and associations, see Extending workflows in the AppConfig User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1326,8 +1336,7 @@ end list_extensions(params::Dict{String,<:Any}) Lists all custom and Amazon Web Services authored AppConfig extensions in the account. For -more information about extensions, see Working with AppConfig extensions in the AppConfig -User Guide. +more information about extensions, see Extending workflows in the AppConfig User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1439,12 +1448,15 @@ Starts a deployment. - `configuration_profile_id`: The configuration profile ID. - `configuration_version`: The configuration version to deploy. If deploying an AppConfig hosted configuration version, you can specify either the version number or version label. + For all other configurations, you must specify the version number. - `deployment_strategy_id`: The deployment strategy ID. - `environment_id`: The environment ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: A description of the deployment. +- `"DynamicExtensionParameters"`: A map of dynamic extension parameter names to values to + pass to associated extensions with PRE_START_DEPLOYMENT actions. - `"KmsKeyIdentifier"`: The KMS key identifier (key ID, key alias, or key ARN). AppConfig uses this ID to encrypt the configuration data using a customer managed key. - `"Tags"`: Metadata to assign to the deployment. Tags help organize and categorize your @@ -1668,6 +1680,12 @@ Updates a configuration profile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: A description of the configuration profile. +- `"KmsKeyIdentifier"`: The identifier for a Key Management Service key to encrypt new + configuration data versions in the AppConfig hosted configuration store. This attribute is + only used for hosted configuration types. The identifier can be an KMS key ID, alias, or + the Amazon Resource Name (ARN) of the key ID or alias. To encrypt data managed in other + configuration stores, see the documentation for how to specify an KMS key for that + particular service. 
- `"Name"`: The name of the configuration profile. - `"RetrievalRoleArn"`: The ARN of an IAM role with permission to access the configuration at the specified LocationUri. @@ -1799,8 +1817,8 @@ end update_extension(extension_identifier) update_extension(extension_identifier, params::Dict{String,<:Any}) -Updates an AppConfig extension. For more information about extensions, see Working with -AppConfig extensions in the AppConfig User Guide. +Updates an AppConfig extension. For more information about extensions, see Extending +workflows in the AppConfig User Guide. # Arguments - `extension_identifier`: The name, the ID, or the Amazon Resource Name (ARN) of the @@ -1841,8 +1859,8 @@ end update_extension_association(extension_association_id) update_extension_association(extension_association_id, params::Dict{String,<:Any}) -Updates an association. For more information about extensions and associations, see Working -with AppConfig extensions in the AppConfig User Guide. +Updates an association. For more information about extensions and associations, see +Extending workflows in the AppConfig User Guide. # Arguments - `extension_association_id`: The system-generated ID for the association. diff --git a/src/services/appfabric.jl b/src/services/appfabric.jl new file mode 100644 index 0000000000..f0e606cff0 --- /dev/null +++ b/src/services/appfabric.jl @@ -0,0 +1,1212 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: appfabric +using AWS.Compat +using AWS.UUIDs + +""" + batch_get_user_access_tasks(app_bundle_identifier, task_id_list) + batch_get_user_access_tasks(app_bundle_identifier, task_id_list, params::Dict{String,<:Any}) + +Gets user access details in a batch request. This action polls data from the tasks that are +kicked off by the StartUserAccessTasks action. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `task_id_list`: The tasks IDs to use for the request. + +""" +function batch_get_user_access_tasks( + appBundleIdentifier, taskIdList; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "POST", + "/useraccess/batchget", + Dict{String,Any}( + "appBundleIdentifier" => appBundleIdentifier, "taskIdList" => taskIdList + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_user_access_tasks( + appBundleIdentifier, + taskIdList, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/useraccess/batchget", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appBundleIdentifier" => appBundleIdentifier, "taskIdList" => taskIdList + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + connect_app_authorization(app_authorization_identifier, app_bundle_identifier) + connect_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Establishes a connection between Amazon Web Services AppFabric and an application, which +allows AppFabric to call the APIs of the application. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle that contains the app authorization to use for the request. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"authRequest"`: Contains OAuth2 authorization information. This is required if the app + authorization for the request is configured with an OAuth2 (oauth2) authorization type. +""" +function connect_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)/connect"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function connect_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)/connect", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_app_authorization(app, app_bundle_identifier, auth_type, credential, tenant) + create_app_authorization(app, app_bundle_identifier, auth_type, credential, tenant, params::Dict{String,<:Any}) + +Creates an app authorization within an app bundle, which allows AppFabric to connect to an +application. + +# Arguments +- `app`: The name of the application. Valid values are: SLACK ASANA JIRA + M365 M365AUDITLOGS ZOOM ZENDESK OKTA GOOGLE DROPBOX SMARTSHEET + CISCO +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `auth_type`: The authorization type for the app authorization. +- `credential`: Contains credentials for the application, such as an API key or OAuth2 + client ID and secret. Specify credentials that match the authorization type for your + request. For example, if the authorization type for your request is OAuth2 (oauth2), then + you should provide only the OAuth2 credentials. +- `tenant`: Contains information about an application tenant, such as the application + display name and identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. 
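+
+# Example
+An illustrative sketch only. The nested credential and tenant field names below are
+assumptions about the request shape (check the AppFabric API reference), and every
+identifier is a placeholder.
+```julia
+using AWS
+@service AppFabric
+
+# Authorize a Zendesk tenant in an existing app bundle using an API key (placeholder values).
+AppFabric.create_app_authorization(
+    "ZENDESK",
+    "arn:aws:appfabric:us-east-1:111122223333:appbundle/a1b2c3d4-example",
+    "apiKey",
+    Dict("apiKeyCredential" => Dict("apiKey" => "example-api-key")),
+    Dict("tenantIdentifier" => "d1example", "tenantDisplayName" => "My Zendesk tenant"),
+)
+```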
+""" +function create_app_authorization( + app, + appBundleIdentifier, + authType, + credential, + tenant; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations", + Dict{String,Any}( + "app" => app, + "authType" => authType, + "credential" => credential, + "tenant" => tenant, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_app_authorization( + app, + appBundleIdentifier, + authType, + credential, + tenant, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "app" => app, + "authType" => authType, + "credential" => credential, + "tenant" => tenant, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_app_bundle() + create_app_bundle(params::Dict{String,<:Any}) + +Creates an app bundle to collect data from an application using AppFabric. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"customerManagedKeyIdentifier"`: The Amazon Resource Name (ARN) of the Key Management + Service (KMS) key to use to encrypt the application data. If this is not specified, an + Amazon Web Services owned key is used for encryption. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. +""" +function create_app_bundle(; aws_config::AbstractAWSConfig=global_aws_config()) + return appfabric( + "POST", + "/appbundles", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_app_bundle( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "POST", + "/appbundles", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_ingestion(app, app_bundle_identifier, ingestion_type, tenant_id) + create_ingestion(app, app_bundle_identifier, ingestion_type, tenant_id, params::Dict{String,<:Any}) + +Creates a data ingestion for an application. + +# Arguments +- `app`: The name of the application. Valid values are: SLACK ASANA JIRA + M365 M365AUDITLOGS ZOOM ZENDESK OKTA GOOGLE DROPBOX SMARTSHEET + CISCO +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_type`: The ingestion type. +- `tenant_id`: The ID of the application tenant. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. +""" +function create_ingestion( + app, + appBundleIdentifier, + ingestionType, + tenantId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions", + Dict{String,Any}( + "app" => app, + "ingestionType" => ingestionType, + "tenantId" => tenantId, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ingestion( + app, + appBundleIdentifier, + ingestionType, + tenantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "app" => app, + "ingestionType" => ingestionType, + "tenantId" => tenantId, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_identifier, processing_configuration) + create_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_identifier, processing_configuration, params::Dict{String,<:Any}) + +Creates an ingestion destination, which specifies how an application's ingested data is +processed by Amazon Web Services AppFabric and where it's delivered. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `destination_configuration`: Contains information about the destination of ingested data. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. +- `processing_configuration`: Contains information about how ingested data is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. 
+- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. +""" +function create_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionIdentifier, + processingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations", + Dict{String,Any}( + "destinationConfiguration" => destinationConfiguration, + "processingConfiguration" => processingConfiguration, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionIdentifier, + processingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "destinationConfiguration" => destinationConfiguration, + "processingConfiguration" => processingConfiguration, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_app_authorization(app_authorization_identifier, app_bundle_identifier) + delete_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Deletes an app authorization. You must delete the associated ingestion before you can +delete an app authorization. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +""" +function delete_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_app_bundle(app_bundle_identifier) + delete_app_bundle(app_bundle_identifier, params::Dict{String,<:Any}) + +Deletes an app bundle. You must delete all associated app authorizations before you can +delete an app bundle. + +# Arguments +- `app_bundle_identifier`: The ID or Amazon Resource Name (ARN) of the app bundle that + needs to be deleted. 
+ +""" +function delete_app_bundle( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_app_bundle( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_ingestion(app_bundle_identifier, ingestion_identifier) + delete_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Deletes an ingestion. You must stop (disable) the ingestion and you must delete all +associated ingestion destinations before you can delete an app ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function delete_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier) + delete_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Deletes an ingestion destination. This deletes the association between an ingestion and +it's destination. It doesn't delete previously ingested data or the storage destination, +such as the Amazon S3 bucket where the data is delivered. If the ingestion destination is +deleted while the associated ingestion is enabled, the ingestion will fail and is +eventually disabled. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_destination_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the ingestion destination to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. 
+ +""" +function delete_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_app_authorization(app_authorization_identifier, app_bundle_identifier) + get_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Returns information about an app authorization. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +""" +function get_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_app_bundle(app_bundle_identifier) + get_app_bundle(app_bundle_identifier, params::Dict{String,<:Any}) + +Returns information about an app bundle. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +""" +function get_app_bundle( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_app_bundle( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_ingestion(app_bundle_identifier, ingestion_identifier) + get_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Returns information about an ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. 
+ +""" +function get_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier) + get_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Returns information about an ingestion destination. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_destination_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the ingestion destination to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function get_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_app_authorizations(app_bundle_identifier) + list_app_authorizations(app_bundle_identifier, params::Dict{String,<:Any}) + +Returns a list of all app authorizations configured for an app bundle. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. 
+""" +function list_app_authorizations( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_app_authorizations( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_app_bundles() + list_app_bundles(params::Dict{String,<:Any}) + +Returns a list of app bundles. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_app_bundles(; aws_config::AbstractAWSConfig=global_aws_config()) + return appfabric( + "GET", "/appbundles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_app_bundles( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", "/appbundles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_ingestion_destinations(app_bundle_identifier, ingestion_identifier) + list_ingestion_destinations(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Returns a list of all ingestion destinations configured for an ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. 
+""" +function list_ingestion_destinations( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_ingestion_destinations( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_ingestions(app_bundle_identifier) + list_ingestions(app_bundle_identifier, params::Dict{String,<:Any}) + +Returns a list of all ingestions configured for an app bundle. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_ingestions( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_ingestions( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns a list of tags for a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which you want to + retrieve tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_ingestion(app_bundle_identifier, ingestion_identifier) + start_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Starts (enables) an ingestion, which collects data from an application. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. 
+- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function start_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/start"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/start", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_user_access_tasks(app_bundle_identifier, email) + start_user_access_tasks(app_bundle_identifier, email, params::Dict{String,<:Any}) + +Starts the tasks to search user access status for a specific email address. The tasks are +stopped when the user access status data is found. The tasks are terminated when the API +calls to the application time out. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `email`: The email address of the target user. + +""" +function start_user_access_tasks( + appBundleIdentifier, email; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "POST", + "/useraccess/start", + Dict{String,Any}("appBundleIdentifier" => appBundleIdentifier, "email" => email); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_user_access_tasks( + appBundleIdentifier, + email, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/useraccess/start", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appBundleIdentifier" => appBundleIdentifier, "email" => email + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_ingestion(app_bundle_identifier, ingestion_identifier) + stop_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Stops (disables) an ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function stop_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns one or more tags (key-value pairs) to the specified resource. 
+ +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to tag. +- `tags`: A map of the key-value pairs of the tag or tags to assign to the resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return appfabric( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes a tag or tags from a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to untag. +- `tag_keys`: The keys of the key-value pairs for the tag or tags you want to remove from + the specified resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_app_authorization(app_authorization_identifier, app_bundle_identifier) + update_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Updates an app authorization within an app bundle, which allows AppFabric to connect to an +application. If the app authorization was in a connected state, updating the app +authorization will set it back to a PendingConnect state. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"credential"`: Contains credentials for the application, such as an API key or OAuth2 + client ID and secret. Specify credentials that match the authorization type of the app + authorization to update. For example, if the authorization type of the app authorization is + OAuth2 (oauth2), then you should provide only the OAuth2 credentials. +- `"tenant"`: Contains information about an application tenant, such as the application + display name and identifier. 
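+
+# Example
+A minimal, hypothetical sketch (placeholder identifiers) that updates only the tenant
+information. The nested `tenant` field names shown here are assumptions for illustration,
+not taken from this file; check the AppFabric API reference for the exact shape:
+```julia
+auth_id = "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333"    # placeholder UUID
+bundle_id = "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111"  # placeholder UUID
+update_app_authorization(
+    auth_id,
+    bundle_id,
+    Dict{String,Any}(
+        "tenant" => Dict(
+            "tenantDisplayName" => "Example tenant",    # assumed field name
+            "tenantIdentifier" => "example-tenant-id",  # assumed field name
+        ),
+    ),
+)
+```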
+""" +function update_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_destination_identifier, ingestion_identifier) + update_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_destination_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Updates an ingestion destination, which specifies how an application's ingested data is +processed by Amazon Web Services AppFabric and where it's delivered. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `destination_configuration`: Contains information about the destination of ingested data. +- `ingestion_destination_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the ingestion destination to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function update_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionDestinationIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + Dict{String,Any}("destinationConfiguration" => destinationConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionDestinationIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("destinationConfiguration" => destinationConfiguration), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/appflow.jl b/src/services/appflow.jl index aa84d5f2c9..1afb75c8c7 100644 --- a/src/services/appflow.jl +++ b/src/services/appflow.jl @@ -768,6 +768,71 @@ function register_connector( ) end +""" + reset_connector_metadata_cache() + reset_connector_metadata_cache(params::Dict{String,<:Any}) + +Resets metadata about your connector entities that Amazon AppFlow stored in its cache. Use +this action when you want Amazon AppFlow to return the latest information about the data +that you have in a source application. Amazon AppFlow returns metadata about your entities +when you use the ListConnectorEntities or DescribeConnectorEntities actions. 
Following +these actions, Amazon AppFlow caches the metadata to reduce the number of API requests that +it must send to the source application. Amazon AppFlow automatically resets the cache once +every hour, but you can use this action when you want to get the latest metadata right away. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"apiVersion"`: The API version that you specified in the connector profile that you’re + resetting cached metadata for. You must use this parameter only if the connector supports + multiple API versions or if the connector type is CustomConnector. To look up how many + versions a connector supports, use the DescribeConnectors action. In the response, find the + value that Amazon AppFlow returns for the connectorVersion parameter. To look up the + connector type, use the DescribeConnectorProfiles action. In the response, find the value + that Amazon AppFlow returns for the connectorType parameter. To look up the API version + that you specified in a connector profile, use the DescribeConnectorProfiles action. +- `"connectorEntityName"`: Use this parameter if you want to reset cached metadata about + the details for an individual entity. If you don't include this parameter in your request, + Amazon AppFlow only resets cached metadata about entity names, not entity details. +- `"connectorProfileName"`: The name of the connector profile that you want to reset cached + metadata for. You can omit this parameter if you're resetting the cache for any of the + following connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, + Amazon S3, or Upsolver. If you're resetting the cache for any other connector, you must + include this parameter in your request. +- `"connectorType"`: The type of connector to reset cached metadata for. You must include + this parameter in your request if you're resetting the cache for any of the following + connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, Amazon S3, or + Upsolver. If you're resetting the cache for any other connector, you can omit this + parameter from your request. +- `"entitiesPath"`: Use this parameter only if you’re resetting the cached metadata about + a nested entity. Only some connectors support nested entities. A nested entity is one that + has another entity as a parent. To use this parameter, specify the name of the parent + entity. To look up the parent-child relationship of entities, you can send a + ListConnectorEntities request that omits the entitiesPath parameter. Amazon AppFlow will + return a list of top-level entities. For each one, it indicates whether the entity has + nested entities. Then, in a subsequent ListConnectorEntities request, you can specify a + parent entity name for the entitiesPath parameter. Amazon AppFlow will return a list of the + child entities for that parent. 
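+
+# Example
+A minimal, hypothetical sketch that refreshes cached entity names for a single connector
+profile; the profile name is a placeholder, and `connectorType` is omitted because the
+profile here is not one of the connectors that require it:
+```julia
+reset_connector_metadata_cache(
+    Dict{String,Any}("connectorProfileName" => "my-salesforce-profile"),  # placeholder name
+)
+```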
+""" +function reset_connector_metadata_cache(; aws_config::AbstractAWSConfig=global_aws_config()) + return appflow( + "POST", + "/reset-connector-metadata-cache"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reset_connector_metadata_cache( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appflow( + "POST", + "/reset-connector-metadata-cache", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_flow(flow_name) start_flow(flow_name, params::Dict{String,<:Any}) diff --git a/src/services/appintegrations.jl b/src/services/appintegrations.jl index b073428030..326efb01fa 100644 --- a/src/services/appintegrations.jl +++ b/src/services/appintegrations.jl @@ -5,8 +5,80 @@ using AWS.Compat using AWS.UUIDs """ - create_data_integration(kms_key, name, schedule_config, source_uri) - create_data_integration(kms_key, name, schedule_config, source_uri, params::Dict{String,<:Any}) + create_application(application_source_config, name, namespace) + create_application(application_source_config, name, namespace, params::Dict{String,<:Any}) + +This API is in preview release and subject to change. Creates and persists an Application +resource. + +# Arguments +- `application_source_config`: The configuration for where the application should be loaded + from. +- `name`: The name of the application. +- `namespace`: The namespace of the application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"Description"`: The description of the application. +- `"Permissions"`: The configuration of events or requests that the application has access + to. +- `"Publications"`: The events that the application publishes. +- `"Subscriptions"`: The events that the application subscribes. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_application( + ApplicationSourceConfig, + Name, + Namespace; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "POST", + "/applications", + Dict{String,Any}( + "ApplicationSourceConfig" => ApplicationSourceConfig, + "Name" => Name, + "Namespace" => Namespace, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_application( + ApplicationSourceConfig, + Name, + Namespace, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "POST", + "/applications", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationSourceConfig" => ApplicationSourceConfig, + "Name" => Name, + "Namespace" => Namespace, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_data_integration(kms_key, name, source_uri) + create_data_integration(kms_key, name, source_uri, params::Dict{String,<:Any}) Creates and persists a DataIntegration resource. You cannot create a DataIntegration association for a DataIntegration that has been previously associated. 
Use a different @@ -15,7 +87,6 @@ DataIntegration, or recreate the DataIntegration using the CreateDataIntegration # Arguments - `kms_key`: The KMS key for the DataIntegration. - `name`: The name of the DataIntegration. -- `schedule_config`: The name of the data and how often it should be pulled from the source. - `source_uri`: The URI of the data source. # Optional Parameters @@ -26,15 +97,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Description"`: A description of the DataIntegration. - `"FileConfiguration"`: The configuration for what files should be pulled from the source. - `"ObjectConfiguration"`: The configuration for what data should be pulled from the source. +- `"ScheduleConfig"`: The name of the data and how often it should be pulled from the + source. - `"Tags"`: The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_data_integration( - KmsKey, - Name, - ScheduleConfig, - SourceURI; - aws_config::AbstractAWSConfig=global_aws_config(), + KmsKey, Name, SourceURI; aws_config::AbstractAWSConfig=global_aws_config() ) return appintegrations( "POST", @@ -42,7 +111,6 @@ function create_data_integration( Dict{String,Any}( "KmsKey" => KmsKey, "Name" => Name, - "ScheduleConfig" => ScheduleConfig, "SourceURI" => SourceURI, "ClientToken" => string(uuid4()), ); @@ -53,7 +121,6 @@ end function create_data_integration( KmsKey, Name, - ScheduleConfig, SourceURI, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -67,7 +134,6 @@ function create_data_integration( Dict{String,Any}( "KmsKey" => KmsKey, "Name" => Name, - "ScheduleConfig" => ScheduleConfig, "SourceURI" => SourceURI, "ClientToken" => string(uuid4()), ), @@ -145,6 +211,41 @@ function create_event_integration( ) end +""" + delete_application(application_identifier) + delete_application(application_identifier, params::Dict{String,<:Any}) + +Deletes the Application. Only Applications that don't have any Application Associations can +be deleted. + +# Arguments +- `application_identifier`: The Amazon Resource Name (ARN) of the Application. + +""" +function delete_application( + ApplicationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appintegrations( + "DELETE", + "/applications/$(ApplicationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_application( + ApplicationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "DELETE", + "/applications/$(ApplicationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_data_integration(identifier) delete_data_integration(identifier, params::Dict{String,<:Any}) @@ -215,6 +316,40 @@ function delete_event_integration( ) end +""" + get_application(application_identifier) + get_application(application_identifier, params::Dict{String,<:Any}) + +This API is in preview release and subject to change. Get an Application resource. + +# Arguments +- `application_identifier`: The Amazon Resource Name (ARN) of the Application. 
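+
+# Example
+A minimal, hypothetical sketch; the ARN below is a placeholder for illustration only and
+does not refer to a real application:
+```julia
+# Placeholder ARN for illustration only.
+app_arn = "arn:aws:app-integrations:us-east-1:111122223333:application/EXAMPLE"
+app = get_application(app_arn)
+```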
+ +""" +function get_application( + ApplicationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appintegrations( + "GET", + "/applications/$(ApplicationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_application( + ApplicationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "GET", + "/applications/$(ApplicationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_data_integration(identifier) get_data_integration(identifier, params::Dict{String,<:Any}) @@ -279,6 +414,74 @@ function get_event_integration( ) end +""" + list_application_associations(application_identifier) + list_application_associations(application_identifier, params::Dict{String,<:Any}) + +Returns a paginated list of application associations for an application. + +# Arguments +- `application_identifier`: A unique identifier for the Application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_application_associations( + ApplicationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appintegrations( + "GET", + "/applications/$(ApplicationIdentifier)/associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_associations( + ApplicationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "GET", + "/applications/$(ApplicationIdentifier)/associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_applications() + list_applications(params::Dict{String,<:Any}) + +This API is in preview release and subject to change. Lists applications in the account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_applications(; aws_config::AbstractAWSConfig=global_aws_config()) + return appintegrations( + "GET", "/applications"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_applications( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appintegrations( + "GET", + "/applications", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_data_integration_associations(identifier) list_data_integration_associations(identifier, params::Dict{String,<:Any}) @@ -526,6 +729,51 @@ function untag_resource( ) end +""" + update_application(application_identifier) + update_application(application_identifier, params::Dict{String,<:Any}) + +This API is in preview release and subject to change. Updates and persists an Application +resource. + +# Arguments +- `application_identifier`: The Amazon Resource Name (ARN) of the Application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ApplicationSourceConfig"`: The configuration for where the application should be loaded + from. +- `"Description"`: The description of the application. +- `"Name"`: The name of the application. +- `"Permissions"`: The configuration of events or requests that the application has access + to. +- `"Publications"`: The events that the application publishes. +- `"Subscriptions"`: The events that the application subscribes. +""" +function update_application( + ApplicationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appintegrations( + "PATCH", + "/applications/$(ApplicationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_application( + ApplicationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "PATCH", + "/applications/$(ApplicationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_data_integration(identifier) update_data_integration(identifier, params::Dict{String,<:Any}) @@ -577,7 +825,7 @@ Updates the description of an event integration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the event inegration. +- `"Description"`: The description of the event integration. """ function update_event_integration(Name; aws_config::AbstractAWSConfig=global_aws_config()) return appintegrations( diff --git a/src/services/application_auto_scaling.jl b/src/services/application_auto_scaling.jl index 43a25dc78d..e50633819a 100644 --- a/src/services/application_auto_scaling.jl +++ b/src/services/application_auto_scaling.jl @@ -53,7 +53,9 @@ scaling policy in the Application Auto Scaling User Guide. Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `scalable_dimension`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The @@ -83,7 +85,8 @@ scaling policy in the Application Auto Scaling User Guide. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -180,7 +183,9 @@ more information, see Delete a scheduled action in the Application Auto Scaling Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. 
SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `scalable_dimension`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The @@ -210,7 +215,8 @@ more information, see Delete a scheduled action in the Application Auto Scaling Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. - `scheduled_action_name`: The name of the scheduled action. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource @@ -310,7 +316,9 @@ with it. Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired task count of an ECS service. @@ -341,7 +349,8 @@ with it. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -446,7 +455,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `"ScalableDimension"`: The scalable dimension associated with the scalable target. 
This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. @@ -478,7 +489,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. """ function describe_scalable_targets( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -571,7 +583,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an @@ -602,7 +616,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. """ function describe_scaling_activities( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -691,7 +706,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an @@ -722,7 +739,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. 
sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. """ function describe_scaling_policies( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -810,7 +828,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an @@ -841,7 +861,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. - `"ScheduledActionNames"`: The names of the scheduled actions to describe. """ function describe_scheduled_actions( @@ -977,7 +998,9 @@ scaling policies that were specified for the scalable target are deleted. Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `scalable_dimension`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The @@ -1007,7 +1030,8 @@ scaling policies that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1016,7 +1040,7 @@ scaling policies that were specified for the scalable target are deleted. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"PolicyType"`: The scaling policy type. 
This parameter is required if you are creating a scaling policy. The following policy types are supported: TargetTrackingScaling—Not - supported for Amazon EMR StepScaling—Not supported for DynamoDB, Amazon Comprehend, + supported for Amazon EMR. StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune. For more information, see Target tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide. @@ -1126,7 +1150,9 @@ scheduled actions that were specified for the scalable target are deleted. Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `scalable_dimension`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The @@ -1156,7 +1182,8 @@ scheduled actions that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. - `scheduled_action_name`: The name of the scheduled action. This name must be unique among all other scheduled actions on the specified scalable target. - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -1301,7 +1328,9 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: - endpoint/my-end-point/variant/KMeansClustering. + endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The + resource type is inference-component and the unique identifier is the resource ID. Example: + inference-component/my-inference-component. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired task count of an ECS service. @@ -1332,7 +1361,8 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. + SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + number of copies across an endpoint for a SageMaker inference component. 
- `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. diff --git a/src/services/application_discovery_service.jl b/src/services/application_discovery_service.jl index a4ce003ec0..9481d0871d 100644 --- a/src/services/application_discovery_service.jl +++ b/src/services/application_discovery_service.jl @@ -55,6 +55,43 @@ function associate_configuration_items_to_application( ) end +""" + batch_delete_agents(delete_agents) + batch_delete_agents(delete_agents, params::Dict{String,<:Any}) + + Deletes one or more agents or collectors as specified by ID. Deleting an agent or +collector does not delete the previously discovered data. To delete the data collected, use +StartBatchDeleteConfigurationTask. + +# Arguments +- `delete_agents`: The list of agents to delete. + +""" +function batch_delete_agents( + deleteAgents; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_discovery_service( + "BatchDeleteAgents", + Dict{String,Any}("deleteAgents" => deleteAgents); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_delete_agents( + deleteAgents, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_discovery_service( + "BatchDeleteAgents", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("deleteAgents" => deleteAgents), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_delete_import_data(import_task_ids) batch_delete_import_data(import_task_ids, params::Dict{String,<:Any}) @@ -70,6 +107,10 @@ records that comes from the deleted records will also be deleted. # Arguments - `import_task_ids`: The IDs for the import tasks that you want to delete. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deleteHistory"`: Set to true to remove the deleted import task from + DescribeImportTasks. """ function batch_delete_import_data( importTaskIds; aws_config::AbstractAWSConfig=global_aws_config() @@ -255,18 +296,18 @@ end describe_agents() describe_agents(params::Dict{String,<:Any}) -Lists agents or connectors as specified by ID or other filters. All agents/connectors -associated with your user account can be listed if you call DescribeAgents as is without -passing any parameters. +Lists agents or collectors as specified by ID or other filters. All agents/collectors +associated with your user can be listed if you call DescribeAgents as is without passing +any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"agentIds"`: The agent or the Connector IDs for which you want information. If you - specify no IDs, the system returns information about all agents/Connectors associated with - your Amazon Web Services user account. +- `"agentIds"`: The agent or the collector IDs for which you want information. If you + specify no IDs, the system returns information about all agents/collectors associated with + your user. - `"filters"`: You can filter the request using various logical operators and a key-value format. For example: {\"key\": \"collectionStatus\", \"value\": \"STARTED\"} -- `"maxResults"`: The total number of agents/Connectors to return in a single page of +- `"maxResults"`: The total number of agents/collectors to return in a single page of output. The maximum value is 100. 
- `"nextToken"`: Token to retrieve the next set of results. For example, if you previously specified 100 IDs for DescribeAgentsRequestagentIds but set DescribeAgentsRequestmaxResults @@ -286,6 +327,38 @@ function describe_agents( ) end +""" + describe_batch_delete_configuration_task(task_id) + describe_batch_delete_configuration_task(task_id, params::Dict{String,<:Any}) + + Takes a unique deletion task identifier as input and returns metadata about a +configuration deletion task. + +# Arguments +- `task_id`: The ID of the task to delete. + +""" +function describe_batch_delete_configuration_task( + taskId; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_discovery_service( + "DescribeBatchDeleteConfigurationTask", + Dict{String,Any}("taskId" => taskId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_batch_delete_configuration_task( + taskId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_discovery_service( + "DescribeBatchDeleteConfigurationTask", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("taskId" => taskId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_configurations(configuration_ids) describe_configurations(configuration_ids, params::Dict{String,<:Any}) @@ -333,8 +406,8 @@ end describe_continuous_exports() describe_continuous_exports(params::Dict{String,<:Any}) -Lists exports as specified by ID. All continuous exports associated with your user account -can be listed if you call DescribeContinuousExports as is without passing any parameters. +Lists exports as specified by ID. All continuous exports associated with your user can be +listed if you call DescribeContinuousExports as is without passing any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -466,8 +539,8 @@ end Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters. There are three valid tag filter names: tagKey tagValue configurationId Also, all configuration items associated -with your user account that have tags can be listed if you call DescribeTags as is without -passing any parameters. +with your user that have tags can be listed if you call DescribeTags as is without passing +any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -695,6 +768,54 @@ function list_server_neighbors( ) end +""" + start_batch_delete_configuration_task(configuration_ids, configuration_type) + start_batch_delete_configuration_task(configuration_ids, configuration_type, params::Dict{String,<:Any}) + + Takes a list of configurationId as input and starts an asynchronous deletion task to +remove the configurationItems. Returns a unique deletion task identifier. + +# Arguments +- `configuration_ids`: The list of configuration IDs that will be deleted by the task. +- `configuration_type`: The type of configuration item to delete. Supported types are: + SERVER. 
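+
+# Example
+A minimal, hypothetical sketch (placeholder configuration IDs) that starts a deletion
+task for two discovered servers. The response carries the unique deletion task identifier
+mentioned above, which can later be passed to describe_batch_delete_configuration_task:
+```julia
+ids = ["d-server-EXAMPLE1111111111", "d-server-EXAMPLE2222222222"]  # placeholder IDs
+resp = start_batch_delete_configuration_task(ids, "SERVER")
+```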
+ +""" +function start_batch_delete_configuration_task( + configurationIds, configurationType; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_discovery_service( + "StartBatchDeleteConfigurationTask", + Dict{String,Any}( + "configurationIds" => configurationIds, "configurationType" => configurationType + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_batch_delete_configuration_task( + configurationIds, + configurationType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_discovery_service( + "StartBatchDeleteConfigurationTask", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configurationIds" => configurationIds, + "configurationType" => configurationType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_continuous_export() start_continuous_export(params::Dict{String,<:Any}) @@ -722,16 +843,15 @@ end start_data_collection_by_agent_ids(agent_ids) start_data_collection_by_agent_ids(agent_ids, params::Dict{String,<:Any}) -Instructs the specified agents or connectors to start collecting data. +Instructs the specified agents to start collecting data. # Arguments -- `agent_ids`: The IDs of the agents or connectors from which to start collecting data. If - you send a request to an agent/connector ID that you do not have permission to contact, - according to your Amazon Web Services account, the service does not throw an exception. - Instead, it returns the error in the Description field. If you send a request to multiple - agents/connectors and you do not have permission to contact some of those - agents/connectors, the system does not throw an exception. Instead, the system shows Failed - in the Description field. +- `agent_ids`: The IDs of the agents from which to start collecting data. If you send a + request to an agent ID that you do not have permission to contact, according to your Amazon + Web Services account, the service does not throw an exception. Instead, it returns the + error in the Description field. If you send a request to multiple agents and you do not + have permission to contact some of those agents, the system does not throw an exception. + Instead, the system shows Failed in the Description field. """ function start_data_collection_by_agent_ids( @@ -763,14 +883,22 @@ end start_export_task() start_export_task(params::Dict{String,<:Any}) - Begins the export of discovered data to an S3 bucket. If you specify agentIds in a -filter, the task exports up to 72 hours of detailed data collected by the identified -Application Discovery Agent, including network, process, and performance details. A time -range for exported agent data may be set by using startTime and endTime. Export of detailed -agent data is limited to five concurrently running exports. If you do not include an -agentIds filter, summary data is exported that includes both Amazon Web Services Agentless -Discovery Connector data and summary data from Amazon Web Services Discovery Agents. Export -of summary data is limited to two exports per day. +Begins the export of a discovered data report to an Amazon S3 bucket managed by Amazon Web +Services. Exports might provide an estimate of fees and savings based on certain +information that you provide. Fee estimates do not include any taxes that might apply. 
Your +actual fees and savings depend on a variety of factors, including your actual usage of +Amazon Web Services services, which might vary from the estimates provided in this report. +If you do not specify preferences or agentIds in the filter, a summary of all servers, +applications, tags, and performance is generated. This data is an aggregation of all server +data collected through on-premises tooling, file import, application grouping and applying +tags. If you specify agentIds in a filter, the task exports up to 72 hours of detailed data +collected by the identified Application Discovery Agent, including network, process, and +performance details. A time range for exported agent data may be set by using startTime and +endTime. Export of detailed agent data is limited to five concurrently running exports. +Export of detailed agent data is limited to two exports per day. If you enable +ec2RecommendationsPreferences in preferences , an Amazon EC2 instance matching the +characteristics of each server in Application Discovery Service is generated. Changing the +attributes of the ec2RecommendationsPreferences changes the criteria of the recommendation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -782,8 +910,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filters"`: If a filter is present, it selects the single agentId of the Application Discovery Agent for which data is exported. The agentId can be found in the results of the DescribeAgents API or CLI. If no filter is present, startTime and endTime are ignored and - exported data includes both Agentless Discovery Connector data and summary data from - Application Discovery agents. + exported data includes both Amazon Web Services Application Discovery Service Agentless + Collector collectors data and summary data from Application Discovery Agent agents. +- `"preferences"`: Indicates the type of data that needs to be exported. Only one + ExportPreferences can be enabled at any time. - `"startTime"`: The start timestamp for exported data from the single Application Discovery Agent selected in the filters. If no value is specified, data is exported starting from the first data collected by the agent. @@ -806,12 +936,14 @@ end start_import_task(import_url, name, params::Dict{String,<:Any}) Starts an import task, which allows you to import details of your on-premises environment -directly into Amazon Web Services Migration Hub without having to use the Application -Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This -gives you the option to perform migration assessment and planning directly from your -imported data, including the ability to group your devices as applications and track their -migration status. To start an import request, do this: Download the specially formatted -comma separated value (CSV) import template, which you can find here: +directly into Amazon Web Services Migration Hub without having to use the Amazon Web +Services Application Discovery Service (Application Discovery Service) tools such as the +Amazon Web Services Application Discovery Service Agentless Collector or Application +Discovery Agent. This gives you the option to perform migration assessment and planning +directly from your imported data, including the ability to group your devices as +applications and track their migration status. 
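As a hedged sketch of the export behaviour described above, the optional `params` dictionary can carry the `filters`, `startTime`, and `endTime` keys. The filter field names, the `agentIds` filter, and the `exportId` response key below are assumptions drawn from the Application Discovery Service API and should be checked before use.

```julia
using AWS
@service Application_Discovery_Service  # module name assumed

# Export up to 72 hours of detailed data for a single agent.
params = Dict{String,Any}(
    "filters" => [Dict{String,Any}(
        "name"      => "agentIds",                  # filter name assumed
        "values"    => ["o-0123456789abcdef"],      # hypothetical agent ID
        "condition" => "EQUALS",
    )],
    "startTime" => "2024-06-24T00:00:00Z",
    "endTime"   => "2024-06-25T00:00:00Z",
)
resp = Application_Discovery_Service.start_export_task(params)
export_id = resp["exportId"]  # response key assumed
```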
To start an import request, do this: +Download the specially formatted comma separated value (CSV) import template, which you can +find here: https://s3.us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_tem plate.csv. Fill out the template with your server and application data. Upload your import file to an Amazon S3 bucket, and make a note of it's Object URL. Your import file @@ -915,10 +1047,10 @@ end stop_data_collection_by_agent_ids(agent_ids) stop_data_collection_by_agent_ids(agent_ids, params::Dict{String,<:Any}) -Instructs the specified agents or connectors to stop collecting data. +Instructs the specified agents to stop collecting data. # Arguments -- `agent_ids`: The IDs of the agents or connectors from which to stop collecting data. +- `agent_ids`: The IDs of the agents from which to stop collecting data. """ function stop_data_collection_by_agent_ids( diff --git a/src/services/application_insights.jl b/src/services/application_insights.jl index d7539f61f9..dcb27ea318 100644 --- a/src/services/application_insights.jl +++ b/src/services/application_insights.jl @@ -4,6 +4,61 @@ using AWS.AWSServices: application_insights using AWS.Compat using AWS.UUIDs +""" + add_workload(component_name, resource_group_name, workload_configuration) + add_workload(component_name, resource_group_name, workload_configuration, params::Dict{String,<:Any}) + +Adds a workload to a component. Each component can have at most five workloads. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_configuration`: The configuration settings of the workload. The value is the + escaped JSON of the configuration. + +""" +function add_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "AddWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function add_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "AddWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_application() create_application(params::Dict{String,<:Any}) @@ -12,6 +67,8 @@ Adds an application that is created from a resource group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttachMissingPermission"`: If set to true, the managed policies for SSM and CW will be + attached to the instance roles if they are missing. - `"AutoConfigEnabled"`: Indicates whether Application Insights automatically configures unmonitored resources in the resource group. - `"AutoCreate"`: Configures all of the resources in the resource group by applying the @@ -318,6 +375,9 @@ Describes the application. # Arguments - `resource_group_name`: The name of the resource group. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_application( ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -356,6 +416,9 @@ Describes a component and lists the resources that are grouped together in a com - `component_name`: The name of the component. - `resource_group_name`: The name of the resource group. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_component( ComponentName, ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -402,6 +465,9 @@ Describes the monitoring configuration of the component. - `component_name`: The name of the component. - `resource_group_name`: The name of the resource group. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_component_configuration( ComponentName, ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -449,6 +515,10 @@ Describes the recommended monitoring configuration of the component. - `resource_group_name`: The name of the resource group. - `tier`: The tier of the application component. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"RecommendationType"`: The recommended configuration type. +- `"WorkloadName"`: The name of the workload. """ function describe_component_configuration_recommendation( ComponentName, @@ -503,6 +573,9 @@ Describe a specific log pattern from a LogPatternSet. - `pattern_set_name`: The name of the log pattern set. - `resource_group_name`: The name of the resource group. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_log_pattern( PatternName, @@ -555,6 +628,9 @@ Describes an anomaly or error with the application. # Arguments - `observation_id`: The ID of the observation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_observation( ObservationId; aws_config::AbstractAWSConfig=global_aws_config() @@ -590,6 +666,10 @@ Describes an application problem. # Arguments - `problem_id`: The ID of the problem. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the owner of the resource group affected by the + problem. """ function describe_problem(ProblemId; aws_config::AbstractAWSConfig=global_aws_config()) return application_insights( @@ -623,6 +703,9 @@ Describes the anomalies or errors associated with the problem. # Arguments - `problem_id`: The ID of the problem. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. 
""" function describe_problem_observations( ProblemId; aws_config::AbstractAWSConfig=global_aws_config() @@ -649,6 +732,63 @@ function describe_problem_observations( ) end +""" + describe_workload(component_name, resource_group_name, workload_id) + describe_workload(component_name, resource_group_name, workload_id, params::Dict{String,<:Any}) + +Describes a workload and its configuration. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_id`: The ID of the workload. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the workload owner. +""" +function describe_workload( + ComponentName, + ResourceGroupName, + WorkloadId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "DescribeWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_workload( + ComponentName, + ResourceGroupName, + WorkloadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "DescribeWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_applications() list_applications(params::Dict{String,<:Any}) @@ -657,6 +797,7 @@ Lists the IDs of the applications that you are monitoring. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. @@ -685,6 +826,7 @@ Lists the auto-grouped, standalone, and custom components of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. @@ -728,6 +870,7 @@ quotas. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"EndTime"`: The end time of the event. - `"EventStatus"`: The status of the configuration update event. Possible values include INFO, WARN, and ERROR. @@ -771,6 +914,7 @@ Lists the log pattern sets in the specific application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. 
@@ -813,6 +957,7 @@ Lists the log patterns in the specific log LogPatternSet. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. @@ -853,6 +998,7 @@ Lists the problems with your application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"ComponentName"`: The name of the component. - `"EndTime"`: The time when the problem ended, in epoch seconds. If not specified, problems within the past seven days are returned. @@ -862,6 +1008,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceGroupName"`: The name of the resource group. - `"StartTime"`: The time when the problem was detected, in epoch seconds. If you don't specify a time frame for the request, problems within the past seven days are returned. +- `"Visibility"`: Specifies whether or not you can view the problem. If not specified, + visible and ignored problems are returned. """ function list_problems(; aws_config::AbstractAWSConfig=global_aws_config()) return application_insights( @@ -916,6 +1064,112 @@ function list_tags_for_resource( ) end +""" + list_workloads(component_name, resource_group_name) + list_workloads(component_name, resource_group_name, params::Dict{String,<:Any}) + +Lists the workloads that are configured on a given component. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID of the owner of the workload. +- `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the + remaining results, make another call with the returned NextToken value. +- `"NextToken"`: The token to request the next page of results. +""" +function list_workloads( + ComponentName, ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_insights( + "ListWorkloads", + Dict{String,Any}( + "ComponentName" => ComponentName, "ResourceGroupName" => ResourceGroupName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workloads( + ComponentName, + ResourceGroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "ListWorkloads", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + remove_workload(component_name, resource_group_name, workload_id) + remove_workload(component_name, resource_group_name, workload_id, params::Dict{String,<:Any}) + +Remove workload from a component. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_id`: The ID of the workload. 
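A paginated listing sketch for the `list_workloads` operation above, looping on the `"NextToken"` value described in its optional parameters. The `"Workloads"` response key is assumed, and the names passed in are placeholders.

```julia
using AWS
@service Application_Insights  # module name assumed

function all_workloads(component, resource_group)
    workloads = Any[]
    params = Dict{String,Any}("MaxResults" => 20)
    while true
        resp = Application_Insights.list_workloads(component, resource_group, params)
        append!(workloads, get(resp, "Workloads", []))  # response key assumed
        token = get(resp, "NextToken", nothing)
        token === nothing && break
        params["NextToken"] = token
    end
    return workloads
end

all_workloads("my-web-tier", "my-resource-group")  # hypothetical names
```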
+ +""" +function remove_workload( + ComponentName, + ResourceGroupName, + WorkloadId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "RemoveWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function remove_workload( + ComponentName, + ResourceGroupName, + WorkloadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "RemoveWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -1019,6 +1273,8 @@ Updates the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttachMissingPermission"`: If set to true, the managed policies for SSM and CW will be + attached to the instance roles if they are missing. - `"AutoConfigEnabled"`: Turns auto-configuration on or off. - `"CWEMonitorEnabled"`: Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated, failed deployment, and @@ -1231,3 +1487,101 @@ function update_log_pattern( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_problem(problem_id) + update_problem(problem_id, params::Dict{String,<:Any}) + +Updates the visibility of the problem or specifies the problem as RESOLVED. + +# Arguments +- `problem_id`: The ID of the problem. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"UpdateStatus"`: The status of the problem. Arguments can be passed for only problems + that show a status of RECOVERING. +- `"Visibility"`: The visibility of a problem. When you pass a value of IGNORED, the + problem is removed from the default view, and all notifications for the problem are + suspended. When VISIBLE is passed, the IGNORED action is reversed. +""" +function update_problem(ProblemId; aws_config::AbstractAWSConfig=global_aws_config()) + return application_insights( + "UpdateProblem", + Dict{String,Any}("ProblemId" => ProblemId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_problem( + ProblemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "UpdateProblem", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ProblemId" => ProblemId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_workload(component_name, resource_group_name, workload_configuration) + update_workload(component_name, resource_group_name, workload_configuration, params::Dict{String,<:Any}) + +Adds a workload to a component. Each component can have at most five workloads. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_configuration`: The configuration settings of the workload. The value is the + escaped JSON of the configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"WorkloadId"`: The ID of the workload. +""" +function update_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "UpdateWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "UpdateWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/application_signals.jl b/src/services/application_signals.jl new file mode 100644 index 0000000000..00ef2fb82c --- /dev/null +++ b/src/services/application_signals.jl @@ -0,0 +1,754 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: application_signals +using AWS.Compat +using AWS.UUIDs + +""" + batch_get_service_level_objective_budget_report(slo_ids, timestamp) + batch_get_service_level_objective_budget_report(slo_ids, timestamp, params::Dict{String,<:Any}) + +Use this operation to retrieve one or more service level objective (SLO) budget reports. An +error budget is the amount of time in unhealthy periods that your service can accumulate +during an interval before your overall SLO budget health is breached and the SLO is +considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly +interval translates to an error budget of 21.9 minutes of downtime in a 30-day month. +Budget reports include a health indicator, the attainment value, and remaining budget. For +more information about SLO error budgets, see SLO concepts. + +# Arguments +- `slo_ids`: An array containing the IDs of the service level objectives that you want to + include in the report. +- `timestamp`: The date and time that you want the report to be for. It is expressed as the + number of milliseconds since Jan 1, 1970 00:00:00 UTC. + +""" +function batch_get_service_level_objective_budget_report( + SloIds, Timestamp; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/budget-report", + Dict{String,Any}("SloIds" => SloIds, "Timestamp" => Timestamp); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_service_level_objective_budget_report( + SloIds, + Timestamp, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/budget-report", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("SloIds" => SloIds, "Timestamp" => Timestamp), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_service_level_objective(name, sli_config) + create_service_level_objective(name, sli_config, params::Dict{String,<:Any}) + +Creates a service level objective (SLO), which can help you ensure that your critical +business operations are meeting customer expectations. 
Use SLOs to set and track specific +target levels for the reliability and availability of your applications and services. SLOs +use service level indicators (SLIs) to calculate whether the application is performing at +the level that you want. Create an SLO to set a target for a service or operation’s +availability or latency. CloudWatch measures this target frequently you can find whether it +has been breached. When you create an SLO, you set an attainment goal for it. An +attainment goal is the ratio of good periods that meet the threshold requirements to the +total periods within the interval. For example, an attainment goal of 99.9% means that +within your interval, you are targeting 99.9% of the periods to be in healthy state. After +you have created an SLO, you can retrieve error budget reports for it. An error budget is +the number of periods or amount of time that your service can accumulate during an interval +before your overall SLO budget health is breached and the SLO is considered to be unmet. +for example, an SLO with a threshold that 99.95% of requests must be completed under 2000ms +every month translates to an error budget of 21.9 minutes of downtime per month. When you +call this operation, Application Signals creates the +AWSServiceRoleForCloudWatchApplicationSignals service-linked role, if it doesn't already +exist in your account. This service- linked role has the following permissions: +xray:GetServiceGraph logs:StartQuery logs:GetQueryResults +cloudwatch:GetMetricData cloudwatch:ListMetrics tag:GetResources +autoscaling:DescribeAutoScalingGroups You can easily set SLO targets for your +applications that are discovered by Application Signals, using critical metrics such as +latency and availability. You can also set SLOs against any CloudWatch metric or math +expression that produces a time series. For more information about SLOs, see Service level +objectives (SLOs). + +# Arguments +- `name`: A name for this SLO. +- `sli_config`: A structure that contains information about what service and what + performance metric that this SLO will monitor. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: An optional description for this SLO. +- `"Goal"`: A structure that contains the attributes that determine the goal of the SLO. + This includes the time period for evaluation and the attainment threshold. +- `"Tags"`: A list of key-value pairs to associate with the SLO. You can associate as many + as 50 tags with an SLO. To be able to associate tags with the SLO when you create the SLO, + you must have the cloudwatch:TagResource permission. Tags can help you organize and + categorize your resources. You can also use them to scope user permissions by granting a + user permission to access or change only resources with certain tag values. 
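A hedged sketch of creating an SLO with the operation documented above. The exact shapes of `SliConfig` and `Goal` are defined by the Application Signals API and are not spelled out in this file, so the nested field names below are illustrative placeholders only.

```julia
using AWS
@service Application_Signals  # module name assumed

# Field names inside SliConfig and Goal are assumptions; consult the API reference.
sli_config = Dict{String,Any}(
    "SliMetricConfig"    => Dict{String,Any}("MetricType" => "LATENCY"),
    "MetricThreshold"    => 2000,
    "ComparisonOperator" => "LessThan",
)
params = Dict{String,Any}(
    "Description" => "Checkout latency under 2s",
    "Goal"        => Dict{String,Any}("AttainmentGoal" => 99.9),
)
resp = Application_Signals.create_service_level_objective(
    "checkout-latency-slo", sli_config, params
)
```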
+""" +function create_service_level_objective( + Name, SliConfig; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/slo", + Dict{String,Any}("Name" => Name, "SliConfig" => SliConfig); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_service_level_objective( + Name, + SliConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/slo", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("Name" => Name, "SliConfig" => SliConfig), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_service_level_objective(id) + delete_service_level_objective(id, params::Dict{String,<:Any}) + +Deletes the specified service level objective. + +# Arguments +- `id`: The ARN or name of the service level objective to delete. + +""" +function delete_service_level_objective( + Id; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "DELETE", "/slo/$(Id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function delete_service_level_objective( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "DELETE", + "/slo/$(Id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_service(end_time, key_attributes, start_time) + get_service(end_time, key_attributes, start_time, params::Dict{String,<:Any}) + +Returns information about a service discovered by Application Signals. + +# Arguments +- `end_time`: The end of the time period to retrieve information about. When used in a raw + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 +- `key_attributes`: Use this field to specify which service you want to retrieve + information for. You must specify at least the Type, Name, and Environment attributes. This + is a string-to-string map. It can include the following fields. Type designates the type + of object this is. ResourceType specifies the type of the resource. This field is used + only when the value of the Type field is Resource or AWS::Resource. Name specifies the + name of the object. This is used only if the value of the Type field is Service, + RemoteService, or AWS::Service. Identifier identifies the resource objects of this + resource. This is used only if the value of the Type field is Resource or AWS::Resource. + Environment specifies the location where this object is hosted, or what it belongs to. +- `start_time`: The start of the time period to retrieve information about. When used in a + raw HTTP Query API, it is formatted as be epoch time in seconds. 
For example: 1698778057 + +""" +function get_service( + EndTime, KeyAttributes, StartTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/service", + Dict{String,Any}( + "EndTime" => EndTime, "KeyAttributes" => KeyAttributes, "StartTime" => StartTime + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_service( + EndTime, + KeyAttributes, + StartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/service", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EndTime" => EndTime, + "KeyAttributes" => KeyAttributes, + "StartTime" => StartTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_service_level_objective(id) + get_service_level_objective(id, params::Dict{String,<:Any}) + +Returns information about one SLO created in the account. + +# Arguments +- `id`: The ARN or name of the SLO that you want to retrieve information about. You can + find the ARNs of SLOs by using the ListServiceLevelObjectives operation. + +""" +function get_service_level_objective(Id; aws_config::AbstractAWSConfig=global_aws_config()) + return application_signals( + "GET", "/slo/$(Id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_service_level_objective( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "GET", "/slo/$(Id)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_service_dependencies(end_time, key_attributes, start_time) + list_service_dependencies(end_time, key_attributes, start_time, params::Dict{String,<:Any}) + +Returns a list of service dependencies of the service that you specify. A dependency is an +infrastructure component that an operation of this service connects with. Dependencies can +include Amazon Web Services services, Amazon Web Services resources, and third-party +services. + +# Arguments +- `end_time`: The end of the time period to retrieve information about. When used in a raw + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 +- `key_attributes`: Use this field to specify which service you want to retrieve + information for. You must specify at least the Type, Name, and Environment attributes. This + is a string-to-string map. It can include the following fields. Type designates the type + of object this is. ResourceType specifies the type of the resource. This field is used + only when the value of the Type field is Resource or AWS::Resource. Name specifies the + name of the object. This is used only if the value of the Type field is Service, + RemoteService, or AWS::Service. Identifier identifies the resource objects of this + resource. This is used only if the value of the Type field is Resource or AWS::Resource. + Environment specifies the location where this object is hosted, or what it belongs to. +- `start_time`: The start of the time period to retrieve information about. When used in a + raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in one operation. If you omit + this parameter, the default of 50 is used. 
+- `"NextToken"`: Include this value, if it was returned by the previous operation, to get + the next set of service dependencies. +""" +function list_service_dependencies( + EndTime, KeyAttributes, StartTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/service-dependencies", + Dict{String,Any}( + "EndTime" => EndTime, "KeyAttributes" => KeyAttributes, "StartTime" => StartTime + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_service_dependencies( + EndTime, + KeyAttributes, + StartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/service-dependencies", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EndTime" => EndTime, + "KeyAttributes" => KeyAttributes, + "StartTime" => StartTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_service_dependents(end_time, key_attributes, start_time) + list_service_dependents(end_time, key_attributes, start_time, params::Dict{String,<:Any}) + +Returns the list of dependents that invoked the specified service during the provided time +range. Dependents include other services, CloudWatch Synthetics canaries, and clients that +are instrumented with CloudWatch RUM app monitors. + +# Arguments +- `end_time`: The end of the time period to retrieve information about. When used in a raw + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 +- `key_attributes`: Use this field to specify which service you want to retrieve + information for. You must specify at least the Type, Name, and Environment attributes. This + is a string-to-string map. It can include the following fields. Type designates the type + of object this is. ResourceType specifies the type of the resource. This field is used + only when the value of the Type field is Resource or AWS::Resource. Name specifies the + name of the object. This is used only if the value of the Type field is Service, + RemoteService, or AWS::Service. Identifier identifies the resource objects of this + resource. This is used only if the value of the Type field is Resource or AWS::Resource. + Environment specifies the location where this object is hosted, or what it belongs to. +- `start_time`: The start of the time period to retrieve information about. When used in a + raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in one operation. If you omit + this parameter, the default of 50 is used. +- `"NextToken"`: Include this value, if it was returned by the previous operation, to get + the next set of service dependents. 
+""" +function list_service_dependents( + EndTime, KeyAttributes, StartTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/service-dependents", + Dict{String,Any}( + "EndTime" => EndTime, "KeyAttributes" => KeyAttributes, "StartTime" => StartTime + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_service_dependents( + EndTime, + KeyAttributes, + StartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/service-dependents", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EndTime" => EndTime, + "KeyAttributes" => KeyAttributes, + "StartTime" => StartTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_service_level_objectives() + list_service_level_objectives(params::Dict{String,<:Any}) + +Returns a list of SLOs created in this account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"KeyAttributes"`: You can use this optional field to specify which services you want to + retrieve SLO information for. This is a string-to-string map. It can include the following + fields. Type designates the type of object this is. ResourceType specifies the type + of the resource. This field is used only when the value of the Type field is Resource or + AWS::Resource. Name specifies the name of the object. This is used only if the value of + the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the + resource objects of this resource. This is used only if the value of the Type field is + Resource or AWS::Resource. Environment specifies the location where this object is + hosted, or what it belongs to. +- `"MaxResults"`: The maximum number of results to return in one operation. If you omit + this parameter, the default of 50 is used. +- `"NextToken"`: Include this value, if it was returned by the previous operation, to get + the next set of service level objectives. +- `"OperationName"`: The name of the operation that this SLO is associated with. +""" +function list_service_level_objectives(; aws_config::AbstractAWSConfig=global_aws_config()) + return application_signals( + "POST", "/slos"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_service_level_objectives( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", "/slos", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_service_operations(end_time, key_attributes, start_time) + list_service_operations(end_time, key_attributes, start_time, params::Dict{String,<:Any}) + +Returns a list of the operations of this service that have been discovered by Application +Signals. Only the operations that were invoked during the specified time range are returned. + +# Arguments +- `end_time`: The end of the time period to retrieve information about. When used in a raw + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 +- `key_attributes`: Use this field to specify which service you want to retrieve + information for. You must specify at least the Type, Name, and Environment attributes. This + is a string-to-string map. It can include the following fields. Type designates the type + of object this is. ResourceType specifies the type of the resource. 
This field is used + only when the value of the Type field is Resource or AWS::Resource. Name specifies the + name of the object. This is used only if the value of the Type field is Service, + RemoteService, or AWS::Service. Identifier identifies the resource objects of this + resource. This is used only if the value of the Type field is Resource or AWS::Resource. + Environment specifies the location where this object is hosted, or what it belongs to. +- `start_time`: The start of the time period to retrieve information about. When used in a + raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in one operation. If you omit + this parameter, the default of 50 is used. +- `"NextToken"`: Include this value, if it was returned by the previous operation, to get + the next set of service operations. +""" +function list_service_operations( + EndTime, KeyAttributes, StartTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/service-operations", + Dict{String,Any}( + "EndTime" => EndTime, "KeyAttributes" => KeyAttributes, "StartTime" => StartTime + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_service_operations( + EndTime, + KeyAttributes, + StartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/service-operations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EndTime" => EndTime, + "KeyAttributes" => KeyAttributes, + "StartTime" => StartTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_services(end_time, start_time) + list_services(end_time, start_time, params::Dict{String,<:Any}) + +Returns a list of services that have been discovered by Application Signals. A service +represents a minimum logical and transactional unit that completes a business function. +Services are discovered through Application Signals instrumentation. + +# Arguments +- `end_time`: The end of the time period to retrieve information about. When used in a raw + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 +- `start_time`: The start of the time period to retrieve information about. When used in a + raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in one operation. If you omit + this parameter, the default of 50 is used. +- `"NextToken"`: Include this value, if it was returned by the previous operation, to get + the next set of services. 
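To enumerate the services discovered over a time window, `list_services` takes the same epoch-second time range; a short sketch follows, in which the `"ServiceSummaries"` response key is an assumption.

```julia
using AWS
using Dates
@service Application_Signals  # module name assumed

end_time   = floor(Int, datetime2unix(now(UTC)))
start_time = end_time - 60 * 60   # last hour, in epoch seconds

resp = Application_Signals.list_services(
    end_time, start_time, Dict{String,Any}("MaxResults" => 50)
)
for svc in get(resp, "ServiceSummaries", [])   # response key assumed
    @info "discovered service" svc
end
```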
+""" +function list_services( + EndTime, StartTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "GET", + "/services", + Dict{String,Any}("EndTime" => EndTime, "StartTime" => StartTime); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_services( + EndTime, + StartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "GET", + "/services", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("EndTime" => EndTime, "StartTime" => StartTime), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Displays the tags associated with a CloudWatch resource. Tags can be assigned to service +level objectives. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the CloudWatch resource that you want + to view tags for. The ARN format of an Application Signals SLO is + arn:aws:cloudwatch:Region:account-id:slo:slo-name For more information about ARN format, + see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General + Reference. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "GET", + "/tags", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "GET", + "/tags", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_discovery() + start_discovery(params::Dict{String,<:Any}) + +Enables this Amazon Web Services account to be able to use CloudWatch Application Signals +by creating the AWSServiceRoleForCloudWatchApplicationSignals service-linked role. This +service- linked role has the following permissions: xray:GetServiceGraph +logs:StartQuery logs:GetQueryResults cloudwatch:GetMetricData +cloudwatch:ListMetrics tag:GetResources autoscaling:DescribeAutoScalingGroups +After completing this step, you still need to instrument your Java and Python applications +to send data to Application Signals. For more information, see Enabling Application +Signals. + +""" +function start_discovery(; aws_config::AbstractAWSConfig=global_aws_config()) + return application_signals( + "POST", "/start-discovery"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function start_discovery( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/start-discovery", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns one or more tags (key-value pairs) to the specified CloudWatch resource, such as a +service level objective. Tags can help you organize and categorize your resources. You can +also use them to scope user permissions by granting a user permission to access or change +only resources with certain tag values. 
Tags don't have any semantic meaning to Amazon Web +Services and are interpreted strictly as strings of characters. You can use the TagResource +action with an alarm that already has tags. If you specify a new tag key for the alarm, +this tag is appended to the list of tags associated with the alarm. If you specify a tag +key that is already associated with the alarm, the new tag value that you specify replaces +the previous value for that tag. You can associate as many as 50 tags with a CloudWatch +resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the CloudWatch resource that you want + to set tags for. The ARN format of an Application Signals SLO is + arn:aws:cloudwatch:Region:account-id:slo:slo-name For more information about ARN format, + see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General + Reference. +- `tags`: The list of key-value pairs to associate with the alarm. + +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return application_signals( + "POST", + "/tag-resource", + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/tag-resource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes one or more tags from the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the CloudWatch resource that you want + to delete tags from. The ARN format of an Application Signals SLO is + arn:aws:cloudwatch:Region:account-id:slo:slo-name For more information about ARN format, + see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General + Reference. +- `tag_keys`: The list of tag keys to remove from the resource. + +""" +function untag_resource( + ResourceArn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "POST", + "/untag-resource", + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_signals( + "POST", + "/untag-resource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_service_level_objective(id) + update_service_level_objective(id, params::Dict{String,<:Any}) + +Updates an existing service level objective (SLO). If you omit parameters, the previous +values of those parameters are retained. + +# Arguments +- `id`: The Amazon Resource Name (ARN) or name of the service level objective that you want + to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: An optional description for the SLO. 
+- `"Goal"`: A structure that contains the attributes that determine the goal of the SLO. + This includes the time period for evaluation and the attainment threshold. +- `"SliConfig"`: A structure that contains information about what performance metric this + SLO will monitor. +""" +function update_service_level_objective( + Id; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "PATCH", "/slo/$(Id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_service_level_objective( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_signals( + "PATCH", + "/slo/$(Id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/apprunner.jl b/src/services/apprunner.jl index 84402dcad7..c7b3f2dc41 100644 --- a/src/services/apprunner.jl +++ b/src/services/apprunner.jl @@ -79,12 +79,16 @@ responsiveness during peak demand. - `auto_scaling_configuration_name`: A name for the auto scaling configuration. When you use it for the first time in an Amazon Web Services Region, App Runner creates revision number 1 of this name. When you use the same name in subsequent calls, App Runner creates - incremental revisions of the configuration. The name DefaultConfiguration is reserved - (it's the configuration that App Runner uses if you don't provide a custome one). You can't - use it to create a new auto scaling configuration, and you can't create a revision of it. - When you want to use your own auto scaling configuration for your App Runner service, - create a configuration with a different name, and then provide it when you create or update - your service. + incremental revisions of the configuration. Prior to the release of Auto scale + configuration enhancements, the name DefaultConfiguration was reserved. This restriction + is no longer in place. You can now manage DefaultConfiguration the same way you manage your + custom auto scaling configurations. This means you can do the following with the + DefaultConfiguration that App Runner provides: Create new revisions of the + DefaultConfiguration. Delete the revisions of the DefaultConfiguration. Delete the auto + scaling configuration for which the App Runner DefaultConfiguration was created. If you + delete the auto scaling configuration you can create another custom auto scaling + configuration with the same DefaultConfiguration name. The original DefaultConfiguration + resource provided by App Runner remains in your account unless you make changes to it. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -141,8 +145,8 @@ end Create an App Runner connection resource. App Runner requires a connection resource when you create App Runner services that access private repositories from certain third-party providers. You can share a connection across multiple services. A connection resource is -needed to access GitHub repositories. GitHub requires a user interface approval process -through the App Runner console before you can use the connection. +needed to access GitHub and Bitbucket repositories. Both require a user interface approval +process through the App Runner console before you can use the connection. # Arguments - `connection_name`: A name for the new connection. 
It must be unique across all App Runner @@ -453,9 +457,10 @@ end delete_auto_scaling_configuration(auto_scaling_configuration_arn) delete_auto_scaling_configuration(auto_scaling_configuration_arn, params::Dict{String,<:Any}) -Delete an App Runner automatic scaling configuration resource. You can delete a specific -revision or the latest active revision. You can't delete a configuration that's used by one -or more App Runner services. +Delete an App Runner automatic scaling configuration resource. You can delete a top level +auto scaling configuration, a specific revision of one, or all revisions associated with +the top level configuration. You can't delete the default auto scaling configuration or a +configuration that's used by one or more App Runner services. # Arguments - `auto_scaling_configuration_arn`: The Amazon Resource Name (ARN) of the App Runner auto @@ -463,6 +468,11 @@ or more App Runner services. configuration ARN, or a partial ARN ending with either .../name or .../name/revision . If a revision isn't specified, the latest active revision is deleted. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeleteAllRevisions"`: Set to true to delete all of the revisions associated with the + AutoScalingConfigurationArn parameter value. When DeleteAllRevisions is set to true, the + only valid value for the Amazon Resource Name (ARN) is a partial ARN ending with: .../name. """ function delete_auto_scaling_configuration( AutoScalingConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1182,6 +1192,59 @@ function list_services( ) end +""" + list_services_for_auto_scaling_configuration(auto_scaling_configuration_arn) + list_services_for_auto_scaling_configuration(auto_scaling_configuration_arn, params::Dict{String,<:Any}) + +Returns a list of the associated App Runner services using an auto scaling configuration. + +# Arguments +- `auto_scaling_configuration_arn`: The Amazon Resource Name (ARN) of the App Runner auto + scaling configuration that you want to list the services for. The ARN can be a full auto + scaling configuration ARN, or a partial ARN ending with either .../name or + .../name/revision . If a revision isn't specified, the latest active revision is used. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to include in each response (result page). + It's used for a paginated request. If you don't specify MaxResults, the request retrieves + all available results in a single response. +- `"NextToken"`: A token from a previous result page. It's used for a paginated request. + The request retrieves the next result page. All other parameter values must be identical to + the ones specified in the initial request. If you don't specify NextToken, the request + retrieves the first result page. 
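The sketch below exercises the new `DeleteAllRevisions` flag together with the service listing documented above. Per the documentation, `DeleteAllRevisions` requires a partial ARN ending in `.../name`; the ARN here is a placeholder, and the module name is assumed.

```julia
using AWS
@service AppRunner  # module name assumed

asc_arn = "arn:aws:apprunner:us-east-1:111122223333:autoscalingconfiguration/my-asc"  # hypothetical partial ARN

# Check which services still use this configuration before deleting it.
in_use = AppRunner.list_services_for_auto_scaling_configuration(asc_arn)

# Delete every revision associated with the configuration name.
AppRunner.delete_auto_scaling_configuration(
    asc_arn, Dict{String,Any}("DeleteAllRevisions" => true)
)
```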
+""" +function list_services_for_auto_scaling_configuration( + AutoScalingConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return apprunner( + "ListServicesForAutoScalingConfiguration", + Dict{String,Any}("AutoScalingConfigurationArn" => AutoScalingConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_services_for_auto_scaling_configuration( + AutoScalingConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apprunner( + "ListServicesForAutoScalingConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AutoScalingConfigurationArn" => AutoScalingConfigurationArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1479,6 +1542,52 @@ function untag_resource( ) end +""" + update_default_auto_scaling_configuration(auto_scaling_configuration_arn) + update_default_auto_scaling_configuration(auto_scaling_configuration_arn, params::Dict{String,<:Any}) + +Update an auto scaling configuration to be the default. The existing default auto scaling +configuration will be set to non-default automatically. + +# Arguments +- `auto_scaling_configuration_arn`: The Amazon Resource Name (ARN) of the App Runner auto + scaling configuration that you want to set as the default. The ARN can be a full auto + scaling configuration ARN, or a partial ARN ending with either .../name or + .../name/revision . If a revision isn't specified, the latest active revision is set as the + default. + +""" +function update_default_auto_scaling_configuration( + AutoScalingConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return apprunner( + "UpdateDefaultAutoScalingConfiguration", + Dict{String,Any}("AutoScalingConfigurationArn" => AutoScalingConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_default_auto_scaling_configuration( + AutoScalingConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apprunner( + "UpdateDefaultAutoScalingConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AutoScalingConfigurationArn" => AutoScalingConfigurationArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_service(service_arn) update_service(service_arn, params::Dict{String,<:Any}) diff --git a/src/services/appstream.jl b/src/services/appstream.jl index 1f15795c3f..b14cd3d09e 100644 --- a/src/services/appstream.jl +++ b/src/services/appstream.jl @@ -4,6 +4,52 @@ using AWS.AWSServices: appstream using AWS.Compat using AWS.UUIDs +""" + associate_app_block_builder_app_block(app_block_arn, app_block_builder_name) + associate_app_block_builder_app_block(app_block_arn, app_block_builder_name, params::Dict{String,<:Any}) + +Associates the specified app block builder with the specified app block. + +# Arguments +- `app_block_arn`: The ARN of the app block. +- `app_block_builder_name`: The name of the app block builder. 
+ +""" +function associate_app_block_builder_app_block( + AppBlockArn, AppBlockBuilderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "AssociateAppBlockBuilderAppBlock", + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, "AppBlockBuilderName" => AppBlockBuilderName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_app_block_builder_app_block( + AppBlockArn, + AppBlockBuilderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "AssociateAppBlockBuilderAppBlock", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, + "AppBlockBuilderName" => AppBlockBuilderName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_application_fleet(application_arn, fleet_name) associate_application_fleet(application_arn, fleet_name, params::Dict{String,<:Any}) @@ -284,8 +330,8 @@ function copy_image( end """ - create_app_block(name, setup_script_details, source_s3_location) - create_app_block(name, setup_script_details, source_s3_location, params::Dict{String,<:Any}) + create_app_block(name, source_s3_location) + create_app_block(name, source_s3_location, params::Dict{String,<:Any}) Creates an app block. App blocks are an Amazon AppStream 2.0 resource that stores the details about the virtual hard disk in an S3 bucket. It also stores the setup script with @@ -296,48 +342,124 @@ fleets. # Arguments - `name`: The name of the app block. -- `setup_script_details`: The setup script details of the app block. - `source_s3_location`: The source S3 location of the app block. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: The description of the app block. - `"DisplayName"`: The display name of the app block. This is not displayed to the user. +- `"PackagingType"`: The packaging type of the app block. +- `"PostSetupScriptDetails"`: The post setup script details of the app block. This can only + be provided for the APPSTREAM2 PackagingType. +- `"SetupScriptDetails"`: The setup script details of the app block. This must be provided + for the CUSTOM PackagingType. - `"Tags"`: The tags assigned to the app block. """ +function create_app_block( + Name, SourceS3Location; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "CreateAppBlock", + Dict{String,Any}("Name" => Name, "SourceS3Location" => SourceS3Location); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end function create_app_block( Name, - SetupScriptDetails, - SourceS3Location; + SourceS3Location, + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return appstream( "CreateAppBlock", Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Name" => Name, "SourceS3Location" => SourceS3Location), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_app_block_builder(instance_type, name, platform, vpc_config) + create_app_block_builder(instance_type, name, platform, vpc_config, params::Dict{String,<:Any}) + +Creates an app block builder. + +# Arguments +- `instance_type`: The instance type to use when launching the app block builder. 
The + following instance types are available: stream.standard.small stream.standard.medium + stream.standard.large stream.standard.xlarge stream.standard.2xlarge +- `name`: The unique name for the app block builder. +- `platform`: The platform of the app block builder. WINDOWS_SERVER_2019 is the only valid + value. +- `vpc_config`: The VPC configuration for the app block builder. App block builders require + that you specify at least two subnets in different availability zones. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessEndpoints"`: The list of interface VPC endpoint (interface endpoint) objects. + Administrators can connect to the app block builder only through the specified endpoints. +- `"Description"`: The description of the app block builder. +- `"DisplayName"`: The display name of the app block builder. +- `"EnableDefaultInternetAccess"`: Enables or disables default internet access for the app + block builder. +- `"IamRoleArn"`: The Amazon Resource Name (ARN) of the IAM role to apply to the app block + builder. To assume a role, the app block builder calls the AWS Security Token Service (STS) + AssumeRole API operation and passes the ARN of the role to use. The operation creates a new + session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and + creates the appstream_machine_role credential profile on the instance. For more + information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running + on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide. +- `"Tags"`: The tags to associate with the app block builder. A tag is a key-value pair, + and the value is optional. For example, Environment=Test. If you do not specify a value, + Environment=. If you do not specify a value, the value is set to an empty string. + Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and + the following special characters: _ . : / = + - @ For more information, see Tagging Your + Resources in the Amazon AppStream 2.0 Administration Guide. +""" +function create_app_block_builder( + InstanceType, + Name, + Platform, + VpcConfig; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "CreateAppBlockBuilder", + Dict{String,Any}( + "InstanceType" => InstanceType, "Name" => Name, - "SetupScriptDetails" => SetupScriptDetails, - "SourceS3Location" => SourceS3Location, + "Platform" => Platform, + "VpcConfig" => VpcConfig, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function create_app_block( +function create_app_block_builder( + InstanceType, Name, - SetupScriptDetails, - SourceS3Location, + Platform, + VpcConfig, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return appstream( - "CreateAppBlock", + "CreateAppBlockBuilder", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( + "InstanceType" => InstanceType, "Name" => Name, - "SetupScriptDetails" => SetupScriptDetails, - "SourceS3Location" => SourceS3Location, + "Platform" => Platform, + "VpcConfig" => VpcConfig, ), params, ), @@ -347,6 +469,49 @@ function create_app_block( ) end +""" + create_app_block_builder_streaming_url(app_block_builder_name) + create_app_block_builder_streaming_url(app_block_builder_name, params::Dict{String,<:Any}) + +Creates a URL to start a create app block builder streaming session. 
+ +# Arguments +- `app_block_builder_name`: The name of the app block builder. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Validity"`: The time that the streaming URL will be valid, in seconds. Specify a value + between 1 and 604800 seconds. The default is 3600 seconds. +""" +function create_app_block_builder_streaming_url( + AppBlockBuilderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "CreateAppBlockBuilderStreamingURL", + Dict{String,Any}("AppBlockBuilderName" => AppBlockBuilderName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_app_block_builder_streaming_url( + AppBlockBuilderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "CreateAppBlockBuilderStreamingURL", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AppBlockBuilderName" => AppBlockBuilderName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_application(app_block_arn, icon_s3_location, instance_families, launch_path, name, platforms) create_application(app_block_arn, icon_s3_location, instance_families, launch_path, name, platforms, params::Dict{String,<:Any}) @@ -639,11 +804,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ImageName"`: The name of the image used to create the fleet. - `"MaxConcurrentSessions"`: The maximum concurrent sessions of the Elastic fleet. This is required for Elastic fleets, and not allowed for other fleet types. +- `"MaxSessionsPerInstance"`: The maximum number of user sessions on an instance. This only + applies to multi-session fleets. - `"MaxUserDurationInSeconds"`: The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a - new instance. Specify a value between 600 and 360000. + new instance. Specify a value between 600 and 432000. - `"Platform"`: The fleet platform. WINDOWS_SERVER_2019 and AMAZON_LINUX2 are supported for Elastic fleets. - `"SessionScriptS3Location"`: The S3 location of the session scripts configuration zip @@ -1103,6 +1270,36 @@ function delete_app_block( ) end +""" + delete_app_block_builder(name) + delete_app_block_builder(name, params::Dict{String,<:Any}) + +Deletes an app block builder. An app block builder can only be deleted when it has no +association with an app block. + +# Arguments +- `name`: The name of the app block builder. 
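+
+# Example
+
+A minimal sketch; the ARN and builder name are placeholders. Because a builder that still
+has an app block association can't be deleted, the association is removed first with
+`disassociate_app_block_builder_app_block`:
+
+    disassociate_app_block_builder_app_block(
+        "arn:aws:appstream:us-east-1:123456789012:app-block/my-app-block",
+        "my-app-block-builder",
+    )
+    delete_app_block_builder("my-app-block-builder")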
+ +""" +function delete_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "DeleteAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DeleteAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_application(name) delete_application(name, params::Dict{String,<:Any}) @@ -1442,6 +1639,69 @@ function delete_user( ) end +""" + describe_app_block_builder_app_block_associations() + describe_app_block_builder_app_block_associations(params::Dict{String,<:Any}) + +Retrieves a list that describes one or more app block builder associations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AppBlockArn"`: The ARN of the app block. +- `"AppBlockBuilderName"`: The name of the app block builder. +- `"MaxResults"`: The maximum size of each page of results. +- `"NextToken"`: The pagination token used to retrieve the next page of results for this + operation. +""" +function describe_app_block_builder_app_block_associations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DescribeAppBlockBuilderAppBlockAssociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_app_block_builder_app_block_associations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DescribeAppBlockBuilderAppBlockAssociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_app_block_builders() + describe_app_block_builders(params::Dict{String,<:Any}) + +Retrieves a list that describes one or more app block builders. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum size of each page of results. The maximum value is 25. +- `"Names"`: The names of the app block builders. +- `"NextToken"`: The pagination token used to retrieve the next page of results for this + operation. +""" +function describe_app_block_builders(; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "DescribeAppBlockBuilders"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_app_block_builders( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DescribeAppBlockBuilders", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_app_blocks() describe_app_blocks(params::Dict{String,<:Any}) @@ -1746,6 +2006,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AuthenticationType"`: The authentication method. Specify API for a user authenticated using a streaming URL or SAML for a SAML federated user. The default is to authenticate users using a streaming URL. +- `"InstanceId"`: The identifier for the instance hosting the session. - `"Limit"`: The size of each page of results. The default value is 20 and the maximum value is 50. 
- `"NextToken"`: The pagination token to use to retrieve the next page of results for this @@ -1969,6 +2230,52 @@ function disable_user( ) end +""" + disassociate_app_block_builder_app_block(app_block_arn, app_block_builder_name) + disassociate_app_block_builder_app_block(app_block_arn, app_block_builder_name, params::Dict{String,<:Any}) + +Disassociates a specified app block builder from a specified app block. + +# Arguments +- `app_block_arn`: The ARN of the app block. +- `app_block_builder_name`: The name of the app block builder. + +""" +function disassociate_app_block_builder_app_block( + AppBlockArn, AppBlockBuilderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DisassociateAppBlockBuilderAppBlock", + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, "AppBlockBuilderName" => AppBlockBuilderName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_app_block_builder_app_block( + AppBlockArn, + AppBlockBuilderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "DisassociateAppBlockBuilderAppBlock", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, + "AppBlockBuilderName" => AppBlockBuilderName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_application_fleet(application_arn, fleet_name) disassociate_application_fleet(application_arn, fleet_name, params::Dict{String,<:Any}) @@ -2353,6 +2660,37 @@ function list_tags_for_resource( ) end +""" + start_app_block_builder(name) + start_app_block_builder(name, params::Dict{String,<:Any}) + +Starts an app block builder. An app block builder can only be started when it's associated +with an app block. Starting an app block builder starts a new instance, which is equivalent +to an elastic fleet instance with application builder assistance functionality. + +# Arguments +- `name`: The name of the app block builder. + +""" +function start_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "StartAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "StartAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_fleet(name) start_fleet(name, params::Dict{String,<:Any}) @@ -2415,6 +2753,36 @@ function start_image_builder( ) end +""" + stop_app_block_builder(name) + stop_app_block_builder(name, params::Dict{String,<:Any}) + +Stops an app block builder. Stopping an app block builder terminates the instance, and the +instance state is not persisted. + +# Arguments +- `name`: The name of the app block builder. 
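+
+# Example
+
+A minimal sketch of the builder lifecycle; the name is a placeholder. Starting and
+stopping are not instantaneous, so real code would poll `describe_app_block_builders`
+for the builder state between calls:
+
+    start_app_block_builder("my-app-block-builder")
+    # ... package the application on the running builder instance ...
+    stop_app_block_builder("my-app-block-builder")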
+ +""" +function stop_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "StopAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "StopAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_fleet(name) stop_fleet(name, params::Dict{String,<:Any}) @@ -2564,6 +2932,61 @@ function untag_resource( ) end +""" + update_app_block_builder(name) + update_app_block_builder(name, params::Dict{String,<:Any}) + +Updates an app block builder. If the app block builder is in the STARTING or STOPPING +state, you can't update it. If the app block builder is in the RUNNING state, you can only +update the DisplayName and Description. If the app block builder is in the STOPPED state, +you can update any attribute except the Name. + +# Arguments +- `name`: The unique name for the app block builder. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessEndpoints"`: The list of interface VPC endpoint (interface endpoint) objects. + Administrators can connect to the app block builder only through the specified endpoints. +- `"AttributesToDelete"`: The attributes to delete from the app block builder. +- `"Description"`: The description of the app block builder. +- `"DisplayName"`: The display name of the app block builder. +- `"EnableDefaultInternetAccess"`: Enables or disables default internet access for the app + block builder. +- `"IamRoleArn"`: The Amazon Resource Name (ARN) of the IAM role to apply to the app block + builder. To assume a role, the app block builder calls the AWS Security Token Service (STS) + AssumeRole API operation and passes the ARN of the role to use. The operation creates a new + session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and + creates the appstream_machine_role credential profile on the instance. For more + information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running + on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide. +- `"InstanceType"`: The instance type to use when launching the app block builder. The + following instance types are available: stream.standard.small stream.standard.medium + stream.standard.large stream.standard.xlarge stream.standard.2xlarge +- `"Platform"`: The platform of the app block builder. WINDOWS_SERVER_2019 is the only + valid value. +- `"VpcConfig"`: The VPC configuration for the app block builder. App block builders + require that you specify at least two subnets in different availability zones. 
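+
+# Example
+
+A minimal sketch of resizing a stopped builder (per the state rules above, changing
+anything other than DisplayName or Description requires the STOPPED state); the builder
+name is a placeholder and the instance type is one of the documented values:
+
+    update_app_block_builder(
+        "my-app-block-builder",
+        Dict{String,Any}(
+            "InstanceType" => "stream.standard.large",
+            "Description" => "Resized for larger packaging jobs",
+        ),
+    )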
+""" +function update_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "UpdateAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "UpdateAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_application(name) update_application(name, params::Dict{String,<:Any}) @@ -2774,11 +3197,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys stream.standard.medium stream.standard.large stream.standard.xlarge stream.standard.2xlarge - `"MaxConcurrentSessions"`: The maximum number of concurrent sessions for a fleet. +- `"MaxSessionsPerInstance"`: The maximum number of user sessions on an instance. This only + applies to multi-session fleets. - `"MaxUserDurationInSeconds"`: The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a - new instance. Specify a value between 600 and 360000. + new instance. Specify a value between 600 and 432000. - `"Name"`: A unique name for the fleet. - `"Platform"`: The platform of the fleet. WINDOWS_SERVER_2019 and AMAZON_LINUX2 are supported for Elastic fleets. diff --git a/src/services/appsync.jl b/src/services/appsync.jl index ed9f6f5bff..409b31b21b 100644 --- a/src/services/appsync.jl +++ b/src/services/appsync.jl @@ -175,6 +175,13 @@ Creates a cache for the GraphQL API. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"atRestEncryptionEnabled"`: At-rest encryption flag for cache. You cannot update this setting after creation. +- `"healthMetricsConfig"`: Controls how cache health metrics will be emitted to CloudWatch. + Cache health metrics include: NetworkBandwidthOutAllowanceExceeded: The network packets + dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for + diagnosing bottlenecks in a cache configuration. EngineCPUUtilization: The CPU + utilization (percentage) allocated to the Redis process. This is useful for diagnosing + bottlenecks in a cache configuration. Metrics will be recorded by API ID. You can set the + value to ENABLED or DISABLED. - `"transitEncryptionEnabled"`: Transit encryption flag when connecting to cache. You cannot update this setting after creation. """ @@ -274,6 +281,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"eventBridgeConfig"`: Amazon EventBridge settings. - `"httpConfig"`: HTTP endpoint settings. - `"lambdaConfig"`: Lambda settings. +- `"metricsConfig"`: Enables or disables enhanced data source metrics for specified data + sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior + value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to + FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can + still set its value. metricsConfig can be ENABLED or DISABLED. - `"openSearchServiceConfig"`: Amazon OpenSearch Service settings. 
- `"relationalDatabaseConfig"`: Relational database settings. - `"serviceRoleArn"`: The Identity and Access Management (IAM) service role Amazon Resource @@ -435,6 +447,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the GraphqlApi API. - `"apiType"`: The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED). +- `"enhancedMetricsConfig"`: The enhancedMetricsConfig object. +- `"introspectionConfig"`: Sets the value of the GraphQL API to enable (ENABLED) or disable + (DISABLED) introspection. If no value is provided, the introspection configuration will be + set to ENABLED by default. This field will produce an error if the operation attempts to + use the introspection feature while this field is disabled. For more information about + introspection, see GraphQL introspection. - `"lambdaAuthorizerConfig"`: Configuration for Lambda function authorization. - `"logConfig"`: The Amazon CloudWatch Logs configuration. - `"mergedApiExecutionRoleArn"`: The Identity and Access Management service role ARN for a @@ -444,6 +462,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"openIDConnectConfig"`: The OIDC configuration. - `"ownerContact"`: The owner contact information for an API resource. This field accepts any string input with a length of 0 - 256 characters. +- `"queryDepthLimit"`: The maximum depth a query can have in a single request. Depth refers + to the amount of nested levels allowed in the body of query. The default value is 0 (or + unspecified), which indicates there's no depth limit. If you set a limit, it can be between + 1 and 75 nested levels. This field will produce a limit error if the operation falls out of + bounds. Note that fields can still be set to nullable or non-nullable. If a non-nullable + field produces an error, the error will be thrown upwards to the first nullable field + available. +- `"resolverCountLimit"`: The maximum number of resolvers that can be invoked in a single + request. The default value is 0 (or unspecified), which will set the limit to 10000. When + specified, the limit value can be between 1 and 10000. This field will produce a limit + error if the operation falls out of bounds. - `"tags"`: A TagMap object. - `"userPoolConfig"`: The Amazon Cognito user pool configuration. - `"visibility"`: Sets the value of the GraphQL API to public (GLOBAL) or private @@ -509,6 +538,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys invoke a series of Function objects in a serial manner. You can use a pipeline resolver to run a GraphQL query against multiple data sources. - `"maxBatchSize"`: The maximum batching size for a resolver. +- `"metricsConfig"`: Enables or disables enhanced resolver metrics for specified resolvers. + Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set + to PER_RESOLVER_METRICS. If the resolverLevelMetricsBehavior is set to + FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can + still set its value. metricsConfig can be ENABLED or DISABLED. - `"pipelineConfig"`: The PipelineConfig. - `"requestMappingTemplate"`: The mapping template to use for requests. 
A resolver uses a request mapping template to convert a GraphQL expression into a format that a data source @@ -1210,6 +1244,53 @@ function get_data_source( ) end +""" + get_data_source_introspection(introspection_id) + get_data_source_introspection(introspection_id, params::Dict{String,<:Any}) + +Retrieves the record of an existing introspection. If the retrieval is successful, the +result of the instrospection will also be returned. If the retrieval fails the operation, +an error message will be returned instead. + +# Arguments +- `introspection_id`: The introspection ID. Each introspection contains a unique ID that + can be used to reference the instrospection record. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includeModelsSDL"`: A boolean flag that determines whether SDL should be generated for + introspected types or not. If set to true, each model will contain an sdl property that + contains the SDL for that type. The SDL only contains the type data and no additional + metadata or directives. +- `"maxResults"`: The maximum number of introspected types that will be returned in a + single response. +- `"nextToken"`: Determines the number of types to be returned in a single response before + paginating. This value is typically taken from nextToken value from the previous response. +""" +function get_data_source_introspection( + introspectionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return appsync( + "GET", + "/v1/datasources/introspections/$(introspectionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_data_source_introspection( + introspectionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appsync( + "GET", + "/v1/datasources/introspections/$(introspectionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_domain_name(domain_name) get_domain_name(domain_name, params::Dict{String,<:Any}) @@ -1303,6 +1384,39 @@ function get_graphql_api( ) end +""" + get_graphql_api_environment_variables(api_id) + get_graphql_api_environment_variables(api_id, params::Dict{String,<:Any}) + +Retrieves the list of environmental variable key-value pairs associated with an API by its +ID value. + +# Arguments +- `api_id`: The ID of the API from which the environmental variable list will be retrieved. + +""" +function get_graphql_api_environment_variables( + apiId; aws_config::AbstractAWSConfig=global_aws_config() +) + return appsync( + "GET", + "/v1/apis/$(apiId)/environmentVariables"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_graphql_api_environment_variables( + apiId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appsync( + "GET", + "/v1/apis/$(apiId)/environmentVariables", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_introspection_schema(api_id, format) get_introspection_schema(api_id, format, params::Dict{String,<:Any}) @@ -1898,6 +2012,108 @@ function list_types_by_association( ) end +""" + put_graphql_api_environment_variables(api_id, environment_variables) + put_graphql_api_environment_variables(api_id, environment_variables, params::Dict{String,<:Any}) + +Creates a list of environmental variables in an API by its ID value. 
When creating an +environmental variable, it must follow the constraints below: Both JavaScript and VTL +templates support environmental variables. Environmental variables are not evaluated +before function invocation. Environmental variables only support string values. Any +defined value in an environmental variable is considered a string literal and not expanded. + Variable evaluations should ideally be performed in the function code. When creating an +environmental variable key-value pair, it must follow the additional constraints below: +Keys must begin with a letter. Keys must be at least two characters long. Keys can only +contain letters, numbers, and the underscore character (_). Values can be up to 512 +characters long. You can configure up to 50 key-value pairs in a GraphQL API. You can +create a list of environmental variables by adding it to the environmentVariables payload +as a list in the format {\"key1\":\"value1\",\"key2\":\"value2\", …}. Note that each call +of the PutGraphqlApiEnvironmentVariables action will result in the overwriting of the +existing environmental variable list of that API. This means the existing environmental +variables will be lost. To avoid this, you must include all existing and new environmental +variables in the list each time you call this action. + +# Arguments +- `api_id`: The ID of the API to which the environmental variable list will be written. +- `environment_variables`: The list of environmental variables to add to the API. When + creating an environmental variable key-value pair, it must follow the additional + constraints below: Keys must begin with a letter. Keys must be at least two characters + long. Keys can only contain letters, numbers, and the underscore character (_). Values + can be up to 512 characters long. You can configure up to 50 key-value pairs in a GraphQL + API. You can create a list of environmental variables by adding it to the + environmentVariables payload as a list in the format + {\"key1\":\"value1\",\"key2\":\"value2\", …}. Note that each call of the + PutGraphqlApiEnvironmentVariables action will result in the overwriting of the existing + environmental variable list of that API. This means the existing environmental variables + will be lost. To avoid this, you must include all existing and new environmental variables + in the list each time you call this action. + +""" +function put_graphql_api_environment_variables( + apiId, environmentVariables; aws_config::AbstractAWSConfig=global_aws_config() +) + return appsync( + "PUT", + "/v1/apis/$(apiId)/environmentVariables", + Dict{String,Any}("environmentVariables" => environmentVariables); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_graphql_api_environment_variables( + apiId, + environmentVariables, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appsync( + "PUT", + "/v1/apis/$(apiId)/environmentVariables", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("environmentVariables" => environmentVariables), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_data_source_introspection() + start_data_source_introspection(params::Dict{String,<:Any}) + +Creates a new introspection. Returns the introspectionId of the new introspection after its +creation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"rdsDataApiConfig"`: The rdsDataApiConfig object data. 
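+
+# Example
+
+A minimal sketch of introspecting an Aurora database and then fetching the result by ID.
+The ARNs and database name are placeholders, and the field names inside rdsDataApiConfig
+(resourceArn, secretArn, databaseName) are assumptions about the Data API configuration
+rather than something defined in this file:
+
+    started = start_data_source_introspection(
+        Dict{String,Any}(
+            "rdsDataApiConfig" => Dict{String,Any}(
+                "resourceArn" => "arn:aws:rds:us-east-1:123456789012:cluster:my-cluster",
+                "secretArn" => "arn:aws:secretsmanager:us-east-1:123456789012:secret:my-secret",
+                "databaseName" => "mydb",
+            ),
+        ),
+    )
+
+    # retrieve the introspection record, asking for generated SDL as well
+    result = get_data_source_introspection(
+        started["introspectionId"], Dict{String,Any}("includeModelsSDL" => true)
+    )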
+""" +function start_data_source_introspection(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return appsync( + "POST", + "/v1/datasources/introspections"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_data_source_introspection( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appsync( + "POST", + "/v1/datasources/introspections", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_schema_creation(api_id, definition) start_schema_creation(api_id, definition, params::Dict{String,<:Any}) @@ -2069,6 +2285,15 @@ Updates the cache for the GraphQL API. type. R4_XLARGE: A r4.xlarge instance type. R4_2XLARGE: A r4.2xlarge instance type. R4_4XLARGE: A r4.4xlarge instance type. R4_8XLARGE: A r4.8xlarge instance type. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"healthMetricsConfig"`: Controls how cache health metrics will be emitted to CloudWatch. + Cache health metrics include: NetworkBandwidthOutAllowanceExceeded: The network packets + dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for + diagnosing bottlenecks in a cache configuration. EngineCPUUtilization: The CPU + utilization (percentage) allocated to the Redis process. This is useful for diagnosing + bottlenecks in a cache configuration. Metrics will be recorded by API ID. You can set the + value to ENABLED or DISABLED. """ function update_api_cache( apiCachingBehavior, apiId, ttl, type; aws_config::AbstractAWSConfig=global_aws_config() @@ -2169,6 +2394,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"eventBridgeConfig"`: The new Amazon EventBridge settings. - `"httpConfig"`: The new HTTP endpoint configuration. - `"lambdaConfig"`: The new Lambda configuration. +- `"metricsConfig"`: Enables or disables enhanced data source metrics for specified data + sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior + value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to + FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can + still set its value. metricsConfig can be ENABLED or DISABLED. - `"openSearchServiceConfig"`: The new OpenSearch configuration. - `"relationalDatabaseConfig"`: The new relational database configuration. - `"serviceRoleArn"`: The new service role Amazon Resource Name (ARN) for the data source. @@ -2301,20 +2531,26 @@ function update_function( end """ - update_graphql_api(api_id, name) - update_graphql_api(api_id, name, params::Dict{String,<:Any}) + update_graphql_api(api_id, authentication_type, name) + update_graphql_api(api_id, authentication_type, name, params::Dict{String,<:Any}) Updates a GraphqlApi object. # Arguments - `api_id`: The API ID. +- `authentication_type`: The new authentication type for the GraphqlApi object. - `name`: The new name for the GraphqlApi object. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"additionalAuthenticationProviders"`: A list of additional authentication providers for the GraphqlApi API. -- `"authenticationType"`: The new authentication type for the GraphqlApi object. +- `"enhancedMetricsConfig"`: The enhancedMetricsConfig object. 
+- `"introspectionConfig"`: Sets the value of the GraphQL API to enable (ENABLED) or disable + (DISABLED) introspection. If no value is provided, the introspection configuration will be + set to ENABLED by default. This field will produce an error if the operation attempts to + use the introspection feature while this field is disabled. For more information about + introspection, see GraphQL introspection. - `"lambdaAuthorizerConfig"`: Configuration for Lambda function authorization. - `"logConfig"`: The Amazon CloudWatch Logs configuration for the GraphqlApi object. - `"mergedApiExecutionRoleArn"`: The Identity and Access Management service role ARN for a @@ -2324,21 +2560,35 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"openIDConnectConfig"`: The OpenID Connect configuration for the GraphqlApi object. - `"ownerContact"`: The owner contact information for an API resource. This field accepts any string input with a length of 0 - 256 characters. +- `"queryDepthLimit"`: The maximum depth a query can have in a single request. Depth refers + to the amount of nested levels allowed in the body of query. The default value is 0 (or + unspecified), which indicates there's no depth limit. If you set a limit, it can be between + 1 and 75 nested levels. This field will produce a limit error if the operation falls out of + bounds. Note that fields can still be set to nullable or non-nullable. If a non-nullable + field produces an error, the error will be thrown upwards to the first nullable field + available. +- `"resolverCountLimit"`: The maximum number of resolvers that can be invoked in a single + request. The default value is 0 (or unspecified), which will set the limit to 10000. When + specified, the limit value can be between 1 and 10000. This field will produce a limit + error if the operation falls out of bounds. - `"userPoolConfig"`: The new Amazon Cognito user pool configuration for the ~GraphqlApi object. - `"xrayEnabled"`: A flag indicating whether to use X-Ray tracing for the GraphqlApi. """ -function update_graphql_api(apiId, name; aws_config::AbstractAWSConfig=global_aws_config()) +function update_graphql_api( + apiId, authenticationType, name; aws_config::AbstractAWSConfig=global_aws_config() +) return appsync( "POST", "/v1/apis/$(apiId)", - Dict{String,Any}("name" => name); + Dict{String,Any}("authenticationType" => authenticationType, "name" => name); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function update_graphql_api( apiId, + authenticationType, name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -2346,7 +2596,15 @@ function update_graphql_api( return appsync( "POST", "/v1/apis/$(apiId)", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "authenticationType" => authenticationType, "name" => name + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2375,6 +2633,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys invoke a series of Function objects in a serial manner. You can use a pipeline resolver to run a GraphQL query against multiple data sources. - `"maxBatchSize"`: The maximum batching size for a resolver. +- `"metricsConfig"`: Enables or disables enhanced resolver metrics for specified resolvers. + Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set + to PER_RESOLVER_METRICS. 
If the resolverLevelMetricsBehavior is set to + FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can + still set its value. metricsConfig can be ENABLED or DISABLED. - `"pipelineConfig"`: The PipelineConfig. - `"requestMappingTemplate"`: The new request mapping template. A resolver uses a request mapping template to convert a GraphQL expression into a format that a data source can diff --git a/src/services/apptest.jl b/src/services/apptest.jl new file mode 100644 index 0000000000..5e585ee44b --- /dev/null +++ b/src/services/apptest.jl @@ -0,0 +1,893 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: apptest +using AWS.Compat +using AWS.UUIDs + +""" + create_test_case(name, steps) + create_test_case(name, steps, params::Dict{String,<:Any}) + +Creates a test case. + +# Arguments +- `name`: The name of the test case. +- `steps`: The steps in the test case. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: The client token of the test case. +- `"description"`: The description of the test case. +- `"tags"`: The specified tags of the test case. +""" +function create_test_case(name, steps; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "POST", + "/testcase", + Dict{String,Any}( + "name" => name, "steps" => steps, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_test_case( + name, + steps, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "POST", + "/testcase", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "steps" => steps, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_test_configuration(name, resources) + create_test_configuration(name, resources, params::Dict{String,<:Any}) + +Creates a test configuration. + +# Arguments +- `name`: The name of the test configuration. +- `resources`: The defined resources of the test configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: The client token of the test configuration. +- `"description"`: The description of the test configuration. +- `"properties"`: The properties of the test configuration. +- `"serviceSettings"`: The service settings of the test configuration. +- `"tags"`: The tags of the test configuration. +""" +function create_test_configuration( + name, resources; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "POST", + "/testconfiguration", + Dict{String,Any}( + "name" => name, "resources" => resources, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_test_configuration( + name, + resources, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "POST", + "/testconfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "resources" => resources, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_test_suite(name, test_cases) + create_test_suite(name, test_cases, params::Dict{String,<:Any}) + +Creates a test suite. 
+ +# Arguments +- `name`: The name of the test suite. +- `test_cases`: The test cases in the test suite. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"afterSteps"`: The after steps of the test suite. +- `"beforeSteps"`: The before steps of the test suite. +- `"clientToken"`: The client token of the test suite. +- `"description"`: The description of the test suite. +- `"tags"`: The tags of the test suite. +""" +function create_test_suite( + name, testCases; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "POST", + "/testsuite", + Dict{String,Any}( + "name" => name, "testCases" => testCases, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_test_suite( + name, + testCases, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "POST", + "/testsuite", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "testCases" => testCases, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_test_case(test_case_id) + delete_test_case(test_case_id, params::Dict{String,<:Any}) + +Deletes a test case. + +# Arguments +- `test_case_id`: The test case ID of the test case. + +""" +function delete_test_case(testCaseId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "DELETE", + "/testcases/$(testCaseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_test_case( + testCaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "DELETE", + "/testcases/$(testCaseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_test_configuration(test_configuration_id) + delete_test_configuration(test_configuration_id, params::Dict{String,<:Any}) + +Deletes a test configuration. + +# Arguments +- `test_configuration_id`: The test ID of the test configuration. + +""" +function delete_test_configuration( + testConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "DELETE", + "/testconfigurations/$(testConfigurationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_test_configuration( + testConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "DELETE", + "/testconfigurations/$(testConfigurationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_test_run(test_run_id) + delete_test_run(test_run_id, params::Dict{String,<:Any}) + +Deletes a test run. + +# Arguments +- `test_run_id`: The run ID of the test run. + +""" +function delete_test_run(testRunId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "DELETE", + "/testruns/$(testRunId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_test_run( + testRunId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "DELETE", + "/testruns/$(testRunId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_test_suite(test_suite_id) + delete_test_suite(test_suite_id, params::Dict{String,<:Any}) + +Deletes a test suite. 
+ +# Arguments +- `test_suite_id`: The test ID of the test suite. + +""" +function delete_test_suite(testSuiteId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "DELETE", + "/testsuites/$(testSuiteId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_test_suite( + testSuiteId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "DELETE", + "/testsuites/$(testSuiteId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_test_case(test_case_id) + get_test_case(test_case_id, params::Dict{String,<:Any}) + +Gets a test case. + +# Arguments +- `test_case_id`: The request test ID of the test case. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"testCaseVersion"`: The test case version of the test case. +""" +function get_test_case(testCaseId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "GET", + "/testcases/$(testCaseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_test_case( + testCaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "GET", + "/testcases/$(testCaseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_test_configuration(test_configuration_id) + get_test_configuration(test_configuration_id, params::Dict{String,<:Any}) + +Gets a test configuration. + +# Arguments +- `test_configuration_id`: The request test configuration ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"testConfigurationVersion"`: The test configuration version. +""" +function get_test_configuration( + testConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", + "/testconfigurations/$(testConfigurationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_test_configuration( + testConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "GET", + "/testconfigurations/$(testConfigurationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_test_run_step(step_name, test_run_id) + get_test_run_step(step_name, test_run_id, params::Dict{String,<:Any}) + +Gets a test run step. + +# Arguments +- `step_name`: The step name of the test run step. +- `test_run_id`: The test run ID of the test run step. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"testCaseId"`: The test case ID of a test run step. +- `"testSuiteId"`: The test suite ID of a test run step. +""" +function get_test_run_step( + stepName, testRunId; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", + "/testruns/$(testRunId)/steps/$(stepName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_test_run_step( + stepName, + testRunId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "GET", + "/testruns/$(testRunId)/steps/$(stepName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_test_suite(test_suite_id) + get_test_suite(test_suite_id, params::Dict{String,<:Any}) + +Gets a test suite. 
+ +# Arguments +- `test_suite_id`: The ID of the test suite. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"testSuiteVersion"`: The version of the test suite. +""" +function get_test_suite(testSuiteId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "GET", + "/testsuites/$(testSuiteId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_test_suite( + testSuiteId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "GET", + "/testsuites/$(testSuiteId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists tags for a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_test_cases() + list_test_cases(params::Dict{String,<:Any}) + +Lists test cases. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum results of the test case. +- `"nextToken"`: The next token of the test cases. +- `"testCaseIds"`: The IDs of the test cases. +""" +function list_test_cases(; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "GET", "/testcases"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_test_cases( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", "/testcases", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_test_configurations() + list_test_configurations(params::Dict{String,<:Any}) + +Lists test configurations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum results of the test configuration. +- `"nextToken"`: The next token for the test configurations. +- `"testConfigurationIds"`: The configuration IDs of the test configurations. +""" +function list_test_configurations(; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "GET", "/testconfigurations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_test_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", + "/testconfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_test_run_steps(test_run_id) + list_test_run_steps(test_run_id, params::Dict{String,<:Any}) + +Lists test run steps. + +# Arguments +- `test_run_id`: The test run ID of the test run steps. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of test run steps to return in one page of results. 
+- `"nextToken"`: The token from a previous step to retrieve the next page of results. +- `"testCaseId"`: The test case ID of the test run steps. +- `"testSuiteId"`: The test suite ID of the test run steps. +""" +function list_test_run_steps(testRunId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "GET", + "/testruns/$(testRunId)/steps"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_test_run_steps( + testRunId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "GET", + "/testruns/$(testRunId)/steps", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_test_run_test_cases(test_run_id) + list_test_run_test_cases(test_run_id, params::Dict{String,<:Any}) + +Lists test run test cases. + +# Arguments +- `test_run_id`: The test run ID of the test cases. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of test run test cases to return in one page of + results. +- `"nextToken"`: The token from a previous request to retrieve the next page of results. +""" +function list_test_run_test_cases( + testRunId; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", + "/testruns/$(testRunId)/testcases"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_test_run_test_cases( + testRunId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "GET", + "/testruns/$(testRunId)/testcases", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_test_runs() + list_test_runs(params::Dict{String,<:Any}) + +Lists test runs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of test runs to return in one page of results. +- `"nextToken"`: The token from the previous request to retrieve the next page of test run + results. +- `"testSuiteId"`: The test suite ID of the test runs. +- `"testrunIds"`: The test run IDs of the test runs. +""" +function list_test_runs(; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "GET", "/testruns"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_test_runs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", "/testruns", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_test_suites() + list_test_suites(params::Dict{String,<:Any}) + +Lists test suites. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of test suites to return in one page of results. +- `"nextToken"`: The token from a previous request to retrieve the next page of results. +- `"testSuiteIds"`: The suite ID of the test suites. 
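+
+# Example
+
+A minimal sketch of paging through test suites; the page size is arbitrary, credentials
+are assumed to be configured for `global_aws_config()`, and the response is treated as a
+dictionary-like object whose pagination token mirrors the request's nextToken key:
+
+    page = list_test_suites(Dict{String,Any}("maxResults" => 10))
+
+    # fetch the following page with the token returned by the previous call
+    next_page = list_test_suites(
+        Dict{String,Any}("maxResults" => 10, "nextToken" => page["nextToken"])
+    )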
+""" +function list_test_suites(; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "GET", "/testsuites"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_test_suites( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "GET", "/testsuites", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + start_test_run(test_suite_id) + start_test_run(test_suite_id, params::Dict{String,<:Any}) + +Starts a test run. + +# Arguments +- `test_suite_id`: The test suite ID of the test run. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: The client token of the test run. +- `"tags"`: The tags of the test run. +- `"testConfigurationId"`: The configuration ID of the test run. +""" +function start_test_run(testSuiteId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "POST", + "/testrun", + Dict{String,Any}("testSuiteId" => testSuiteId, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_test_run( + testSuiteId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "POST", + "/testrun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "testSuiteId" => testSuiteId, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Specifies tags of a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the tag resource. +- `tags`: The tags of the resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Untags a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tag_keys`: The tag keys of the resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_test_case(test_case_id) + update_test_case(test_case_id, params::Dict{String,<:Any}) + +Updates a test case. + +# Arguments +- `test_case_id`: The test case ID of the test case. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the test case. +- `"steps"`: The steps of the test case. +""" +function update_test_case(testCaseId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "PATCH", + "/testcases/$(testCaseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_test_case( + testCaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "PATCH", + "/testcases/$(testCaseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_test_configuration(test_configuration_id) + update_test_configuration(test_configuration_id, params::Dict{String,<:Any}) + +Updates a test configuration. + +# Arguments +- `test_configuration_id`: The test configuration ID of the test configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the test configuration. +- `"properties"`: The properties of the test configuration. +- `"resources"`: The resources of the test configuration. +- `"serviceSettings"`: The service settings of the test configuration. +""" +function update_test_configuration( + testConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return apptest( + "PATCH", + "/testconfigurations/$(testConfigurationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_test_configuration( + testConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "PATCH", + "/testconfigurations/$(testConfigurationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_test_suite(test_suite_id) + update_test_suite(test_suite_id, params::Dict{String,<:Any}) + +Updates a test suite. + +# Arguments +- `test_suite_id`: The test suite ID of the test suite. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"afterSteps"`: The after steps of the test suite. +- `"beforeSteps"`: The before steps for the test suite. +- `"description"`: The description of the test suite. +- `"testCases"`: The test cases in the test suite. +""" +function update_test_suite(testSuiteId; aws_config::AbstractAWSConfig=global_aws_config()) + return apptest( + "PATCH", + "/testsuites/$(testSuiteId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_test_suite( + testSuiteId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return apptest( + "PATCH", + "/testsuites/$(testSuiteId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/arc_zonal_shift.jl b/src/services/arc_zonal_shift.jl index e827116a8c..57a6c9300b 100644 --- a/src/services/arc_zonal_shift.jl +++ b/src/services/arc_zonal_shift.jl @@ -8,8 +8,10 @@ using AWS.UUIDs cancel_zonal_shift(zonal_shift_id) cancel_zonal_shift(zonal_shift_id, params::Dict{String,<:Any}) -Cancel a zonal shift in Amazon Route 53 Application Recovery Controller that you've started -for a resource in your AWS account in an AWS Region. +Cancel a zonal shift in Amazon Route 53 Application Recovery Controller. To cancel the +zonal shift, specify the zonal shift ID. 
A zonal shift can be one that you've started for a +resource in your Amazon Web Services account in an Amazon Web Services Region, or it can be +a zonal shift started by a practice run with zonal autoshift. # Arguments - `zonal_shift_id`: The internally-generated identifier of a zonal shift. @@ -37,21 +39,141 @@ function cancel_zonal_shift( ) end +""" + create_practice_run_configuration(outcome_alarms, resource_identifier) + create_practice_run_configuration(outcome_alarms, resource_identifier, params::Dict{String,<:Any}) + +A practice run configuration for zonal autoshift is required when you enable zonal +autoshift. A practice run configuration includes specifications for blocked dates and +blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice +runs. The alarms that you specify are an outcome alarm, to monitor application health +during practice runs and, optionally, a blocking alarm, to block practice runs from +starting. For more information, see Considerations when you configure zonal autoshift in +the Amazon Route 53 Application Recovery Controller Developer Guide. + +# Arguments +- `outcome_alarms`: The outcome alarm for practice runs is a required Amazon CloudWatch + alarm that you specify that ends a practice run when the alarm is in an ALARM state. + Configure the alarm to monitor the health of your application when traffic is shifted away + from an Availability Zone during each weekly practice run. You should configure the alarm + to go into an ALARM state if your application is impacted by the zonal shift, and you want + to stop the zonal shift, to let traffic for the resource return to the Availability Zone. +- `resource_identifier`: The identifier of the resource to shift away traffic for when a + practice run starts a zonal shift. The identifier is the Amazon Resource Name (ARN) for the + resource. At this time, supported resources are Network Load Balancers and Application Load + Balancers with cross-zone load balancing turned off. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"blockedDates"`: Optionally, you can block Route 53 ARC from starting practice runs for + a resource on specific calendar dates. The format for blocked dates is: YYYY-MM-DD. Keep in + mind, when you specify dates, that dates and times for practice runs are in UTC. Separate + multiple blocked dates with spaces. For example, if you have an application update + scheduled to launch on May 1, 2024, and you don't want practice runs to shift traffic away + at that time, you could set a blocked date for 2024-05-01. +- `"blockedWindows"`: Optionally, you can block Route 53 ARC from starting practice runs + for specific windows of days and times. The format for blocked windows is: + DAY:HH:SS-DAY:HH:SS. Keep in mind, when you specify dates, that dates and times for + practice runs are in UTC. Also, be aware of potential time adjustments that might be + required for daylight saving time differences. Separate multiple blocked windows with + spaces. For example, say you run business report summaries three days a week. For this + scenario, you might set the following recurring days and times as blocked windows, for + example: MON-20:30-21:30 WED-20:30-21:30 FRI-20:30-21:30. +- `"blockingAlarms"`: An Amazon CloudWatch alarm that you can specify for zonal autoshift + practice runs. 
This alarm blocks Route 53 ARC from starting practice run zonal shifts, and
+  ends a practice run that's in progress, when the alarm is in an ALARM state.
+"""
+function create_practice_run_configuration(
+    outcomeAlarms, resourceIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "POST",
+        "/configuration",
+        Dict{String,Any}(
+            "outcomeAlarms" => outcomeAlarms, "resourceIdentifier" => resourceIdentifier
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_practice_run_configuration(
+    outcomeAlarms,
+    resourceIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return arc_zonal_shift(
+        "POST",
+        "/configuration",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "outcomeAlarms" => outcomeAlarms,
+                    "resourceIdentifier" => resourceIdentifier,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_practice_run_configuration(resource_identifier)
+    delete_practice_run_configuration(resource_identifier, params::Dict{String,<:Any})
+
+Deletes the practice run configuration for a resource. Before you can delete a practice run
+configuration for a resource, you must disable zonal autoshift for the resource. Practice
+runs must be configured for zonal autoshift to be enabled.
+
+# Arguments
+- `resource_identifier`: The identifier for the resource that you want to delete the
+  practice run configuration for. The identifier is the Amazon Resource Name (ARN) for the
+  resource.
+
+"""
+function delete_practice_run_configuration(
+    resourceIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "DELETE",
+        "/configuration/$(resourceIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_practice_run_configuration(
+    resourceIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return arc_zonal_shift(
+        "DELETE",
+        "/configuration/$(resourceIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_managed_resource(resource_identifier)
     get_managed_resource(resource_identifier, params::Dict{String,<:Any})
 
 Get information about a resource that's been registered for zonal shifts with Amazon Route
-53 Application Recovery Controller in this AWS Region. Resources that are registered for
-zonal shifts are managed resources in Route 53 ARC. At this time, you can only start a
-zonal shift for Network Load Balancers and Application Load Balancers with cross-zone load
-balancing turned off.
+53 Application Recovery Controller in this Amazon Web Services Region. Resources that are
+registered for zonal shifts are managed resources in Route 53 ARC. You can start zonal
+shifts and configure zonal autoshift for managed resources. At this time, you can only
+start a zonal shift or configure zonal autoshift for Network Load Balancers and Application
+Load Balancers with cross-zone load balancing turned off.
 
 # Arguments
-- `resource_identifier`: The identifier for the resource to include in a zonal shift. The
-  identifier is the Amazon Resource Name (ARN) for the resource. At this time, you can only
-  start a zonal shift for Network Load Balancers and Application Load Balancers with
-  cross-zone load balancing turned off.
+- `resource_identifier`: The identifier for the resource to shift away traffic for. The
+  identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported
+  resources are Network Load Balancers and Application Load Balancers with cross-zone load
+  balancing turned off.
 
 """
 function get_managed_resource(
@@ -78,14 +200,43 @@ function get_managed_resource(
     )
 end
 
+"""
+    list_autoshifts()
+    list_autoshifts(params::Dict{String,<:Any})
+
+Returns the active autoshifts for a specified resource.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The number of objects that you want to return with this call.
+- `"nextToken"`: Specifies that you want to receive the next page of results. Valid only if
+  you received a NextToken response in the previous request. If you did, it indicates that
+  more output is available. Set this parameter to the value provided by the previous call's
+  NextToken response to request the next page of results.
+- `"status"`: The status of the autoshift.
+"""
+function list_autoshifts(; aws_config::AbstractAWSConfig=global_aws_config())
+    return arc_zonal_shift(
+        "GET", "/autoshifts"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_autoshifts(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "GET", "/autoshifts", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
 """
     list_managed_resources()
     list_managed_resources(params::Dict{String,<:Any})
 
-Lists all the resources in your AWS account in this AWS Region that are managed for zonal
-shifts in Amazon Route 53 Application Recovery Controller, and information about them. The
-information includes their Amazon Resource Names (ARNs), the Availability Zones the
-resources are deployed in, and the resource name.
+Lists all the resources in your Amazon Web Services account in this Amazon Web Services
+Region that are managed for zonal shifts in Amazon Route 53 Application Recovery
+Controller, and information about them. The information includes the zonal autoshift status
+for the resource, as well as the Amazon Resource Name (ARN), the Availability Zones that
+each resource is deployed in, and the resource name.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -116,8 +267,12 @@ end
     list_zonal_shifts()
     list_zonal_shifts(params::Dict{String,<:Any})
 
-Lists all the active zonal shifts in Amazon Route 53 Application Recovery Controller in
-your AWS account in this AWS Region.
+Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery
+Controller in your Amazon Web Services account in this Amazon Web Services Region.
+ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts
+that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation
+does not list autoshifts. For more information about listing autoshifts, see
+ListAutoshifts.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -126,9 +281,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   you received a NextToken response in the previous request. If you did, it indicates that
   more output is available. Set this parameter to the value provided by the previous call's
   NextToken response to request the next page of results.
+- `"resourceIdentifier"`: The identifier for the resource that you want to list zonal + shifts for. The identifier is the Amazon Resource Name (ARN) for the resource. - `"status"`: A status for a zonal shift. The Status for a zonal shift can have one of the - following values: ACTIVE: The zonal shift is started and active. EXPIRED: The zonal - shift has expired (the expiry time was exceeded). CANCELED: The zonal shift was + following values: ACTIVE: The zonal shift has been started and active. EXPIRED: The + zonal shift has expired (the expiry time was exceeded). CANCELED: The zonal shift was canceled. """ function list_zonal_shifts(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -153,22 +310,23 @@ end start_zonal_shift(away_from, comment, expires_in, resource_identifier, params::Dict{String,<:Any}) You start a zonal shift to temporarily move load balancer traffic away from an Availability -Zone in a AWS Region, to help your application recover immediately, for example, from a -developer's bad code deployment or from an AWS infrastructure failure in a single -Availability Zone. You can start a zonal shift in Route 53 ARC only for managed resources -in your account in an AWS Region. Resources are automatically registered with Route 53 ARC -by AWS services. At this time, you can only start a zonal shift for Network Load Balancers -and Application Load Balancers with cross-zone load balancing turned off. When you start a -zonal shift, traffic for the resource is no longer routed to the Availability Zone. The -zonal shift is created immediately in Route 53 ARC. However, it can take a short time, -typically up to a few minutes, for existing, in-progress connections in the Availability -Zone to complete. For more information, see Zonal shift in the Amazon Route 53 Application -Recovery Controller Developer Guide. +Zone in an Amazon Web Services Region, to help your application recover immediately, for +example, from a developer's bad code deployment or from an Amazon Web Services +infrastructure failure in a single Availability Zone. You can start a zonal shift in Route +53 ARC only for managed resources in your Amazon Web Services account in an Amazon Web +Services Region. Resources are automatically registered with Route 53 ARC by Amazon Web +Services services. At this time, you can only start a zonal shift for Network Load +Balancers and Application Load Balancers with cross-zone load balancing turned off. When +you start a zonal shift, traffic for the resource is no longer routed to the Availability +Zone. The zonal shift is created immediately in Route 53 ARC. However, it can take a short +time, typically up to a few minutes, for existing, in-progress connections in the +Availability Zone to complete. For more information, see Zonal shift in the Amazon Route 53 +Application Recovery Controller Developer Guide. # Arguments - `away_from`: The Availability Zone that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the - resource is instead moved to other Availability Zones in the AWS Region. + resource is instead moved to other Availability Zones in the Amazon Web Services Region. - `comment`: A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string. @@ -179,16 +337,13 @@ Recovery Controller Developer Guide. expiration. 
You can also cancel a zonal shift, before it expires, for example, if you're ready to restore traffic to the Availability Zone. To set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space: - <ul> <li> <p> <b>A lowercase letter m:</b> To specify that - the value is in minutes.</p> </li> <li> <p> <b>A lowercase - letter h:</b> To specify that the value is in hours.</p> </li> - </ul> <p>For example: <code>20h</code> means the zonal shift - expires in 20 hours. <code>120m</code> means the zonal shift expires in 120 - minutes (2 hours).</p> -- `resource_identifier`: The identifier for the resource to include in a zonal shift. The - identifier is the Amazon Resource Name (ARN) for the resource. At this time, you can only - start a zonal shift for Network Load Balancers and Application Load Balancers with - cross-zone load balancing turned off. + A lowercase letter m: To specify that the value is in minutes. A lowercase letter h: To + specify that the value is in hours. For example: 20h means the zonal shift expires in 20 + hours. 120m means the zonal shift expires in 120 minutes (2 hours). +- `resource_identifier`: The identifier for the resource to shift away traffic for. The + identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported + resources are Network Load Balancers and Application Load Balancers with cross-zone load + balancing turned off. """ function start_zonal_shift( @@ -239,13 +394,123 @@ function start_zonal_shift( ) end +""" + update_practice_run_configuration(resource_identifier) + update_practice_run_configuration(resource_identifier, params::Dict{String,<:Any}) + +Update a practice run configuration to change one or more of the following: add, change, or +remove the blocking alarm; change the outcome alarm; or add, change, or remove blocking +dates or time windows. + +# Arguments +- `resource_identifier`: The identifier for the resource that you want to update the + practice run configuration for. The identifier is the Amazon Resource Name (ARN) for the + resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"blockedDates"`: Add, change, or remove blocked dates for a practice run in zonal + autoshift. Optionally, you can block practice runs for specific calendar dates. The format + for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates, that dates and + times for practice runs are in UTC. Separate multiple blocked dates with spaces. For + example, if you have an application update scheduled to launch on May 1, 2024, and you + don't want practice runs to shift traffic away at that time, you could set a blocked date + for 2024-05-01. +- `"blockedWindows"`: Add, change, or remove windows of days and times for when you can, + optionally, block Route 53 ARC from starting a practice run for a resource. The format for + blocked windows is: DAY:HH:SS-DAY:HH:SS. Keep in mind, when you specify dates, that dates + and times for practice runs are in UTC. Also, be aware of potential time adjustments that + might be required for daylight saving time differences. Separate multiple blocked windows + with spaces. For example, say you run business report summaries three days a week. For this + scenario, you might set the following recurring days and times as blocked windows, for + example: MON-20:30-21:30 WED-20:30-21:30 FRI-20:30-21:30. 
+- `"blockingAlarms"`: Add, change, or remove the Amazon CloudWatch alarm that you
+  optionally specify as the blocking alarm for practice runs.
+- `"outcomeAlarms"`: Specify a new Amazon CloudWatch alarm as the outcome alarm for
+  practice runs.
+"""
+function update_practice_run_configuration(
+    resourceIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "PATCH",
+        "/configuration/$(resourceIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_practice_run_configuration(
+    resourceIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return arc_zonal_shift(
+        "PATCH",
+        "/configuration/$(resourceIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status)
+    update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status, params::Dict{String,<:Any})
+
+You can update the zonal autoshift status for a resource, to enable or disable zonal
+autoshift. When zonal autoshift is ENABLED, Amazon Web Services shifts away resource
+traffic from an Availability Zone, on your behalf, when Amazon Web Services determines that
+there's an issue in the Availability Zone that could potentially affect customers.
+
+# Arguments
+- `resource_identifier`: The identifier for the resource that you want to update the zonal
+  autoshift configuration for. The identifier is the Amazon Resource Name (ARN) for the
+  resource.
+- `zonal_autoshift_status`: The zonal autoshift status for the resource that you want to
+  update the zonal autoshift configuration for.
+
+"""
+function update_zonal_autoshift_configuration(
+    resourceIdentifier,
+    zonalAutoshiftStatus;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return arc_zonal_shift(
+        "PUT",
+        "/managedresources/$(resourceIdentifier)",
+        Dict{String,Any}("zonalAutoshiftStatus" => zonalAutoshiftStatus);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_zonal_autoshift_configuration(
+    resourceIdentifier,
+    zonalAutoshiftStatus,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return arc_zonal_shift(
+        "PUT",
+        "/managedresources/$(resourceIdentifier)",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("zonalAutoshiftStatus" => zonalAutoshiftStatus),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_zonal_shift(zonal_shift_id)
     update_zonal_shift(zonal_shift_id, params::Dict{String,<:Any})
 
-Update an active zonal shift in Amazon Route 53 Application Recovery Controller in your AWS
-account. You can update a zonal shift to set a new expiration, or edit or replace the
-comment for the zonal shift.
+Update an active zonal shift in Amazon Route 53 Application Recovery Controller in your
+Amazon Web Services account. You can update a zonal shift to set a new expiration, or edit
+or replace the comment for the zonal shift.
 
 # Arguments
 - `zonal_shift_id`: The identifier of a zonal shift.
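The arc_zonal_shift.jl changes above add practice run and zonal autoshift operations. As a rough orientation for reviewers, here is a minimal usage sketch, assuming the usual AWS.jl `@service` loading pattern; the ARNs are hypothetical, and the field names inside each `outcomeAlarms` entry ("type", "alarmIdentifier") are an assumption about the control-condition shape described in the docstrings, not something this patch defines.

using AWS
@service ARC_Zonal_Shift

# Hypothetical identifiers, for illustration only.
resource_arn = "arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/net/example/0123456789abcdef"
alarm_arn = "arn:aws:cloudwatch:us-east-1:111122223333:alarm:example-outcome-alarm"

# Create a practice run configuration with a single outcome alarm.
# The entry shape ("type"/"alarmIdentifier") is assumed from the docstrings above.
ARC_Zonal_Shift.create_practice_run_configuration(
    [Dict("type" => "CLOUDWATCH", "alarmIdentifier" => alarm_arn)],
    resource_arn,
)

# Enable zonal autoshift for the resource, then list autoshifts that are currently active.
ARC_Zonal_Shift.update_zonal_autoshift_configuration(resource_arn, "ENABLED")
ARC_Zonal_Shift.list_autoshifts(Dict("status" => "ACTIVE"))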
diff --git a/src/services/artifact.jl b/src/services/artifact.jl new file mode 100644 index 0000000000..7db7f478a1 --- /dev/null +++ b/src/services/artifact.jl @@ -0,0 +1,210 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: artifact +using AWS.Compat +using AWS.UUIDs + +""" + get_account_settings() + get_account_settings(params::Dict{String,<:Any}) + +Get the account settings for Artifact. + +""" +function get_account_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return artifact( + "GET", + "/v1/account-settings/get"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_account_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return artifact( + "GET", + "/v1/account-settings/get", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_report(report_id, term_token) + get_report(report_id, term_token, params::Dict{String,<:Any}) + +Get the content for a single report. + +# Arguments +- `report_id`: Unique resource ID for the report resource. +- `term_token`: Unique download token provided by GetTermForReport API. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"reportVersion"`: Version for the report resource. +""" +function get_report(reportId, termToken; aws_config::AbstractAWSConfig=global_aws_config()) + return artifact( + "GET", + "/v1/report/get", + Dict{String,Any}("reportId" => reportId, "termToken" => termToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_report( + reportId, + termToken, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return artifact( + "GET", + "/v1/report/get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("reportId" => reportId, "termToken" => termToken), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_report_metadata(report_id) + get_report_metadata(report_id, params::Dict{String,<:Any}) + +Get the metadata for a single report. + +# Arguments +- `report_id`: Unique resource ID for the report resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"reportVersion"`: Version for the report resource. +""" +function get_report_metadata(reportId; aws_config::AbstractAWSConfig=global_aws_config()) + return artifact( + "GET", + "/v1/report/getMetadata", + Dict{String,Any}("reportId" => reportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_report_metadata( + reportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return artifact( + "GET", + "/v1/report/getMetadata", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("reportId" => reportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_term_for_report(report_id) + get_term_for_report(report_id, params::Dict{String,<:Any}) + +Get the Term content associated with a single report. + +# Arguments +- `report_id`: Unique resource ID for the report resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"reportVersion"`: Version for the report resource. 
+""" +function get_term_for_report(reportId; aws_config::AbstractAWSConfig=global_aws_config()) + return artifact( + "GET", + "/v1/report/getTermForReport", + Dict{String,Any}("reportId" => reportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_term_for_report( + reportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return artifact( + "GET", + "/v1/report/getTermForReport", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("reportId" => reportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_reports() + list_reports(params::Dict{String,<:Any}) + +List available reports. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of resources to return in the paginated response. +- `"nextToken"`: Pagination token to request the next page of resources. +""" +function list_reports(; aws_config::AbstractAWSConfig=global_aws_config()) + return artifact( + "GET", "/v1/report/list"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_reports( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return artifact( + "GET", + "/v1/report/list", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_account_settings() + put_account_settings(params::Dict{String,<:Any}) + +Put the account settings for Artifact. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"notificationSubscriptionStatus"`: Desired notification subscription status. +""" +function put_account_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return artifact( + "PUT", + "/v1/account-settings/put"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_account_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return artifact( + "PUT", + "/v1/account-settings/put", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/athena.jl b/src/services/athena.jl index 35645fdf48..7aa6c7dfe0 100644 --- a/src/services/athena.jl +++ b/src/services/athena.jl @@ -246,10 +246,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one - and cannot modify. Queries that specify a Glue Data Catalog other than the default - AwsDataCatalog must be run on Athena engine version 2. In Regions where Athena engine - version 2 is not available, creating new Glue data catalogs results in an INVALID_INPUT - error. + and cannot modify. - `"Tags"`: A list of comma separated tags to add to the data catalog that is created. """ function create_data_catalog(Name, Type; aws_config::AbstractAWSConfig=global_aws_config()) @@ -281,8 +278,7 @@ end create_named_query(database, name, query_string, params::Dict{String,<:Any}) Creates a named query in the specified workgroup. Requires that you have access to the -workgroup. For code samples using the Amazon Web Services SDK for Java, see Examples and -Code Samples in the Amazon Athena User Guide. +workgroup. 
# Arguments - `database`: The database to which the query belongs. @@ -595,8 +591,6 @@ end delete_named_query(named_query_id, params::Dict{String,<:Any}) Deletes the named query if you have access to the workgroup in which the query was saved. -For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples -in the Amazon Athena User Guide. # Arguments - `named_query_id`: The unique ID of the query to delete. @@ -966,6 +960,10 @@ Returns the specified data catalog. # Arguments - `name`: The name of the data catalog to return. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WorkGroup"`: The name of the workgroup. Required if making an IAM Identity Center + request. """ function get_data_catalog(Name; aws_config::AbstractAWSConfig=global_aws_config()) return athena( @@ -996,6 +994,10 @@ Returns a database object for the specified database and data catalog. - `catalog_name`: The name of the data catalog that contains the database to return. - `database_name`: The name of the database to return. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WorkGroup"`: The name of the workgroup for which the metadata is being fetched. + Required if requesting an IAM Identity Center enabled Glue Data Catalog. """ function get_database( CatalogName, DatabaseName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1237,10 +1239,12 @@ end get_query_runtime_statistics(query_execution_id, params::Dict{String,<:Any}) Returns query execution runtime statistics related to a single execution of a query if you -have access to the workgroup in which the query ran. Query execution runtime statistics are -returned only when QueryExecutionStatusState is in a SUCCEEDED or FAILED state. Stage-level -input and output row count and data size statistics are not shown when a query has -row-level filters defined in Lake Formation. +have access to the workgroup in which the query ran. Statistics from the Timeline section +of the response object are available as soon as QueryExecutionStatusState is in a SUCCEEDED +or FAILED state. The remaining non-timeline statistics in the response (like stage-level +input and output row count and data size) are updated asynchronously and may not be +available immediately after a query completes. The non-timeline statistics are also not +included when a query has row-level filters defined in Lake Formation. # Arguments - `query_execution_id`: The unique ID of the query execution. @@ -1352,6 +1356,10 @@ Returns table metadata for the specified catalog, database, and table. - `database_name`: The name of the database that contains the table metadata to return. - `table_name`: The name of the table for which metadata is returned. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WorkGroup"`: The name of the workgroup for which the metadata is being fetched. + Required if requesting an IAM Identity Center enabled Glue Data Catalog. 
""" function get_table_metadata( CatalogName, DatabaseName, TableName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1426,16 +1434,17 @@ function get_work_group( end """ - import_notebook(name, payload, type, work_group) - import_notebook(name, payload, type, work_group, params::Dict{String,<:Any}) + import_notebook(name, type, work_group) + import_notebook(name, type, work_group, params::Dict{String,<:Any}) -Imports a single ipynb file to a Spark enabled workgroup. The maximum file size that can be -imported is 10 megabytes. If an ipynb file with the same name already exists in the -workgroup, throws an error. +Imports a single ipynb file to a Spark enabled workgroup. To import the notebook, the +request must specify a value for either Payload or NoteBookS3LocationUri. If neither is +specified or both are specified, an InvalidRequestException occurs. The maximum file size +that can be imported is 10 megabytes. If an ipynb file with the same name already exists in +the workgroup, throws an error. # Arguments - `name`: The name of the notebook to import. -- `payload`: The notebook content to be imported. - `type`: The notebook content type. Currently, the only valid type is IPYNB. - `work_group`: The name of the Spark enabled workgroup to import the notebook to. @@ -1446,22 +1455,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail. +- `"NotebookS3LocationUri"`: A URI that specifies the Amazon S3 location of a notebook file + in ipynb format. +- `"Payload"`: The notebook content to be imported. The payload must be in ipynb format. """ function import_notebook( - Name, Payload, Type, WorkGroup; aws_config::AbstractAWSConfig=global_aws_config() + Name, Type, WorkGroup; aws_config::AbstractAWSConfig=global_aws_config() ) return athena( "ImportNotebook", - Dict{String,Any}( - "Name" => Name, "Payload" => Payload, "Type" => Type, "WorkGroup" => WorkGroup - ); + Dict{String,Any}("Name" => Name, "Type" => Type, "WorkGroup" => WorkGroup); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function import_notebook( Name, - Payload, Type, WorkGroup, params::AbstractDict{String}; @@ -1472,12 +1481,7 @@ function import_notebook( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "Name" => Name, - "Payload" => Payload, - "Type" => Type, - "WorkGroup" => WorkGroup, - ), + Dict{String,Any}("Name" => Name, "Type" => Type, "WorkGroup" => WorkGroup), params, ), ); @@ -1607,6 +1611,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call. +- `"WorkGroup"`: The name of the workgroup. Required if making an IAM Identity Center + request. """ function list_data_catalogs(; aws_config::AbstractAWSConfig=global_aws_config()) return athena( @@ -1636,6 +1642,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. 
To obtain the next set of pages, pass in the NextToken from the response object of the previous page call. +- `"WorkGroup"`: The name of the workgroup for which the metadata is being fetched. + Required if requesting an IAM Identity Center enabled Glue Data Catalog. """ function list_databases(CatalogName; aws_config::AbstractAWSConfig=global_aws_config()) return athena( @@ -1738,8 +1746,7 @@ end Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the specified workgroup. If a workgroup is not specified, -lists the saved queries for the primary workgroup. For code samples using the Amazon Web -Services SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide. +lists the saved queries for the primary workgroup. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1893,10 +1900,9 @@ end list_query_executions(params::Dict{String,<:Any}) Provides a list of available query execution IDs for the queries in the specified -workgroup. If a workgroup is not specified, returns a list of query execution IDs for the -primary workgroup. Requires you to have access to the workgroup in which the queries ran. -For code samples using the Amazon Web Services SDK for Java, see Examples and Code Samples -in the Amazon Athena User Guide. +workgroup. Athena keeps a query history for 45 days. If a workgroup is not specified, +returns a list of query execution IDs for the primary workgroup. Requires you to have +access to the workgroup in which the queries ran. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1989,6 +1995,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call. +- `"WorkGroup"`: The name of the workgroup for which the metadata is being fetched. + Required if requesting an IAM Identity Center enabled Glue Data Catalog. """ function list_table_metadata( CatalogName, DatabaseName; aws_config::AbstractAWSConfig=global_aws_config() @@ -2145,7 +2153,11 @@ end start_calculation_execution(session_id, params::Dict{String,<:Any}) Submits calculations for execution within a session. You can supply the code to run as an -inline code block within the request. +inline code block within the request. The request syntax requires the +StartCalculationExecutionRequestCodeBlock parameter or the +CalculationConfigurationCodeBlock parameter, but not both. Because +CalculationConfigurationCodeBlock is deprecated, use the +StartCalculationExecutionRequestCodeBlock parameter instead. # Arguments - `session_id`: The session ID. @@ -2161,7 +2173,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services SDK for Java) auto-generate the token for users. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail. -- `"CodeBlock"`: A string that contains the code of the calculation. +- `"CodeBlock"`: A string that contains the code of the calculation. Use this parameter + instead of CalculationConfigurationCodeBlock, which is deprecated. - `"Description"`: A description of the calculation. 
""" function start_calculation_execution( @@ -2205,11 +2218,14 @@ SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another StartQueryExecution request - is received, the same response is returned and another query is not created. If a parameter - has changed, for example, the QueryString, an error is returned. This token is listed as - not required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for - Java) auto-generate the token for users. If you are not using the Amazon Web Services SDK - or the Amazon Web Services CLI, you must provide this token or the action will fail. + is received, the same response is returned and another query is not created. An error is + returned if a parameter, such as QueryString, has changed. A call to StartQueryExecution + that uses a previous client request token returns the same QueryExecutionId even if the + requester doesn't have permission on the tables specified in QueryString. This token is + listed as not required because Amazon Web Services SDKs (for example the Amazon Web + Services SDK for Java) auto-generate the token for users. If you are not using the Amazon + Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action + will fail. - `"ExecutionParameters"`: A list of values for the parameters in a query. The values are applied sequentially to the parameters in the query in the order in which the parameters occur. @@ -2367,8 +2383,7 @@ end stop_query_execution(query_execution_id, params::Dict{String,<:Any}) Stops a query execution. Requires you to have access to the workgroup in which the query -ran. For code samples using the Amazon Web Services SDK for Java, see Examples and Code -Samples in the Amazon Athena User Guide. +ran. # Arguments - `query_execution_id`: The unique ID of the query execution to stop. diff --git a/src/services/auditmanager.jl b/src/services/auditmanager.jl index 6b33ae45a8..2fa4b3ca67 100644 --- a/src/services/auditmanager.jl +++ b/src/services/auditmanager.jl @@ -238,15 +238,15 @@ end batch_import_evidence_to_assessment_control(assessment_id, control_id, control_set_id, manual_evidence) batch_import_evidence_to_assessment_control(assessment_id, control_id, control_set_id, manual_evidence, params::Dict{String,<:Any}) -Uploads one or more pieces of evidence to a control in an Audit Manager assessment. You can -upload manual evidence from any Amazon Simple Storage Service (Amazon S3) bucket by -specifying the S3 URI of the evidence. You must upload manual evidence to your S3 bucket -before you can upload it to your assessment. For instructions, see CreateBucket and -PutObject in the Amazon Simple Storage Service API Reference. The following restrictions -apply to this action: Maximum size of an individual evidence file: 100 MB Number of -daily manual evidence uploads per control: 100 Supported file formats: See Supported file -types for manual evidence in the Audit Manager User Guide For more information about -Audit Manager service restrictions, see Quotas and restrictions for Audit Manager. +Adds one or more pieces of evidence to a control in an Audit Manager assessment. You can +import manual evidence from any S3 bucket by specifying the S3 URI of the object. 
You can +also upload a file from your browser, or enter plain text in response to a risk assessment +question. The following restrictions apply to this action: manualEvidence can be only +one of the following: evidenceFileName, s3ResourcePath, or textResponse Maximum size of +an individual evidence file: 100 MB Number of daily manual evidence uploads per control: +100 Supported file formats: See Supported file types for manual evidence in the Audit +Manager User Guide For more information about Audit Manager service restrictions, see +Quotas and restrictions for Audit Manager. # Arguments - `assessment_id`: The identifier for the assessment. @@ -671,7 +671,11 @@ end delete_control(control_id) delete_control(control_id, params::Dict{String,<:Any}) - Deletes a custom control in Audit Manager. + Deletes a custom control in Audit Manager. When you invoke this operation, the custom +control is deleted from any frameworks or assessments that it’s currently part of. As a +result, Audit Manager will stop collecting evidence for that custom control in all of your +assessments. This includes assessments that you previously created before you deleted the +custom control. # Arguments - `control_id`: The unique identifier for the control. @@ -838,7 +842,7 @@ end get_account_status() get_account_status(params::Dict{String,<:Any}) - Returns the registration status of an account in Audit Manager. + Gets the registration status of an account in Audit Manager. """ function get_account_status(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -862,7 +866,7 @@ end get_assessment(assessment_id) get_assessment(assessment_id, params::Dict{String,<:Any}) -Returns an assessment from Audit Manager. +Gets information about a specified assessment. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -894,7 +898,7 @@ end get_assessment_framework(framework_id) get_assessment_framework(framework_id, params::Dict{String,<:Any}) -Returns a framework from Audit Manager. +Gets information about a specified framework. # Arguments - `framework_id`: The identifier for the framework. @@ -928,7 +932,7 @@ end get_assessment_report_url(assessment_id, assessment_report_id) get_assessment_report_url(assessment_id, assessment_report_id, params::Dict{String,<:Any}) - Returns the URL of an assessment report in Audit Manager. + Gets the URL of an assessment report in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -964,7 +968,7 @@ end get_change_logs(assessment_id) get_change_logs(assessment_id, params::Dict{String,<:Any}) - Returns a list of changelogs from Audit Manager. + Gets a list of changelogs from Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1003,7 +1007,7 @@ end get_control(control_id) get_control(control_id, params::Dict{String,<:Any}) - Returns a control from Audit Manager. + Gets information about a specified control. # Arguments - `control_id`: The identifier for the control. @@ -1035,7 +1039,7 @@ end get_delegations() get_delegations(params::Dict{String,<:Any}) - Returns a list of delegations from an audit owner to a delegate. + Gets a list of delegations from an audit owner to a delegate. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1064,7 +1068,7 @@ end get_evidence(assessment_id, control_set_id, evidence_folder_id, evidence_id) get_evidence(assessment_id, control_set_id, evidence_folder_id, evidence_id, params::Dict{String,<:Any}) - Returns evidence from Audit Manager. + Gets information about a specified evidence item. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1109,7 +1113,7 @@ end get_evidence_by_evidence_folder(assessment_id, control_set_id, evidence_folder_id) get_evidence_by_evidence_folder(assessment_id, control_set_id, evidence_folder_id, params::Dict{String,<:Any}) - Returns all evidence from a specified evidence folder in Audit Manager. + Gets all evidence from a specified evidence folder in Audit Manager. # Arguments - `assessment_id`: The identifier for the assessment. @@ -1152,11 +1156,55 @@ function get_evidence_by_evidence_folder( ) end +""" + get_evidence_file_upload_url(file_name) + get_evidence_file_upload_url(file_name, params::Dict{String,<:Any}) + +Creates a presigned Amazon S3 URL that can be used to upload a file as manual evidence. For +instructions on how to use this operation, see Upload a file from your browser in the +Audit Manager User Guide. The following restrictions apply to this operation: Maximum +size of an individual evidence file: 100 MB Number of daily manual evidence uploads per +control: 100 Supported file formats: See Supported file types for manual evidence in the +Audit Manager User Guide For more information about Audit Manager service restrictions, +see Quotas and restrictions for Audit Manager. + +# Arguments +- `file_name`: The file that you want to upload. For a list of supported file formats, see + Supported file types for manual evidence in the Audit Manager User Guide. + +""" +function get_evidence_file_upload_url( + fileName; aws_config::AbstractAWSConfig=global_aws_config() +) + return auditmanager( + "GET", + "/evidenceFileUploadUrl", + Dict{String,Any}("fileName" => fileName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_evidence_file_upload_url( + fileName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return auditmanager( + "GET", + "/evidenceFileUploadUrl", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("fileName" => fileName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_evidence_folder(assessment_id, control_set_id, evidence_folder_id) get_evidence_folder(assessment_id, control_set_id, evidence_folder_id, params::Dict{String,<:Any}) - Returns an evidence folder from the specified assessment in Audit Manager. + Gets an evidence folder from a specified assessment in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1198,7 +1246,7 @@ end get_evidence_folders_by_assessment(assessment_id) get_evidence_folders_by_assessment(assessment_id, params::Dict{String,<:Any}) - Returns the evidence folders from a specified assessment in Audit Manager. + Gets the evidence folders from a specified assessment in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1237,8 +1285,8 @@ end get_evidence_folders_by_assessment_control(assessment_id, control_id, control_set_id) get_evidence_folders_by_assessment_control(assessment_id, control_id, control_set_id, params::Dict{String,<:Any}) - Returns a list of evidence folders that are associated with a specified control in an -Audit Manager assessment. 
+ Gets a list of evidence folders that are associated with a specified control in an Audit +Manager assessment. # Arguments - `assessment_id`: The identifier for the assessment. @@ -1335,7 +1383,7 @@ end get_organization_admin_account() get_organization_admin_account(params::Dict{String,<:Any}) - Returns the name of the delegated Amazon Web Services administrator account for the + Gets the name of the delegated Amazon Web Services administrator account for a specified organization. """ @@ -1363,9 +1411,13 @@ end get_services_in_scope() get_services_in_scope(params::Dict{String,<:Any}) -Returns a list of all of the Amazon Web Services that you can choose to include in your -assessment. When you create an assessment, specify which of these services you want to -include to narrow the assessment's scope. +Gets a list of the Amazon Web Services from which Audit Manager can collect evidence. +Audit Manager defines which Amazon Web Services are in scope for an assessment. Audit +Manager infers this scope by examining the assessment’s controls and their data sources, +and then mapping this information to one or more of the corresponding Amazon Web Services +that are in this list. For information about why it's no longer possible to specify +services in scope manually, see I can't edit the services in scope for my assessment in the +Troubleshooting section of the Audit Manager user guide. """ function get_services_in_scope(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1385,7 +1437,7 @@ end get_settings(attribute) get_settings(attribute, params::Dict{String,<:Any}) - Returns the settings for the specified Amazon Web Services account. + Gets the settings for a specified Amazon Web Services account. # Arguments - `attribute`: The list of setting attribute enum values. @@ -1425,7 +1477,10 @@ conditions are met, no data is listed for that control. # Arguments - `assessment_id`: The unique identifier for the active assessment. -- `control_domain_id`: The unique identifier for the control domain. +- `control_domain_id`: The unique identifier for the control domain. Audit Manager + supports the control domains that are provided by Amazon Web Services Control Catalog. For + information about how to find a list of available control domains, see ListDomains in the + Amazon Web Services Control Catalog API Reference. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1620,9 +1675,12 @@ end list_control_domain_insights(params::Dict{String,<:Any}) Lists the latest analytics data for control domains across all of your active assessments. - A control domain is listed only if at least one of the controls within that domain -collected evidence on the lastUpdated date of controlDomainInsights. If this condition -isn’t met, no data is listed for that control domain. +Audit Manager supports the control domains that are provided by Amazon Web Services Control +Catalog. For information about how to find a list of available control domains, see +ListDomains in the Amazon Web Services Control Catalog API Reference. A control domain is +listed only if at least one of the controls within that domain collected evidence on the +lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed +for that control domain. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1654,10 +1712,13 @@ end list_control_domain_insights_by_assessment(assessment_id) list_control_domain_insights_by_assessment(assessment_id, params::Dict{String,<:Any}) -Lists analytics data for control domains within a specified active assessment. A control -domain is listed only if at least one of the controls within that domain collected evidence -on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is -listed for that domain. +Lists analytics data for control domains within a specified active assessment. Audit +Manager supports the control domains that are provided by Amazon Web Services Control +Catalog. For information about how to find a list of available control domains, see +ListDomains in the Amazon Web Services Control Catalog API Reference. A control domain is +listed only if at least one of the controls within that domain collected evidence on the +lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed +for that domain. # Arguments - `assessment_id`: The unique identifier for the active assessment. @@ -1706,7 +1767,10 @@ controlInsightsMetadata. If neither of these conditions are met, no data is list control. # Arguments -- `control_domain_id`: The unique identifier for the control domain. +- `control_domain_id`: The unique identifier for the control domain. Audit Manager + supports the control domains that are provided by Amazon Web Services Control Catalog. For + information about how to find a list of available control domains, see ListDomains in the + Amazon Web Services Control Catalog API Reference. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1750,13 +1814,23 @@ end Returns a list of controls from Audit Manager. # Arguments -- `control_type`: The type of control, such as a standard control or a custom control. +- `control_type`: A filter that narrows the list of controls to a specific type. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Represents the maximum number of results on a page or for an API request - call. -- `"nextToken"`: The pagination token that's used to fetch the next set of results. +- `"controlCatalogId"`: A filter that narrows the list of controls to a specific resource + from the Amazon Web Services Control Catalog. To use this parameter, specify the ARN of + the Control Catalog resource. You can specify either a control domain, a control objective, + or a common control. For information about how to find the ARNs for these resources, see + ListDomains , ListObjectives , and ListCommonControls . You can only filter by one + Control Catalog resource at a time. Specifying multiple resource ARNs isn’t currently + supported. If you want to filter by more than one ARN, we recommend that you run the + ListControls operation separately for each ARN. Alternatively, specify UNCATEGORIZED to + list controls that aren't mapped to a Control Catalog resource. For example, this operation + might return a list of custom controls that don't belong to any control domain or control + objective. +- `"maxResults"`: The maximum number of results on a page or for an API request call. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. 
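# Example
A minimal sketch of the new Control Catalog filter. The ARN is a placeholder, and the
`@service` invocation and module name are assumptions about how these generated bindings
are loaded:

    using AWS
    @service AuditManager
    # List custom controls that map to a specific Control Catalog domain (placeholder ARN).
    AuditManager.list_controls(
        "Custom", Dict("controlCatalogId" => "arn:aws:controlcatalog:::domain/EXAMPLE")
    )
    # Or list custom controls that aren't mapped to any Control Catalog resource.
    AuditManager.list_controls("Custom", Dict("controlCatalogId" => "UNCATEGORIZED"))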
""" function list_controls(controlType; aws_config::AbstractAWSConfig=global_aws_config()) return auditmanager( @@ -1787,10 +1861,10 @@ end list_keywords_for_data_source(source) list_keywords_for_data_source(source, params::Dict{String,<:Any}) - Returns a list of keywords that are pre-mapped to the specified control data source. +Returns a list of keywords that are pre-mapped to the specified control data source. # Arguments -- `source`: The control mapping data source that the keywords apply to. +- `source`: The control mapping data source that the keywords apply to. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2435,8 +2509,10 @@ end # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"defaultAssessmentReportsDestination"`: The default storage destination for assessment - reports. +- `"defaultAssessmentReportsDestination"`: The default S3 destination bucket for storing + assessment reports. +- `"defaultExportDestination"`: The default S3 destination bucket for storing evidence + finder exports. - `"defaultProcessOwners"`: A list of the default audit owners. - `"deregistrationPolicy"`: The deregistration policy for your Audit Manager data. You can use this attribute to determine how your data is handled when you deregister Audit Manager. diff --git a/src/services/auto_scaling.jl b/src/services/auto_scaling.jl index 50b07239ff..00c4e42fe6 100644 --- a/src/services/auto_scaling.jl +++ b/src/services/auto_scaling.jl @@ -15,8 +15,7 @@ desired capacity of the group exceeds the maximum size of the group, the operati If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups. For more -information, see Attach EC2 instances to your Auto Scaling group in the Amazon EC2 Auto -Scaling User Guide. +information, see Detach or attach instances in the Amazon EC2 Auto Scaling User Guide. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -392,8 +391,7 @@ notifications to the target. Create the lifecycle hook. Specify whether the ho when the instances launch or terminate. If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state. If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call. For more -information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling -User Guide. +information, see Complete a lifecycle action in the Amazon EC2 Auto Scaling User Guide. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -459,14 +457,12 @@ functionality for Amazon EC2 Auto Scaling and Amazon EC2. Creates an Auto Scali with the specified name and attributes. If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Quotas for Amazon EC2 Auto Scaling in the Amazon -EC2 Auto Scaling User Guide. For introductory exercises for creating an Auto Scaling group, -see Getting started with Amazon EC2 Auto Scaling and Tutorial: Set up a scaled and -load-balanced application in the Amazon EC2 Auto Scaling User Guide. For more information, -see Auto Scaling groups in the Amazon EC2 Auto Scaling User Guide. 
Every Auto Scaling group -has three size properties (DesiredCapacity, MaxSize, and MinSize). Usually, you set these -sizes based on a specific number of instances. However, if you configure a mixed instances -policy that defines weights for the instance types, you must specify these sizes with the -same units that you use for weighting instances. +EC2 Auto Scaling User Guide. If you're new to Amazon EC2 Auto Scaling, see the introductory +tutorials in Get started with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User +Guide. Every Auto Scaling group has three size properties (DesiredCapacity, MaxSize, and +MinSize). Usually, you set these sizes based on a specific number of instances. However, if +you configure a mixed instances policy that defines weights for the instance types, you +must specify these sizes with the same units that you use for weighting instances. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. This name must be unique @@ -518,7 +514,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys group. - `"DesiredCapacityType"`: The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance - type selection only. For more information, see Creating an Auto Scaling group using + type selection only. For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances. Valid values: units | vcpu | memory-mib @@ -530,14 +526,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys EC2 Auto Scaling User Guide. Default: 0 seconds - `"HealthCheckType"`: A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot - be disabled. For more information, see Health checks for Auto Scaling instances in the - Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was - previously set. + be disabled. For more information, see Health checks for instances in an Auto Scaling group + in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that + was previously set. - `"InstanceId"`: The ID of the instance used to base the launch configuration on. If specified, Amazon EC2 Auto Scaling uses the configuration values from the specified instance to create a new launch configuration. To get the instance ID, use the Amazon EC2 - DescribeInstances API operation. For more information, see Creating an Auto Scaling group - using an EC2 instance in the Amazon EC2 Auto Scaling User Guide. + DescribeInstances API operation. For more information, see Create an Auto Scaling group + using parameters from an existing instance in the Amazon EC2 Auto Scaling User Guide. +- `"InstanceMaintenancePolicy"`: An instance maintenance policy. For more information, see + Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide. - `"LaunchConfigurationName"`: The name of the launch configuration to use to launch instances. Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId). @@ -545,8 +543,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launch instances. 
Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId). The launch template that is specified must be configured for use with an Auto Scaling - group. For more information, see Creating a launch template for an Auto Scaling group in - the Amazon EC2 Auto Scaling User Guide. + group. For more information, see Create a launch template for an Auto Scaling group in the + Amazon EC2 Auto Scaling User Guide. - `"LifecycleHookSpecificationList"`: One or more lifecycle hooks to add to the Auto Scaling group before instances are launched. - `"LoadBalancerNames"`: A list of Classic Load Balancers associated with this Auto Scaling @@ -554,14 +552,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specify the TargetGroupARNs property instead. - `"MaxInstanceLifetime"`: The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal - to or greater than 86,400 seconds (1 day). For more information, see Replacing Auto Scaling + to or greater than 86,400 seconds (1 day). For more information, see Replace Auto Scaling instances based on maximum instance lifetime in the Amazon EC2 Auto Scaling User Guide. - `"MixedInstancesPolicy"`: The mixed instances policy. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide. - `"NewInstancesProtectedFromScaleIn"`: Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information - about preventing instances from terminating on scale in, see Using instance scale-in + about preventing instances from terminating on scale in, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. - `"PlacementGroup"`: The name of the placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux @@ -588,8 +586,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Guide. - `"TerminationPolicies"`: A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For - more information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon - EC2 Auto Scaling User Guide. Valid values: Default | AllocationStrategy | + more information, see Configure termination policies for Amazon EC2 Auto Scaling in the + Amazon EC2 Auto Scaling User Guide. Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias - `"TrafficSources"`: The list of traffic sources to attach to this Auto Scaling group. You @@ -669,9 +667,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet. If you specify true, each instance in the Auto Scaling group - receives a unique public IPv4 address. For more information, see Launching Auto Scaling - instances in a VPC in the Amazon EC2 Auto Scaling User Guide. 
If you specify this property, - you must specify at least one subnet for VPCZoneIdentifier when you create your group. + receives a unique public IPv4 address. For more information, see Provide network + connectivity for your Auto Scaling instances using Amazon VPC in the Amazon EC2 Auto + Scaling User Guide. If you specify this property, you must specify at least one subnet for + VPCZoneIdentifier when you create your group. - `"BlockDeviceMappings"`: The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the @@ -690,20 +689,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys IAM role. For more information, see IAM role for applications that run on Amazon EC2 instances in the Amazon EC2 Auto Scaling User Guide. - `"ImageId"`: The ID of the Amazon Machine Image (AMI) that was assigned during - registration. For more information, see Finding a Linux AMI in the Amazon EC2 User Guide - for Linux Instances. If you specify InstanceId, an ImageId is not required. + registration. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for + Linux Instances. If you specify InstanceId, an ImageId is not required. - `"InstanceId"`: The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping. To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request. For more information, see - Creating a launch configuration using an EC2 instance in the Amazon EC2 Auto Scaling User - Guide. + Create a launch configuration in the Amazon EC2 Auto Scaling User Guide. - `"InstanceMonitoring"`: Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring. The default value is true (enabled). When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates - metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling - Instances in the Amazon EC2 Auto Scaling User Guide. + metrics every 5 minutes. For more information, see Configure monitoring for Auto Scaling + instances in the Amazon EC2 Auto Scaling User Guide. - `"InstanceType"`: Specifies the instance type of the EC2 instance. For information about available instance types, see Available instance types in the Amazon EC2 User Guide for Linux Instances. If you specify InstanceId, an InstanceType is not required. @@ -711,23 +709,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances. - `"KeyName"`: The name of the key pair. For more information, see Amazon EC2 key pairs and - Linux instances in the Amazon EC2 User Guide for Linux Instances. + Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances. - `"MetadataOptions"`: The metadata options for the instances. For more information, see - Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide. + Configure the instance metadata options in the Amazon EC2 Auto Scaling User Guide. 
- `"PlacementTenancy"`: The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this - property to dedicated. For more information, see Configuring instance tenancy with Amazon - EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. If you specify - PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you - create your group. Valid values: default | dedicated + property to dedicated. If you specify PlacementTenancy, you must specify at least one + subnet for VPCZoneIdentifier when you create your group. Valid values: default | dedicated - `"RamdiskId"`: The ID of the RAM disk to select. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances. - `"SecurityGroups"`: A list that contains the security group IDs to assign to the - instances in the Auto Scaling group. For more information, see Control traffic to resources - using security groups in the Amazon Virtual Private Cloud User Guide. + instances in the Auto Scaling group. For more information, see Control traffic to your + Amazon Web Services resources using security groups in the Amazon Virtual Private Cloud + User Guide. - `"SpotPrice"`: The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Request Spot Instances for fault-tolerant and @@ -999,8 +996,8 @@ end Deletes the specified scaling policy. Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if -it no longer has an associated action. For more information, see Deleting a scaling policy -in the Amazon EC2 Auto Scaling User Guide. +it no longer has an associated action. For more information, see Delete a scaling policy in +the Amazon EC2 Auto Scaling User Guide. # Arguments - `policy_name`: The name or Amazon Resource Name (ARN) of the policy. @@ -1313,15 +1310,15 @@ end describe_instance_refreshes(auto_scaling_group_name) describe_instance_refreshes(auto_scaling_group_name, params::Dict{String,<:Any}) -Gets information about the instance refreshes for the specified Auto Scaling group. This -operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps -you update instances in your Auto Scaling group after you make configuration changes. To -help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns -information about the instance refreshes you previously initiated, including their status, -start time, end time, the percentage of the instance refresh that is complete, and the -number of instances remaining to update before the instance refresh is complete. If a -rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling -also returns information about the rollback of the instance refresh. +Gets information about the instance refreshes for the specified Auto Scaling group from the +previous six weeks. 
This operation is part of the instance refresh feature in Amazon EC2 +Auto Scaling, which helps you update instances in your Auto Scaling group after you make +configuration changes. To help you determine the status of an instance refresh, Amazon EC2 +Auto Scaling returns information about the instance refreshes you previously initiated, +including their status, start time, end time, the percentage of the instance refresh that +is complete, and the number of instances remaining to update before the instance refresh is +complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 +Auto Scaling also returns information about the rollback of the instance refresh. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -1698,12 +1695,12 @@ end Gets information about the scaling activities in the account and Region. When scaling events occur, you see a record of the scaling activity in the scaling activities. For more -information, see Verifying a scaling activity for an Auto Scaling group in the Amazon EC2 -Auto Scaling User Guide. If the scaling event succeeds, the value of the StatusCode element -in the response is Successful. If an attempt to launch instances failed, the StatusCode -value is Failed or Cancelled and the StatusMessage element in the response indicates the -cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon -EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. +information, see Verify a scaling activity for an Auto Scaling group in the Amazon EC2 Auto +Scaling User Guide. If the scaling event succeeds, the value of the StatusCode element in +the response is Successful. If an attempt to launch instances failed, the StatusCode value +is Failed or Cancelled and the StatusMessage element in the response indicates the cause of +the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto +Scaling in the Amazon EC2 Auto Scaling User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1839,8 +1836,8 @@ end describe_termination_policy_types(params::Dict{String,<:Any}) Describes the termination policies supported by Amazon EC2 Auto Scaling. For more -information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 -Auto Scaling User Guide. +information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon +EC2 Auto Scaling User Guide. """ function describe_termination_policy_types(; @@ -1971,8 +1968,8 @@ specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling la instances to replace the ones that are detached. If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are -deregistered from the target groups. For more information, see Detach EC2 instances from -your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. +deregistered from the target groups. For more information, see Detach or attach instances +in the Amazon EC2 Auto Scaling User Guide. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -2144,9 +2141,9 @@ end detach_traffic_sources(auto_scaling_group_name, traffic_sources, params::Dict{String,<:Any}) Detaches one or more traffic sources from the specified Auto Scaling group. 
When you detach -a taffic, it enters the Removing state while deregistering the instances in the group. When -all instances are deregistered, then you can no longer describe the traffic source using -the DescribeTrafficSources API call. The instances continue to run. +a traffic source, it enters the Removing state while deregistering the instances in the +group. When all instances are deregistered, then you can no longer describe the traffic +source using the DescribeTrafficSources API call. The instances continue to run. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -2209,8 +2206,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys GroupTotalCapacity WarmPoolDesiredCapacity WarmPoolWarmedCapacity WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity If you omit this - property, all metrics are disabled. For more information, see Auto Scaling group metrics in - the Amazon EC2 Auto Scaling User Guide. + property, all metrics are disabled. For more information, see Amazon CloudWatch metrics for + Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. """ function disable_metrics_collection( AutoScalingGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -2267,7 +2264,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity If you specify Granularity and don't specify any metrics, all metrics are enabled. For more information, - see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide. + see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling + User Guide. """ function enable_metrics_collection( AutoScalingGroupName, Granularity; aws_config::AbstractAWSConfig=global_aws_config() @@ -2628,9 +2626,9 @@ end Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address. This configuration overwrites any existing configuration. For -more information, see Getting Amazon SNS notifications when your Auto Scaling group scales -in the Amazon EC2 Auto Scaling User Guide. If you exceed your maximum limit of SNS topics, -which is 10 per Auto Scaling group, the call fails. +more information, see Amazon SNS notification options for Amazon EC2 Auto Scaling in the +Amazon EC2 Auto Scaling User Guide. If you exceed your maximum limit of SNS topics, which +is 10 per Auto Scaling group, the call fails. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -2711,7 +2709,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Default: None - `"Enabled"`: Indicates whether the scaling policy is enabled or disabled. The default is - enabled. For more information, see Disabling a scaling policy for an Auto Scaling group in + enabled. For more information, see Disable a scaling policy for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. - `"EstimatedInstanceWarmup"`: Not needed if the default instance warmup is defined for the group. 
The estimated time, in seconds, until a newly launched instance can contribute @@ -2745,8 +2743,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the policy type is PredictiveScaling. - `"ScalingAdjustment"`: The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from - the current capacity. For exact capacity, you must specify a positive value. Required if - the policy type is SimpleScaling. (Not used with any other policy type.) + the current capacity. For exact capacity, you must specify a non-negative value. Required + if the policy type is SimpleScaling. (Not used with any other policy type.) - `"StepAdjustments"`: A set of adjustments that enable you to scale based on the size of the alarm breach. Required if the policy type is StepScaling. (Not used with any other policy type.) @@ -2879,13 +2877,11 @@ end Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet -its new desired capacity. For more information and example configurations, see Warm pools -for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. This operation must -be called from the Region in which the Auto Scaling group was created. This operation -cannot be called on an Auto Scaling group that has a mixed instances policy or a launch -template or launch configuration that requests Spot Instances. You can view the instances -in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm -pool, you can delete it by calling the DeleteWarmPool API. +its new desired capacity. This operation must be called from the Region in which the Auto +Scaling group was created. You can view the instances in the warm pool using the +DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by +calling the DeleteWarmPool API. For more information, see Warm pools for Amazon EC2 Auto +Scaling in the Amazon EC2 Auto Scaling User Guide. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -3017,8 +3013,8 @@ end resume_processes(auto_scaling_group_name, params::Dict{String,<:Any}) Resumes the specified suspended auto scaling processes, or all suspended process, for the -specified Auto Scaling group. For more information, see Suspending and resuming scaling -processes in the Amazon EC2 Auto Scaling User Guide. +specified Auto Scaling group. For more information, see Suspend and resume Amazon EC2 Auto +Scaling processes in the Amazon EC2 Auto Scaling User Guide. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -3060,8 +3056,8 @@ function resume_processes( end """ - rollback_instance_refresh() - rollback_instance_refresh(params::Dict{String,<:Any}) + rollback_instance_refresh(auto_scaling_group_name) + rollback_instance_refresh(auto_scaling_group_name, params::Dict{String,<:Any}) Cancels an instance refresh that is in progress and rolls back any changes that it made. Amazon EC2 Auto Scaling replaces any instances that were replaced during the instance @@ -3076,21 +3072,34 @@ launch template's Latest or Default version. When you receive a successful res this operation, Amazon EC2 Auto Scaling immediately begins replacing instances. 
You can check the status of this operation through the DescribeInstanceRefreshes API operation. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AutoScalingGroupName"`: The name of the Auto Scaling group. +# Arguments +- `auto_scaling_group_name`: The name of the Auto Scaling group. + """ -function rollback_instance_refresh(; aws_config::AbstractAWSConfig=global_aws_config()) +function rollback_instance_refresh( + AutoScalingGroupName; aws_config::AbstractAWSConfig=global_aws_config() +) return auto_scaling( - "RollbackInstanceRefresh"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "RollbackInstanceRefresh", + Dict{String,Any}("AutoScalingGroupName" => AutoScalingGroupName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function rollback_instance_refresh( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + AutoScalingGroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return auto_scaling( "RollbackInstanceRefresh", - params; + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AutoScalingGroupName" => AutoScalingGroupName), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -3158,7 +3167,7 @@ end set_instance_health(health_status, instance_id, params::Dict{String,<:Any}) Sets the health status of the specified instance. For more information, see Health checks -for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide. +for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. # Arguments - `health_status`: The health status of the instance. Set to Healthy to have the instance @@ -3171,8 +3180,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ShouldRespectGracePeriod"`: If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. Set this to False, to have the call not respect the grace period associated with - the group. For more information about the health check grace period, see - CreateAutoScalingGroup in the Amazon EC2 Auto Scaling API Reference. + the group. For more information about the health check grace period, see Set the health + check grace period for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. """ function set_instance_health( HealthStatus, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -3211,8 +3220,7 @@ end set_instance_protection(auto_scaling_group_name, instance_ids, protected_from_scale_in, params::Dict{String,<:Any}) Updates the instance protection settings of the specified instances. This operation cannot -be called on instances in a warm pool. For more information about preventing instances that -are part of an Auto Scaling group from terminating on scale in, see Using instance scale-in +be called on instances in a warm pool. For more information, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails. @@ -3269,27 +3277,24 @@ end start_instance_refresh(auto_scaling_group_name) start_instance_refresh(auto_scaling_group_name, params::Dict{String,<:Any}) -Starts an instance refresh. During an instance refresh, Amazon EC2 Auto Scaling performs a -rolling update of instances in an Auto Scaling group. 
Instances are terminated first and -then replaced, which temporarily reduces the capacity available within your Auto Scaling -group. This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, -which helps you update instances in your Auto Scaling group. This feature is helpful, for -example, when you have a new AMI or a new user data script. You just need to create a new -launch template that specifies the new AMI or user data script. Then start an instance -refresh to immediately begin the process of updating instances in the group. If -successful, the request's response contains a unique ID that you can use to track the -progress of the instance refresh. To query its status, call the DescribeInstanceRefreshes -API. To describe the instance refreshes that have already run, call the -DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, use the -CancelInstanceRefresh API. An instance refresh might fail for several reasons, such as EC2 -launch failures, misconfigured health checks, or not ignoring or allowing the termination -of instances that are in Standby state or protected from scale in. You can monitor for -failed EC2 launches using the scaling activities. To find the scaling activities, call the -DescribeScalingActivities API. If you enable auto rollback, your Auto Scaling group will be -rolled back automatically when the instance refresh fails. You can enable this feature -before starting an instance refresh by specifying the AutoRollback property in the instance -refresh preferences. Otherwise, to roll back an instance refresh before it finishes, use -the RollbackInstanceRefresh API. +Starts an instance refresh. This operation is part of the instance refresh feature in +Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group. This +feature is helpful, for example, when you have a new AMI or a new user data script. You +just need to create a new launch template that specifies the new AMI or user data script. +Then start an instance refresh to immediately begin the process of updating instances in +the group. If successful, the request's response contains a unique ID that you can use to +track the progress of the instance refresh. To query its status, call the +DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, +call the DescribeInstanceRefreshes API. To cancel an instance refresh that is in progress, +use the CancelInstanceRefresh API. An instance refresh might fail for several reasons, +such as EC2 launch failures, misconfigured health checks, or not ignoring or allowing the +termination of instances that are in Standby state or protected from scale in. You can +monitor for failed EC2 launches using the scaling activities. To find the scaling +activities, call the DescribeScalingActivities API. If you enable auto rollback, your Auto +Scaling group will be rolled back automatically when the instance refresh fails. You can +enable this feature before starting an instance refresh by specifying the AutoRollback +property in the instance refresh preferences. Otherwise, to roll back an instance refresh +before it finishes, use the RollbackInstanceRefresh API. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -3306,11 +3311,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys template and instance types. This can help you reduce the number of replacements that are required to apply updates. 
- `"Preferences"`: Sets your preferences for the instance refresh so that it performs as - expected when you start it. Includes the instance warmup time, the minimum healthy - percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances - that are in Standby state or protected from scale in are found. You can also choose to - enable additional features, such as the following: Auto rollback Checkpoints Skip - matching + expected when you start it. Includes the instance warmup time, the minimum and maximum + healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if + instances that are in Standby state or protected from scale in are found. You can also + choose to enable additional features, such as the following: Auto rollback Checkpoints + CloudWatch alarms Skip matching - `"Strategy"`: The strategy to use for the instance refresh. The only valid value is Rolling. """ @@ -3349,9 +3354,9 @@ end Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group. If you suspend either the Launch or Terminate process types, it can prevent -other process types from functioning properly. For more information, see Suspending and -resuming scaling processes in the Amazon EC2 Auto Scaling User Guide. To resume processes -that have been suspended, call the ResumeProcesses API. +other process types from functioning properly. For more information, see Suspend and resume +Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide. To resume +processes that have been suspended, call the ResumeProcesses API. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -3405,8 +3410,8 @@ desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones terminated. By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and -rebalancing might terminate instances in other zones. For more information, see Rebalancing -activities in the Amazon EC2 Auto Scaling User Guide. +rebalancing might terminate instances in other zones. For more information, see Manual +scaling in the Amazon EC2 Auto Scaling User Guide. # Arguments - `instance_id`: The ID of the instance. @@ -3517,7 +3522,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the maximum size of the group. - `"DesiredCapacityType"`: The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance - type selection only. For more information, see Creating an Auto Scaling group using + type selection only. For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances. Valid values: units | vcpu | memory-mib @@ -3529,9 +3534,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys EC2 Auto Scaling User Guide. - `"HealthCheckType"`: A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot - be disabled. 
For more information, see Health checks for Auto Scaling instances in the - Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was - previously set. + be disabled. For more information, see Health checks for instances in an Auto Scaling group + in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that + was previously set. +- `"InstanceMaintenancePolicy"`: An instance maintenance policy. For more information, see + Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide. - `"LaunchConfigurationName"`: The name of the launch configuration. If you specify LaunchConfigurationName in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy. @@ -3554,7 +3561,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Scaling User Guide. - `"NewInstancesProtectedFromScaleIn"`: Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information - about preventing instances from terminating on scale in, see Using instance scale-in + about preventing instances from terminating on scale in, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. - `"PlacementGroup"`: The name of an existing placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for @@ -3566,8 +3573,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Service-linked roles in the Amazon EC2 Auto Scaling User Guide. - `"TerminationPolicies"`: A policy or a list of policies that are used to select the instances to terminate. The policies are executed in the order that you list them. For more - information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 - Auto Scaling User Guide. Valid values: Default | AllocationStrategy | + information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon + EC2 Auto Scaling User Guide. Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias - `"VPCZoneIdentifier"`: A comma-separated list of subnet IDs for a virtual private cloud diff --git a/src/services/b2bi.jl b/src/services/b2bi.jl new file mode 100644 index 0000000000..b70935a5ac --- /dev/null +++ b/src/services/b2bi.jl @@ -0,0 +1,1182 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: b2bi +using AWS.Compat +using AWS.UUIDs + +""" + create_capability(configuration, name, type) + create_capability(configuration, name, type, params::Dict{String,<:Any}) + +Instantiates a capability based on the specified parameters. A trading capability contains +the information required to transform incoming EDI documents into JSON or XML outputs. + +# Arguments +- `configuration`: Specifies a structure that contains the details for a capability. +- `name`: Specifies the name of the capability, used to identify it. +- `type`: Specifies the type of the capability. Currently, only edi is supported. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Reserved for future use. 
+- `"instructionsDocuments"`: Specifies one or more locations in Amazon S3, each specifying + an EDI document that can be used with this capability. Each item contains the name of the + bucket and the key, to identify the document's location. +- `"tags"`: Specifies the key-value pairs assigned to ARNs that you can use to group and + search for resources by type. You can attach this metadata to resources (capabilities, + partnerships, and so on) for any purpose. +""" +function create_capability( + configuration, name, type; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "CreateCapability", + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "type" => type, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_capability( + configuration, + name, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "CreateCapability", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "type" => type, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_partnership(capabilities, email, name, profile_id) + create_partnership(capabilities, email, name, profile_id, params::Dict{String,<:Any}) + +Creates a partnership between a customer and a trading partner, based on the supplied +parameters. A partnership represents the connection between you and your trading partner. +It ties together a profile and one or more trading capabilities. + +# Arguments +- `capabilities`: Specifies a list of the capabilities associated with this partnership. +- `email`: Specifies the email address associated with this trading partner. +- `name`: Specifies a descriptive name for the partnership. +- `profile_id`: Specifies the unique, system-generated identifier for the profile connected + to this partnership. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Reserved for future use. +- `"phone"`: Specifies the phone number associated with the partnership. +- `"tags"`: Specifies the key-value pairs assigned to ARNs that you can use to group and + search for resources by type. You can attach this metadata to resources (capabilities, + partnerships, and so on) for any purpose. +""" +function create_partnership( + capabilities, email, name, profileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "CreatePartnership", + Dict{String,Any}( + "capabilities" => capabilities, + "email" => email, + "name" => name, + "profileId" => profileId, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_partnership( + capabilities, + email, + name, + profileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "CreatePartnership", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "capabilities" => capabilities, + "email" => email, + "name" => name, + "profileId" => profileId, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_profile(business_name, logging, name, phone) + create_profile(business_name, logging, name, phone, params::Dict{String,<:Any}) + +Creates a customer profile. 
You can have up to five customer profiles, each representing a +distinct private network. A profile is the mechanism used to create the concept of a +private network. + +# Arguments +- `business_name`: Specifies the name for the business associated with this profile. +- `logging`: Specifies whether or not logging is enabled for this profile. +- `name`: Specifies the name of the profile. +- `phone`: Specifies the phone number associated with the profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Reserved for future use. +- `"email"`: Specifies the email address associated with this customer profile. +- `"tags"`: Specifies the key-value pairs assigned to ARNs that you can use to group and + search for resources by type. You can attach this metadata to resources (capabilities, + partnerships, and so on) for any purpose. +""" +function create_profile( + businessName, logging, name, phone; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "CreateProfile", + Dict{String,Any}( + "businessName" => businessName, + "logging" => logging, + "name" => name, + "phone" => phone, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_profile( + businessName, + logging, + name, + phone, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "CreateProfile", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "businessName" => businessName, + "logging" => logging, + "name" => name, + "phone" => phone, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_transformer(edi_type, file_format, mapping_template, name) + create_transformer(edi_type, file_format, mapping_template, name, params::Dict{String,<:Any}) + +Creates a transformer. A transformer describes how to process the incoming EDI documents +and extract the necessary information to the output file. + +# Arguments +- `edi_type`: Specifies the details for the EDI standard that is being used for the + transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding + messages that define specific business documents. +- `file_format`: Specifies that the currently supported file formats for EDI + transformations are JSON and XML. +- `mapping_template`: Specifies the mapping template for the transformer. This template is + used to map the parsed EDI file using JSONata or XSLT. +- `name`: Specifies the name of the transformer, used to identify it. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Reserved for future use. +- `"sampleDocument"`: Specifies a sample EDI document that is used by a transformer as a + guide for processing the EDI data. +- `"tags"`: Specifies the key-value pairs assigned to ARNs that you can use to group and + search for resources by type. You can attach this metadata to resources (capabilities, + partnerships, and so on) for any purpose. 
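# Example
A minimal sketch. All values are placeholders, the nested ediType keys are an assumption
about the X12 details shape, and the `@service` invocation is likewise assumed:

    using AWS
    @service B2bi
    B2bi.create_transformer(
        Dict("x12Details" => Dict("transactionSet" => "X12_214", "version" => "VERSION_4010")),  # assumed shape
        "JSON",                                     # output file format: JSON or XML
        "<your JSONata or XSLT mapping template>",  # placeholder mapping template
        "my-edi-transformer",
    )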
+""" +function create_transformer( + ediType, + fileFormat, + mappingTemplate, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "CreateTransformer", + Dict{String,Any}( + "ediType" => ediType, + "fileFormat" => fileFormat, + "mappingTemplate" => mappingTemplate, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_transformer( + ediType, + fileFormat, + mappingTemplate, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "CreateTransformer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ediType" => ediType, + "fileFormat" => fileFormat, + "mappingTemplate" => mappingTemplate, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_capability(capability_id) + delete_capability(capability_id, params::Dict{String,<:Any}) + +Deletes the specified capability. A trading capability contains the information required to +transform incoming EDI documents into JSON or XML outputs. + +# Arguments +- `capability_id`: Specifies a system-assigned unique identifier for the capability. + +""" +function delete_capability(capabilityId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "DeleteCapability", + Dict{String,Any}("capabilityId" => capabilityId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_capability( + capabilityId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "DeleteCapability", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("capabilityId" => capabilityId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_partnership(partnership_id) + delete_partnership(partnership_id, params::Dict{String,<:Any}) + +Deletes the specified partnership. A partnership represents the connection between you and +your trading partner. It ties together a profile and one or more trading capabilities. + +# Arguments +- `partnership_id`: Specifies the unique, system-generated identifier for a partnership. + +""" +function delete_partnership( + partnershipId; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "DeletePartnership", + Dict{String,Any}("partnershipId" => partnershipId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_partnership( + partnershipId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "DeletePartnership", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("partnershipId" => partnershipId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_profile(profile_id) + delete_profile(profile_id, params::Dict{String,<:Any}) + +Deletes the specified profile. A profile is the mechanism used to create the concept of a +private network. + +# Arguments +- `profile_id`: Specifies the unique, system-generated identifier for the profile. 
+ +""" +function delete_profile(profileId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "DeleteProfile", + Dict{String,Any}("profileId" => profileId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_profile( + profileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "DeleteProfile", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("profileId" => profileId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_transformer(transformer_id) + delete_transformer(transformer_id, params::Dict{String,<:Any}) + +Deletes the specified transformer. A transformer describes how to process the incoming EDI +documents and extract the necessary information to the output file. + +# Arguments +- `transformer_id`: Specifies the system-assigned unique identifier for the transformer. + +""" +function delete_transformer( + transformerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "DeleteTransformer", + Dict{String,Any}("transformerId" => transformerId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_transformer( + transformerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "DeleteTransformer", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("transformerId" => transformerId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_capability(capability_id) + get_capability(capability_id, params::Dict{String,<:Any}) + +Retrieves the details for the specified capability. A trading capability contains the +information required to transform incoming EDI documents into JSON or XML outputs. + +# Arguments +- `capability_id`: Specifies a system-assigned unique identifier for the capability. + +""" +function get_capability(capabilityId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "GetCapability", + Dict{String,Any}("capabilityId" => capabilityId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_capability( + capabilityId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "GetCapability", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("capabilityId" => capabilityId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_partnership(partnership_id) + get_partnership(partnership_id, params::Dict{String,<:Any}) + +Retrieves the details for a partnership, based on the partner and profile IDs specified. A +partnership represents the connection between you and your trading partner. It ties +together a profile and one or more trading capabilities. + +# Arguments +- `partnership_id`: Specifies the unique, system-generated identifier for a partnership. 
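# Example
A minimal sketch. The identifier is a placeholder, the response field names are
assumptions about the service model, and the `@service` invocation is likewise assumed:

    using AWS
    @service B2bi
    p = B2bi.get_partnership("ps-EXAMPLE1234567890")
    # A partnership ties a profile to one or more trading capabilities.
    get(p, "name", nothing), get(p, "capabilities", nothing)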
+ +""" +function get_partnership(partnershipId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "GetPartnership", + Dict{String,Any}("partnershipId" => partnershipId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_partnership( + partnershipId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "GetPartnership", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("partnershipId" => partnershipId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_profile(profile_id) + get_profile(profile_id, params::Dict{String,<:Any}) + +Retrieves the details for the profile specified by the profile ID. A profile is the +mechanism used to create the concept of a private network. + +# Arguments +- `profile_id`: Specifies the unique, system-generated identifier for the profile. + +""" +function get_profile(profileId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "GetProfile", + Dict{String,Any}("profileId" => profileId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_profile( + profileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "GetProfile", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("profileId" => profileId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_transformer(transformer_id) + get_transformer(transformer_id, params::Dict{String,<:Any}) + +Retrieves the details for the transformer specified by the transformer ID. A transformer +describes how to process the incoming EDI documents and extract the necessary information +to the output file. + +# Arguments +- `transformer_id`: Specifies the system-assigned unique identifier for the transformer. + +""" +function get_transformer(transformerId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "GetTransformer", + Dict{String,Any}("transformerId" => transformerId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_transformer( + transformerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "GetTransformer", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("transformerId" => transformerId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_transformer_job(transformer_id, transformer_job_id) + get_transformer_job(transformer_id, transformer_job_id, params::Dict{String,<:Any}) + +Returns the details of the transformer run, based on the Transformer job ID. + +# Arguments +- `transformer_id`: Specifies the system-assigned unique identifier for the transformer. +- `transformer_job_id`: Specifies the unique, system-generated identifier for a transformer + run. 
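+
+# Examples
+A minimal usage sketch; both identifiers below are placeholders rather than real values:
+
+```julia
+# Check on a single transformer run using its transformer and job IDs (hypothetical IDs).
+get_transformer_job("tr-1234567890abcdef0", "tj-1234567890abcdef0")
+```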
+ +""" +function get_transformer_job( + transformerId, transformerJobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "GetTransformerJob", + Dict{String,Any}( + "transformerId" => transformerId, "transformerJobId" => transformerJobId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_transformer_job( + transformerId, + transformerJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "GetTransformerJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "transformerId" => transformerId, "transformerJobId" => transformerJobId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_capabilities() + list_capabilities(params::Dict{String,<:Any}) + +Lists the capabilities associated with your Amazon Web Services account for your current or +specified region. A trading capability contains the information required to transform +incoming EDI documents into JSON or XML outputs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Specifies the maximum number of capabilities to return. +- `"nextToken"`: When additional results are obtained from the command, a NextToken + parameter is returned in the output. You can then pass the NextToken parameter in a + subsequent command to continue listing additional resources. +""" +function list_capabilities(; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi("ListCapabilities"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_capabilities( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "ListCapabilities", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_partnerships() + list_partnerships(params::Dict{String,<:Any}) + +Lists the partnerships associated with your Amazon Web Services account for your current or +specified region. A partnership represents the connection between you and your trading +partner. It ties together a profile and one or more trading capabilities. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Specifies the maximum number of capabilities to return. +- `"nextToken"`: When additional results are obtained from the command, a NextToken + parameter is returned in the output. You can then pass the NextToken parameter in a + subsequent command to continue listing additional resources. +- `"profileId"`: Specifies the unique, system-generated identifier for the profile + connected to this partnership. +""" +function list_partnerships(; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi("ListPartnerships"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_partnerships( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "ListPartnerships", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_profiles() + list_profiles(params::Dict{String,<:Any}) + +Lists the profiles associated with your Amazon Web Services account for your current or +specified region. A profile is the mechanism used to create the concept of a private +network. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: Specifies the maximum number of profiles to return. +- `"nextToken"`: When additional results are obtained from the command, a NextToken + parameter is returned in the output. You can then pass the NextToken parameter in a + subsequent command to continue listing additional resources. +""" +function list_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi("ListProfiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_profiles( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "ListProfiles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The +resource can be a capability, partnership, profile, or transformer. + +# Arguments +- `resource_arn`: Requests the tags associated with a particular Amazon Resource Name + (ARN). An ARN is an identifier for a specific Amazon Web Services resource, such as a + capability, partnership, profile, or transformer. + +""" +function list_tags_for_resource( + ResourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "ListTagsForResource", + Dict{String,Any}("ResourceARN" => ResourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceARN" => ResourceARN), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_transformers() + list_transformers(params::Dict{String,<:Any}) + +Lists the available transformers. A transformer describes how to process the incoming EDI +documents and extract the necessary information to the output file. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Specifies the number of items to return for the API response. +- `"nextToken"`: When additional results are obtained from the command, a NextToken + parameter is returned in the output. You can then pass the NextToken parameter in a + subsequent command to continue listing additional resources. +""" +function list_transformers(; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi("ListTransformers"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_transformers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "ListTransformers", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + start_transformer_job(input_file, output_location, transformer_id) + start_transformer_job(input_file, output_location, transformer_id, params::Dict{String,<:Any}) + +Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into +the output structures used by Amazon Web Services B2BI Data Interchange. If you only want +to transform EDI (electronic data interchange) documents, you don't need to create +profiles, partnerships or capabilities. Just create and configure a transformer, and then +run the StartTransformerJob API to process your files. 
+ +# Arguments +- `input_file`: Specifies the location of the input file for the transformation. The + location consists of an Amazon S3 bucket and prefix. +- `output_location`: Specifies the location of the output file for the transformation. The + location consists of an Amazon S3 bucket and prefix. +- `transformer_id`: Specifies the system-assigned unique identifier for the transformer. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Reserved for future use. +""" +function start_transformer_job( + inputFile, + outputLocation, + transformerId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "StartTransformerJob", + Dict{String,Any}( + "inputFile" => inputFile, + "outputLocation" => outputLocation, + "transformerId" => transformerId, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_transformer_job( + inputFile, + outputLocation, + transformerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "StartTransformerJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputFile" => inputFile, + "outputLocation" => outputLocation, + "transformerId" => transformerId, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). +Resources are capability, partnership, profile, transformers and other entities. There is +no response returned from this call. + +# Arguments +- `resource_arn`: Specifies an Amazon Resource Name (ARN) for a specific Amazon Web + Services resource, such as a capability, partnership, profile, or transformer. +- `tags`: Specifies the key-value pairs assigned to ARNs that you can use to group and + search for resources by type. You can attach this metadata to resources (capabilities, + partnerships, and so on) for any purpose. + +""" +function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "TagResource", + Dict{String,Any}("ResourceARN" => ResourceARN, "Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceARN, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceARN" => ResourceARN, "Tags" => Tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + test_mapping(file_format, input_file_content, mapping_template) + test_mapping(file_format, input_file_content, mapping_template, params::Dict{String,<:Any}) + +Maps the input file according to the provided template file. The API call downloads the +file contents from the Amazon S3 location, and passes the contents in as a string, to the +inputFileContent parameter. + +# Arguments +- `file_format`: Specifies that the currently supported file formats for EDI + transformations are JSON and XML. +- `input_file_content`: Specify the contents of the EDI (electronic data interchange) XML + or JSON file that is used as input for the transform. 
+- `mapping_template`: Specifies the mapping template for the transformer. This template is + used to map the parsed EDI file using JSONata or XSLT. + +""" +function test_mapping( + fileFormat, + inputFileContent, + mappingTemplate; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "TestMapping", + Dict{String,Any}( + "fileFormat" => fileFormat, + "inputFileContent" => inputFileContent, + "mappingTemplate" => mappingTemplate, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function test_mapping( + fileFormat, + inputFileContent, + mappingTemplate, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "TestMapping", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "fileFormat" => fileFormat, + "inputFileContent" => inputFileContent, + "mappingTemplate" => mappingTemplate, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + test_parsing(edi_type, file_format, input_file) + test_parsing(edi_type, file_format, input_file, params::Dict{String,<:Any}) + +Parses the input EDI (electronic data interchange) file. The input file has a file size +limit of 250 KB. + +# Arguments +- `edi_type`: Specifies the details for the EDI standard that is being used for the + transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding + messages that define specific business documents. +- `file_format`: Specifies that the currently supported file formats for EDI + transformations are JSON and XML. +- `input_file`: Specifies an S3Location object, which contains the Amazon S3 bucket and + prefix for the location of the input file. + +""" +function test_parsing( + ediType, fileFormat, inputFile; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "TestParsing", + Dict{String,Any}( + "ediType" => ediType, "fileFormat" => fileFormat, "inputFile" => inputFile + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function test_parsing( + ediType, + fileFormat, + inputFile, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "TestParsing", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ediType" => ediType, + "fileFormat" => fileFormat, + "inputFile" => inputFile, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Detaches a key-value pair from the specified resource, as identified by its Amazon Resource +Name (ARN). Resources are capability, partnership, profile, transformers and other entities. + +# Arguments +- `resource_arn`: Specifies an Amazon Resource Name (ARN) for a specific Amazon Web + Services resource, such as a capability, partnership, profile, or transformer. +- `tag_keys`: Specifies the key-value pairs assigned to ARNs that you can use to group and + search for resources by type. You can attach this metadata to resources (capabilities, + partnerships, and so on) for any purpose. 
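+
+# Examples
+A brief sketch of removing one tag key; the ARN and the tag key are illustrative only:
+
+```julia
+# Detach a hypothetical "costCenter" tag from a profile ARN (placeholder values).
+untag_resource(
+    "arn:aws:b2bi:us-east-1:123456789012:profile/p-1234567890abcdef0",
+    ["costCenter"],
+)
+```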
+ +""" +function untag_resource( + ResourceARN, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "UntagResource", + Dict{String,Any}("ResourceARN" => ResourceARN, "TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceARN, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceARN" => ResourceARN, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_capability(capability_id) + update_capability(capability_id, params::Dict{String,<:Any}) + +Updates some of the parameters for a capability, based on the specified parameters. A +trading capability contains the information required to transform incoming EDI documents +into JSON or XML outputs. + +# Arguments +- `capability_id`: Specifies a system-assigned unique identifier for the capability. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"configuration"`: Specifies a structure that contains the details for a capability. +- `"instructionsDocuments"`: Specifies one or more locations in Amazon S3, each specifying + an EDI document that can be used with this capability. Each item contains the name of the + bucket and the key, to identify the document's location. +- `"name"`: Specifies a new name for the capability, to replace the existing name. +""" +function update_capability(capabilityId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "UpdateCapability", + Dict{String,Any}("capabilityId" => capabilityId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_capability( + capabilityId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "UpdateCapability", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("capabilityId" => capabilityId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_partnership(partnership_id) + update_partnership(partnership_id, params::Dict{String,<:Any}) + +Updates some of the parameters for a partnership between a customer and trading partner. A +partnership represents the connection between you and your trading partner. It ties +together a profile and one or more trading capabilities. + +# Arguments +- `partnership_id`: Specifies the unique, system-generated identifier for a partnership. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"capabilities"`: List of the capabilities associated with this partnership. +- `"name"`: The name of the partnership, used to identify it. 
+""" +function update_partnership( + partnershipId; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "UpdatePartnership", + Dict{String,Any}("partnershipId" => partnershipId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_partnership( + partnershipId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "UpdatePartnership", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("partnershipId" => partnershipId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_profile(profile_id) + update_profile(profile_id, params::Dict{String,<:Any}) + +Updates the specified parameters for a profile. A profile is the mechanism used to create +the concept of a private network. + +# Arguments +- `profile_id`: Specifies the unique, system-generated identifier for the profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"businessName"`: Specifies the name for the business associated with this profile. +- `"email"`: Specifies the email address associated with this customer profile. +- `"name"`: The name of the profile, used to identify it. +- `"phone"`: Specifies the phone number associated with the profile. +""" +function update_profile(profileId; aws_config::AbstractAWSConfig=global_aws_config()) + return b2bi( + "UpdateProfile", + Dict{String,Any}("profileId" => profileId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_profile( + profileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "UpdateProfile", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("profileId" => profileId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_transformer(transformer_id) + update_transformer(transformer_id, params::Dict{String,<:Any}) + +Updates the specified parameters for a transformer. A transformer describes how to process +the incoming EDI documents and extract the necessary information to the output file. + +# Arguments +- `transformer_id`: Specifies the system-assigned unique identifier for the transformer. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ediType"`: Specifies the details for the EDI standard that is being used for the + transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding + messages that define specific business documents. +- `"fileFormat"`: Specifies that the currently supported file formats for EDI + transformations are JSON and XML. +- `"mappingTemplate"`: Specifies the mapping template for the transformer. This template is + used to map the parsed EDI file using JSONata or XSLT. +- `"name"`: Specify a new name for the transformer, if you want to update it. +- `"sampleDocument"`: Specifies a sample EDI document that is used by a transformer as a + guide for processing the EDI data. +- `"status"`: Specifies the transformer's status. You can update the state of the + transformer, from active to inactive, or inactive to active. 
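+
+# Examples
+A minimal sketch showing how the optional keys above can be passed through `params`; the
+transformer ID and the new name are placeholders:
+
+```julia
+# Rename a transformer and mark it inactive in a single call (illustrative values).
+update_transformer(
+    "tr-1234567890abcdef0",
+    Dict("name" => "edi-850-to-json", "status" => "inactive"),
+)
+```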
+""" +function update_transformer( + transformerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return b2bi( + "UpdateTransformer", + Dict{String,Any}("transformerId" => transformerId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_transformer( + transformerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return b2bi( + "UpdateTransformer", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("transformerId" => transformerId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/backup.jl b/src/services/backup.jl index d63dd190e3..c40b2802ec 100644 --- a/src/services/backup.jl +++ b/src/services/backup.jl @@ -319,6 +319,80 @@ function create_legal_hold( ) end +""" + create_logically_air_gapped_backup_vault(max_retention_days, min_retention_days, backup_vault_name) + create_logically_air_gapped_backup_vault(max_retention_days, min_retention_days, backup_vault_name, params::Dict{String,<:Any}) + +This request creates a logical container to where backups may be copied. This request +includes a name, the Region, the maximum number of retention days, the minimum number of +retention days, and optionally can include tags and a creator request ID. Do not include +sensitive data, such as passport numbers, in the name of a backup vault. + +# Arguments +- `max_retention_days`: This is the setting that specifies the maximum retention period + that the vault retains its recovery points. If this parameter is not specified, Backup does + not enforce a maximum retention period on the recovery points in the vault (allowing + indefinite storage). If specified, any backup or copy job to the vault must have a + lifecycle policy with a retention period equal to or shorter than the maximum retention + period. If the job retention period is longer than that maximum retention period, then the + vault fails the backup or copy job, and you should either modify your lifecycle settings or + use a different vault. +- `min_retention_days`: This setting specifies the minimum retention period that the vault + retains its recovery points. If this parameter is not specified, no minimum retention + period is enforced. If specified, any backup or copy job to the vault must have a lifecycle + policy with a retention period equal to or longer than the minimum retention period. If a + job retention period is shorter than that minimum retention period, then the vault fails + the backup or copy job, and you should either modify your lifecycle settings or use a + different vault. +- `backup_vault_name`: This is the name of the vault that is being created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"BackupVaultTags"`: These are the tags that will be included in the newly-created vault. +- `"CreatorRequestId"`: This is the ID of the creation request. This parameter is optional. + If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters. 
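+
+# Examples
+A minimal sketch of the positional arguments (maximum retention, minimum retention, vault
+name); the values below are illustrative only:
+
+```julia
+# Create an air-gapped vault that keeps recovery points between 7 and 35 days.
+create_logically_air_gapped_backup_vault(35, 7, "my-airgapped-vault")
+```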
+""" +function create_logically_air_gapped_backup_vault( + MaxRetentionDays, + MinRetentionDays, + backupVaultName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/logically-air-gapped-backup-vaults/$(backupVaultName)", + Dict{String,Any}( + "MaxRetentionDays" => MaxRetentionDays, "MinRetentionDays" => MinRetentionDays + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_logically_air_gapped_backup_vault( + MaxRetentionDays, + MinRetentionDays, + backupVaultName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/logically-air-gapped-backup-vaults/$(backupVaultName)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MaxRetentionDays" => MaxRetentionDays, + "MinRetentionDays" => MinRetentionDays, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_report_plan(report_delivery_channel, report_plan_name, report_setting) create_report_plan(report_delivery_channel, report_plan_name, report_setting, params::Dict{String,<:Any}) @@ -396,6 +470,126 @@ function create_report_plan( ) end +""" + create_restore_testing_plan(restore_testing_plan) + create_restore_testing_plan(restore_testing_plan, params::Dict{String,<:Any}) + +This is the first of two steps to create a restore testing plan; once this request is +successful, finish the procedure with request CreateRestoreTestingSelection. You must +include the parameter RestoreTestingPlan. You may optionally include CreatorRequestId and +Tags. + +# Arguments +- `restore_testing_plan`: A restore testing plan must contain a unique + RestoreTestingPlanName string you create and must contain a ScheduleExpression cron. You + may optionally include a StartWindowHours integer and a CreatorRequestId string. The + RestoreTestingPlanName is a unique string that is the name of the restore testing plan. + This cannot be changed after creation, and it must consist of only alphanumeric characters + and underscores. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreatorRequestId"`: This is a unique string that identifies the request and allows + failed requests to be retried without the risk of running the operation twice. This + parameter is optional. If used, this parameter must contain 1 to 50 alphanumeric or '-_.' + characters. +- `"Tags"`: Optional tags to include. A tag is a key-value pair you can use to manage, + filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, + spaces, and the following characters: + - = . _ : /.
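+
+# Examples
+A minimal, hypothetical sketch of the `RestoreTestingPlan` argument described above; the
+plan name and schedule below are illustrative values, not taken from the service
+definition:
+
+```julia
+# As noted above, the plan needs at least a unique RestoreTestingPlanName and a
+# ScheduleExpression cron; both values here are placeholders.
+create_restore_testing_plan(Dict(
+    "RestoreTestingPlanName" => "WeeklyEbsRestoreTest",
+    "ScheduleExpression" => "cron(0 1 ? * SUN *)",
+))
+```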
+""" +function create_restore_testing_plan( + RestoreTestingPlan; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "PUT", + "/restore-testing/plans", + Dict{String,Any}("RestoreTestingPlan" => RestoreTestingPlan); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_restore_testing_plan( + RestoreTestingPlan, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-testing/plans", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RestoreTestingPlan" => RestoreTestingPlan), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_restore_testing_selection(restore_testing_plan_name, restore_testing_selection) + create_restore_testing_selection(restore_testing_plan_name, restore_testing_selection, params::Dict{String,<:Any}) + +This request can be sent after CreateRestoreTestingPlan request returns successfully. This +is the second part of creating a resource testing plan, and it must be completed +sequentially. This consists of RestoreTestingSelectionName, ProtectedResourceType, and one +of the following: ProtectedResourceArns ProtectedResourceConditions Each +protected resource type can have one single value. A restore testing selection can include +a wildcard value (\"*\") for ProtectedResourceArns along with ProtectedResourceConditions. +Alternatively, you can include up to 30 specific protected resource ARNs in +ProtectedResourceArns. Cannot select by both protected resource types AND specific ARNs. +Request will fail if both are included. + +# Arguments +- `restore_testing_plan_name`: Input the restore testing plan name that was returned from + the related CreateRestoreTestingPlan request. +- `restore_testing_selection`: This consists of RestoreTestingSelectionName, + ProtectedResourceType, and one of the following: ProtectedResourceArns + ProtectedResourceConditions Each protected resource type can have one single value. A + restore testing selection can include a wildcard value (\"*\") for ProtectedResourceArns + along with ProtectedResourceConditions. Alternatively, you can include up to 30 specific + protected resource ARNs in ProtectedResourceArns. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreatorRequestId"`: This is an optional unique string that identifies the request and + allows failed requests to be retried without the risk of running the operation twice. If + used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters. 
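+
+# Examples
+A brief sketch pairing a plan with a wildcard selection, as described above; the plan and
+selection names are placeholders:
+
+```julia
+# Select every protected EBS volume for the named restore testing plan
+# (illustrative names; additional selection fields may apply).
+create_restore_testing_selection(
+    "WeeklyEbsRestoreTest",
+    Dict(
+        "RestoreTestingSelectionName" => "AllEbsVolumes",
+        "ProtectedResourceType" => "EBS",
+        "ProtectedResourceArns" => ["*"],
+    ),
+)
+```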
+""" +function create_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelection; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections", + Dict{String,Any}("RestoreTestingSelection" => RestoreTestingSelection); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelection, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("RestoreTestingSelection" => RestoreTestingSelection), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_backup_plan(backup_plan_id) delete_backup_plan(backup_plan_id, params::Dict{String,<:Any}) @@ -730,6 +924,84 @@ function delete_report_plan( ) end +""" + delete_restore_testing_plan(restore_testing_plan_name) + delete_restore_testing_plan(restore_testing_plan_name, params::Dict{String,<:Any}) + +This request deletes the specified restore testing plan. Deletion can only successfully +occur if all associated restore testing selections are deleted first. + +# Arguments +- `restore_testing_plan_name`: Required unique name of the restore testing plan you wish to + delete. + +""" +function delete_restore_testing_plan( + RestoreTestingPlanName; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "DELETE", + "/restore-testing/plans/$(RestoreTestingPlanName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_restore_testing_plan( + RestoreTestingPlanName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "DELETE", + "/restore-testing/plans/$(RestoreTestingPlanName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_restore_testing_selection(restore_testing_plan_name, restore_testing_selection_name) + delete_restore_testing_selection(restore_testing_plan_name, restore_testing_selection_name, params::Dict{String,<:Any}) + +Input the Restore Testing Plan name and Restore Testing Selection name. All testing +selections associated with a restore testing plan must be deleted before the restore +testing plan can be deleted. + +# Arguments +- `restore_testing_plan_name`: Required unique name of the restore testing plan that + contains the restore testing selection you wish to delete. +- `restore_testing_selection_name`: Required unique name of the restore testing selection + you wish to delete. 
+ +""" +function delete_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelectionName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "DELETE", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections/$(RestoreTestingSelectionName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelectionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "DELETE", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections/$(RestoreTestingSelectionName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_backup_job(backup_job_id) describe_backup_job(backup_job_id, params::Dict{String,<:Any}) @@ -774,6 +1046,9 @@ Returns metadata about a backup vault specified by its name. Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"backupVaultAccountId"`: This is the account ID of the specified backup vault. """ function describe_backup_vault( backupVaultName; aws_config::AbstractAWSConfig=global_aws_config() @@ -943,6 +1218,9 @@ lifecycle. point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"backupVaultAccountId"`: This is the account ID of the specified backup vault. """ function describe_recovery_point( backupVaultName, recoveryPointArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1487,6 +1765,9 @@ Returns a set of metadata key-value pairs that were used to create the backup. point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"backupVaultAccountId"`: This is the account ID of the specified backup vault. """ function get_recovery_point_restore_metadata( backupVaultName, recoveryPointArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1514,26 +1795,261 @@ function get_recovery_point_restore_metadata( end """ - get_supported_resource_types() - get_supported_resource_types(params::Dict{String,<:Any}) + get_restore_job_metadata(restore_job_id) + get_restore_job_metadata(restore_job_id, params::Dict{String,<:Any}) + +This request returns the metadata for the specified restore job. + +# Arguments +- `restore_job_id`: This is a unique identifier of a restore job within Backup. 
+ +""" +function get_restore_job_metadata( + restoreJobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/restore-jobs/$(restoreJobId)/metadata"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_restore_job_metadata( + restoreJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/restore-jobs/$(restoreJobId)/metadata", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_restore_testing_inferred_metadata(backup_vault_name, recovery_point_arn) + get_restore_testing_inferred_metadata(backup_vault_name, recovery_point_arn, params::Dict{String,<:Any}) + +This request returns the minimal required set of metadata needed to start a restore job +with secure default settings. BackupVaultName and RecoveryPointArn are required parameters. +BackupVaultAccountId is an optional parameter. + +# Arguments +- `backup_vault_name`: The name of a logical container where backups are stored. Backup + vaults are identified by names that are unique to the account used to create them and the + Amazon Web ServicesRegion where they are created. They consist of letters, numbers, and + hyphens. +- `recovery_point_arn`: An Amazon Resource Name (ARN) that uniquely identifies a recovery + point; for example, + arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"BackupVaultAccountId"`: This is the account ID of the specified backup vault. +""" +function get_restore_testing_inferred_metadata( + BackupVaultName, RecoveryPointArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/restore-testing/inferred-metadata", + Dict{String,Any}( + "BackupVaultName" => BackupVaultName, "RecoveryPointArn" => RecoveryPointArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_restore_testing_inferred_metadata( + BackupVaultName, + RecoveryPointArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/restore-testing/inferred-metadata", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "BackupVaultName" => BackupVaultName, + "RecoveryPointArn" => RecoveryPointArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_restore_testing_plan(restore_testing_plan_name) + get_restore_testing_plan(restore_testing_plan_name, params::Dict{String,<:Any}) + +Returns RestoreTestingPlan details for the specified RestoreTestingPlanName. The details +are the body of a restore testing plan in JSON format, in addition to plan metadata. + +# Arguments +- `restore_testing_plan_name`: Required unique name of the restore testing plan. 
+ +""" +function get_restore_testing_plan( + RestoreTestingPlanName; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/restore-testing/plans/$(RestoreTestingPlanName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_restore_testing_plan( + RestoreTestingPlanName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/restore-testing/plans/$(RestoreTestingPlanName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_restore_testing_selection(restore_testing_plan_name, restore_testing_selection_name) + get_restore_testing_selection(restore_testing_plan_name, restore_testing_selection_name, params::Dict{String,<:Any}) + +Returns RestoreTestingSelection, which displays resources and elements of the restore +testing plan. + +# Arguments +- `restore_testing_plan_name`: Required unique name of the restore testing plan. +- `restore_testing_selection_name`: Required unique name of the restore testing selection. + +""" +function get_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelectionName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections/$(RestoreTestingSelectionName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelectionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections/$(RestoreTestingSelectionName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_supported_resource_types() + get_supported_resource_types(params::Dict{String,<:Any}) + +Returns the Amazon Web Services resource types supported by Backup. + +""" +function get_supported_resource_types(; aws_config::AbstractAWSConfig=global_aws_config()) + return backup( + "GET", + "/supported-resource-types"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_supported_resource_types( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/supported-resource-types", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_backup_job_summaries() + list_backup_job_summaries(params::Dict{String,<:Any}) -Returns the Amazon Web Services resource types supported by Backup. +This is a request for a summary of backup jobs created or running within the most recent 30 +days. You can include parameters AccountID, State, ResourceType, MessageCategory, +AggregationPeriod, MaxResults, or NextToken to filter results. This request returns a +summary that contains Region, Account, State, ResourceType, MessageCategory, StartTime, +EndTime, and Count of included jobs. -""" -function get_supported_resource_types(; aws_config::AbstractAWSConfig=global_aws_config()) +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: Returns the job count for the specified account. If the request is sent + from a member account or an account not part of Amazon Web Services Organizations, jobs + within requestor's account will be returned. 
Root, admin, and delegated administrator + accounts can use the value ANY to return job counts from every account in the organization. + AGGREGATE_ALL aggregates job counts from all accounts within the authenticated + organization, then returns the sum. +- `"AggregationPeriod"`: This is the period that sets the boundaries for returned results. + Acceptable values include ONE_DAY for daily job count for the prior 14 days. + SEVEN_DAYS for the aggregated job count for the prior 7 days. FOURTEEN_DAYS for + aggregated job count for prior 14 days. +- `"MaxResults"`: This parameter sets the maximum number of items to be returned. The value + is an integer. Range of accepted values is from 1 to 500. +- `"MessageCategory"`: This parameter returns the job count for the specified message + category. Example accepted strings include AccessDenied, Success, and InvalidParameters. + See Monitoring for a list of accepted MessageCategory strings. The value ANY returns + count of all message categories. AGGREGATE_ALL aggregates job counts for all message + categories and returns the sum. +- `"NextToken"`: The next item following a partial list of returned resources. For example, + if a request is made to return MaxResults number of resources, NextToken allows you to + return more items in your list starting at the location pointed to by the next token. +- `"ResourceType"`: Returns the job count for the specified resource type. Use request + GetSupportedResourceTypes to obtain strings for supported resource types. The value ANY + returns count of all resource types. AGGREGATE_ALL aggregates job counts for all resource + types and returns the sum. The type of Amazon Web Services resource to be backed up; for + example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database + Service (Amazon RDS) database. +- `"State"`: This parameter returns the job count for jobs with the specified state. The + value ANY returns count of all states. AGGREGATE_ALL aggregates job counts for all + states and returns the sum. Completed with issues is a status found only in the Backup + console. For API, this status refers to jobs with a state of COMPLETED and a + MessageCategory with a value other than SUCCESS; that is, the status is completed but comes + with a status message. To obtain the job count for Completed with issues, run two GET + requests, and subtract the second, smaller number: GET + /audit/backup-job-summaries?AggregationPeriod=FOURTEEN_DAYS&State=COMPLETED GET + /audit/backup-job-summaries?AggregationPeriod=FOURTEEN_DAYS&MessageCategory=SUCCESS&State=COMPLETED +""" +function list_backup_job_summaries(; aws_config::AbstractAWSConfig=global_aws_config()) return backup( "GET", - "/supported-resource-types"; + "/audit/backup-job-summaries"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_supported_resource_types( +function list_backup_job_summaries( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return backup( "GET", - "/supported-resource-types", + "/audit/backup-job-summaries", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1563,19 +2079,31 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"createdAfter"`: Returns only backup jobs that were created after the specified date. - `"createdBefore"`: Returns only backup jobs that were created before the specified date. - `"maxResults"`: The maximum number of items to be returned.
+- `"messageCategory"`: This is an optional parameter that can be used to filter out jobs + with a MessageCategory which matches the value you input. Example strings may include + AccessDenied, SUCCESS, AGGREGATE_ALL, and InvalidParameters. View Monitoring for a list of accepted strings. The wildcard + () returns count of all message categories. AGGREGATE_ALL aggregates job counts for all + message categories and returns the sum. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. - `"parentJobId"`: This is a filter to list child (nested) jobs based on parent job ID. - `"resourceArn"`: Returns only backup jobs that match the specified resource Amazon Resource Name (ARN). - `"resourceType"`: Returns only backup jobs for the specified resources: Aurora for - Amazon Aurora DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB - for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute - Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon - Neptune RDS for Amazon Relational Database Service Storage Gateway for Storage - Gateway S3 for Amazon S3 VirtualMachine for virtual machines -- `"state"`: Returns only backup jobs that are in the specified state. + Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB + (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic + Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System + FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS + for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases + Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream + VirtualMachine for virtual machines +- `"state"`: Returns only backup jobs that are in the specified state. Completed with + issues is a status found only in the Backup console. For API, this status refers to jobs + with a state of COMPLETED and a MessageCategory with a value other than SUCCESS; that is, + the status is completed but comes with a status message. To obtain the job count for + Completed with issues, run two GET requests, and subtract the second, smaller number: GET + /backup-jobs/?state=COMPLETED GET /backup-jobs/?messageCategory=SUCCESS&state=COMPLETED """ function list_backup_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) return backup( @@ -1605,7 +2133,7 @@ the creation and deletion dates. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. """ function list_backup_plan_templates(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1642,7 +2170,7 @@ backup plan IDs, creation and deletion dates, plan names, and version IDs. Optional parameters can be passed as a `params::Dict{String,<:Any}`.
Valid keys are: - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. """ function list_backup_plan_versions( @@ -1683,7 +2211,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys backup plans when set to TRUE. - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. """ function list_backup_plans(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1717,7 +2245,7 @@ plan. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. """ function list_backup_selections( @@ -1754,8 +2282,10 @@ Returns a list of recovery point storage containers along with information about Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. +- `"shared"`: This parameter will sort the list of vaults by shared vaults. +- `"vaultType"`: This parameter will sort the list of vaults by vault type. """ function list_backup_vaults(; aws_config::AbstractAWSConfig=global_aws_config()) return backup( @@ -1774,6 +2304,67 @@ function list_backup_vaults( ) end +""" + list_copy_job_summaries() + list_copy_job_summaries(params::Dict{String,<:Any}) + +This request obtains a list of copy jobs created or running within the most recent 30 +days. You can include parameters AccountID, State, ResourceType, MessageCategory, +AggregationPeriod, MaxResults, or NextToken to filter results. This request returns a +summary that contains Region, Account, State, ResourceType, MessageCategory, StartTime, +EndTime, and Count of included jobs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: Returns the job count for the specified account. If the request is sent + from a member account or an account not part of Amazon Web Services Organizations, jobs + within requestor's account will be returned. Root, admin, and delegated administrator + accounts can use the value ANY to return job counts from every account in the organization.
+ AGGREGATE_ALL aggregates job counts from all accounts within the authenticated + organization, then returns the sum. +- `"AggregationPeriod"`: This is the period that sets the boundaries for returned results. + ONE_DAY for daily job count for the prior 14 days. SEVEN_DAYS for the aggregated job + count for the prior 7 days. FOURTEEN_DAYS for aggregated job count for prior 14 days. +- `"MaxResults"`: This parameter sets the maximum number of items to be returned. The value + is an integer. Range of accepted values is from 1 to 500. +- `"MessageCategory"`: This parameter returns the job count for the specified message + category. Example accepted strings include AccessDenied, Success, and InvalidParameters. + See Monitoring for a list of accepted MessageCategory strings. The value ANY returns + count of all message categories. AGGREGATE_ALL aggregates job counts for all message + categories and returns the sum. +- `"NextToken"`: The next item following a partial list of returned resources. For example, + if a request is made to return MaxResults number of resources, NextToken allows you to + return more items in your list starting at the location pointed to by the next token. +- `"ResourceType"`: Returns the job count for the specified resource type. Use request + GetSupportedResourceTypes to obtain strings for supported resource types. The value ANY + returns count of all resource types. AGGREGATE_ALL aggregates job counts for all resource + types and returns the sum. The type of Amazon Web Services resource to be backed up; for + example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database + Service (Amazon RDS) database. +- `"State"`: This parameter returns the job count for jobs with the specified state. The + value ANY returns count of all states. AGGREGATE_ALL aggregates job counts for all + states and returns the sum. +""" +function list_copy_job_summaries(; aws_config::AbstractAWSConfig=global_aws_config()) + return backup( + "GET", + "/audit/copy-job-summaries"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_copy_job_summaries( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/audit/copy-job-summaries", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_copy_jobs() list_copy_jobs(params::Dict{String,<:Any}) @@ -1794,18 +2385,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys backup vault to copy from; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault. - `"maxResults"`: The maximum number of items to be returned. +- `"messageCategory"`: This is an optional parameter that can be used to filter out jobs + with a MessageCategory which matches the value you input. Example strings may include + AccessDenied, SUCCESS, AGGREGATE_ALL, and INVALIDPARAMETERS. View Monitoring for a list of + accepted strings. The value ANY returns count of all message categories. AGGREGATE_ALL + aggregates job counts for all message categories and returns the sum. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.
- `"parentJobId"`: This is a filter to list child (nested) jobs based on parent job ID. - `"resourceArn"`: Returns only copy jobs that match the specified resource Amazon Resource Name (ARN). - `"resourceType"`: Returns only backup jobs for the specified resources: Aurora for - Amazon Aurora DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB - for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute - Cloud EFS for Amazon Elastic File System FSx for Amazon FSx Neptune for Amazon - Neptune RDS for Amazon Relational Database Service Storage Gateway for Storage - Gateway S3 for Amazon S3 VirtualMachine for virtual machines + Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB + (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic + Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System + FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS + for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases + Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream + VirtualMachine for virtual machines - `"state"`: Returns only copy jobs that are in the specified state. """ function list_copy_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1862,7 +2460,7 @@ This action returns metadata about active and previous legal holds. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of resource list items to be returned. - `"nextToken"`: The next item following a partial list of returned resources. For example, - if a request is made to return maxResults number of resources, NextToken allows you to + if a request is made to return MaxResults number of resources, NextToken allows you to return more items in your list starting at the location pointed to by the next token. """ function list_legal_holds(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1893,7 +2491,7 @@ resource was saved, an Amazon Resource Name (ARN) of the resource, and a resourc Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. """ function list_protected_resources(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1909,6 +2507,49 @@ function list_protected_resources( ) end +""" + list_protected_resources_by_backup_vault(backup_vault_name) + list_protected_resources_by_backup_vault(backup_vault_name, params::Dict{String,<:Any}) + +This request lists the protected resources corresponding to each backup vault. + +# Arguments +- `backup_vault_name`: This is the list of protected resources by backup vault within the + vault(s) you specify by name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"backupVaultAccountId"`: This is the list of protected resources by backup vault within + the vault(s) you specify by account ID. +- `"maxResults"`: The maximum number of items to be returned. 
+- `"nextToken"`: The next item following a partial list of returned items. For example, if + a request is made to return MaxResults number of items, NextToken allows you to return more + items in your list starting at the location pointed to by the next token. +""" +function list_protected_resources_by_backup_vault( + backupVaultName; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/backup-vaults/$(backupVaultName)/resources/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_protected_resources_by_backup_vault( + backupVaultName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/backup-vaults/$(backupVaultName)/resources/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_recovery_points_by_backup_vault(backup_vault_name) list_recovery_points_by_backup_vault(backup_vault_name, params::Dict{String,<:Any}) @@ -1925,19 +2566,28 @@ Returns detailed information about the recovery points stored in a backup vault. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"backupPlanId"`: Returns only recovery points that match the specified backup plan ID. +- `"backupVaultAccountId"`: This parameter will sort the list of recovery points by account + ID. - `"createdAfter"`: Returns only recovery points that were created after the specified timestamp. - `"createdBefore"`: Returns only recovery points that were created before the specified timestamp. - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. - `"parentRecoveryPointArn"`: This returns only recovery points that match the specified parent (composite) recovery point Amazon Resource Name (ARN). - `"resourceArn"`: Returns only recovery points that match the specified resource Amazon Resource Name (ARN). -- `"resourceType"`: Returns only recovery points that match the specified resource type. +- `"resourceType"`: Returns only recovery points that match the specified resource type(s): + Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon + DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon + Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File + System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift + RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA + databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for + Amazon Timestream VirtualMachine for virtual machines """ function list_recovery_points_by_backup_vault( backupVaultName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1976,7 +2626,7 @@ This action returns recovery point ARNs (Amazon Resource Names) of the specified Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: This is the maximum number of resource list items to be returned. - `"nextToken"`: This is the next item following a partial list of returned resources. 
For
-  example, if a request is made to return maxResults number of resources, NextToken allows
+  example, if a request is made to return MaxResults number of resources, NextToken allows
   you to return more items in your list starting at the location pointed to by the next token.
 """
 function list_recovery_points_by_legal_hold(
@@ -2017,10 +2667,14 @@ recovery points created by Backup.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"managedByAWSBackupOnly"`: This attribute filters recovery points based on ownership. If
+  this is set to TRUE, the response will contain recovery points associated with the selected
+  resources that are managed by Backup. If this is set to FALSE, the response will contain
+  all recovery points associated with the selected resource. Type: Boolean
 - `"maxResults"`: The maximum number of items to be returned. Amazon RDS requires a value
   of at least 20.
 - `"nextToken"`: The next item following a partial list of returned items. For example, if
-  a request is made to return maxResults number of items, NextToken allows you to return more
+  a request is made to return MaxResults number of items, NextToken allows you to return more
   items in your list starting at the location pointed to by the next token.
 """
 function list_recovery_points_by_resource(
@@ -2117,6 +2771,63 @@ function list_report_plans(
     )
 end

+"""
+    list_restore_job_summaries()
+    list_restore_job_summaries(params::Dict{String,<:Any})
+
+This request obtains a summary of restore jobs created or running within the most
+recent 30 days. You can include parameters AccountID, State, ResourceType,
+AggregationPeriod, MaxResults, or NextToken to filter results. This request returns a
+summary that contains Region, Account, State, ResourceType, MessageCategory, StartTime,
+EndTime, and Count of included jobs.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"AccountId"`: Returns the job count for the specified account. If the request is sent
+  from a member account or an account not part of Amazon Web Services Organizations, jobs
+  within the requestor's account will be returned. Root, admin, and delegated administrator
+  accounts can use the value ANY to return job counts from every account in the organization.
+  AGGREGATE_ALL aggregates job counts from all accounts within the authenticated
+  organization, then returns the sum.
+- `"AggregationPeriod"`: This is the period that sets the boundaries for returned results.
+  Acceptable values include ONE_DAY for daily job count for the prior 14 days.
+  SEVEN_DAYS for the aggregated job count for the prior 7 days. FOURTEEN_DAYS for
+  aggregated job count for prior 14 days.
+- `"MaxResults"`: This parameter sets the maximum number of items to be returned. The value
+  is an integer. Range of accepted values is from 1 to 500.
+- `"NextToken"`: The next item following a partial list of returned resources. For example,
+  if a request is made to return MaxResults number of resources, NextToken allows you to
+  return more items in your list starting at the location pointed to by the next token.
+- `"ResourceType"`: Returns the job count for the specified resource type. Use request
+  GetSupportedResourceTypes to obtain strings for supported resource types. The value ANY
+  returns count of all resource types. AGGREGATE_ALL aggregates job counts for all resource
+  types and returns the sum.
The type of Amazon Web Services resource to be backed up; for
+  example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database
+  Service (Amazon RDS) database.
+- `"State"`: This parameter returns the job count for jobs with the specified state. The
+  value ANY returns count of all states. AGGREGATE_ALL aggregates job counts for all
+  states and returns the sum.
+"""
+function list_restore_job_summaries(; aws_config::AbstractAWSConfig=global_aws_config())
+    return backup(
+        "GET",
+        "/audit/restore-job-summaries";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function list_restore_job_summaries(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return backup(
+        "GET",
+        "/audit/restore-job-summaries",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     list_restore_jobs()
     list_restore_jobs(params::Dict{String,<:Any})
@@ -2136,8 +2847,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"createdBefore"`: Returns only restore jobs that were created before the specified date.
 - `"maxResults"`: The maximum number of items to be returned.
 - `"nextToken"`: The next item following a partial list of returned items. For example, if
-  a request is made to return maxResults number of items, NextToken allows you to return more
+  a request is made to return MaxResults number of items, NextToken allows you to return more
   items in your list starting at the location pointed to by the next token.
+- `"resourceType"`: Include this parameter to return only restore jobs for the specified
+  resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB
+  for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS
+  for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon
+  Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for
+  Amazon Redshift RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for
+  SAP HANA databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream
+  for Amazon Timestream VirtualMachine for virtual machines
+- `"restoreTestingPlanArn"`: This returns only restore testing jobs that match the
+  specified resource Amazon Resource Name (ARN).
 - `"status"`: Returns only restore jobs associated with the specified job status.
 """
 function list_restore_jobs(; aws_config::AbstractAWSConfig=global_aws_config())
@@ -2157,6 +2878,129 @@ function list_restore_jobs(
     )
 end

+"""
+    list_restore_jobs_by_protected_resource(resource_arn)
+    list_restore_jobs_by_protected_resource(resource_arn, params::Dict{String,<:Any})
+
+This returns restore jobs that contain the specified protected resource. You must include
+ResourceArn. You can optionally include NextToken, ByStatus, MaxResults,
+ByRecoveryPointCreationDateAfter, and ByRecoveryPointCreationDateBefore.
+
+# Arguments
+- `resource_arn`: Returns only restore jobs that match the specified resource Amazon
+  Resource Name (ARN).
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of items to be returned.
+- `"nextToken"`: The next item following a partial list of returned items. For example, if
+  a request is made to return MaxResults number of items, NextToken allows you to return more
+  items in your list starting at the location pointed to by the next token.
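# Editorial sketch (not generated code) of the resourceType filter documented above for
# ListRestoreJobs. The filter string "EC2" is taken from the docstring's list of accepted
# resource types; credentials and region are assumed to be configured.
using AWS
@service Backup

ec2_restore_jobs = Backup.list_restore_jobs(
    Dict("resourceType" => "EC2", "maxResults" => 25)
)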
+- `"recoveryPointCreationDateAfter"`: Returns only restore jobs of recovery points that + were created after the specified date. +- `"recoveryPointCreationDateBefore"`: Returns only restore jobs of recovery points that + were created before the specified date. +- `"status"`: Returns only restore jobs associated with the specified job status. +""" +function list_restore_jobs_by_protected_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/resources/$(resourceArn)/restore-jobs/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_restore_jobs_by_protected_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/resources/$(resourceArn)/restore-jobs/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_restore_testing_plans() + list_restore_testing_plans(params::Dict{String,<:Any}) + +Returns a list of restore testing plans. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of items to be returned. +- `"NextToken"`: The next item following a partial list of returned items. For example, if + a request is made to return MaxResults number of items, NextToken allows you to return more + items in your list starting at the location pointed to by the nexttoken. +""" +function list_restore_testing_plans(; aws_config::AbstractAWSConfig=global_aws_config()) + return backup( + "GET", + "/restore-testing/plans"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_restore_testing_plans( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/restore-testing/plans", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_restore_testing_selections(restore_testing_plan_name) + list_restore_testing_selections(restore_testing_plan_name, params::Dict{String,<:Any}) + +Returns a list of restore testing selections. Can be filtered by MaxResults and +RestoreTestingPlanName. + +# Arguments +- `restore_testing_plan_name`: Returns restore testing selections by the specified restore + testing plan name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of items to be returned. +- `"NextToken"`: The next item following a partial list of returned items. For example, if + a request is made to return MaxResults number of items, NextToken allows you to return more + items in your list starting at the location pointed to by the nexttoken. +""" +function list_restore_testing_selections( + RestoreTestingPlanName; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "GET", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_restore_testing_selections( + RestoreTestingPlanName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "GET", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags(resource_arn) list_tags(resource_arn, params::Dict{String,<:Any}) @@ -2175,7 +3019,7 @@ of the Feature availability by resource table. 
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if - a request is made to return maxResults number of items, NextToken allows you to return more + a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. """ function list_tags(resourceArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2384,6 +3228,53 @@ function put_backup_vault_notifications( ) end +""" + put_restore_validation_result(validation_status, restore_job_id) + put_restore_validation_result(validation_status, restore_job_id, params::Dict{String,<:Any}) + +This request allows you to send your independent self-run restore test validation results. +RestoreJobId and ValidationStatus are required. Optionally, you can input a +ValidationStatusMessage. + +# Arguments +- `validation_status`: This is the status of your restore validation. +- `restore_job_id`: This is a unique identifier of a restore job within Backup. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ValidationStatusMessage"`: This is an optional message string you can input to describe + the validation status for the restore test validation. +""" +function put_restore_validation_result( + ValidationStatus, restoreJobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return backup( + "PUT", + "/restore-jobs/$(restoreJobId)/validations", + Dict{String,Any}("ValidationStatus" => ValidationStatus); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_restore_validation_result( + ValidationStatus, + restoreJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-jobs/$(restoreJobId)/validations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ValidationStatus" => ValidationStatus), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_backup_job(backup_vault_name, iam_role_arn, resource_arn) start_backup_job(backup_vault_name, iam_role_arn, resource_arn, params::Dict{String,<:Any}) @@ -2410,7 +3301,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CompleteWindowMinutes"`: A value in minutes during which a successfully started backup must complete, or else Backup will cancel the job. This value is optional. This value begins counting down from when the backup was scheduled. It does not add additional time - for StartWindowMinutes, or if the backup started later than scheduled. + for StartWindowMinutes, or if the backup started later than scheduled. Like + StartWindowMinutes, this parameter has a maximum value of 100 years (52,560,000 minutes). - `"IdempotencyToken"`: A customer-chosen string that you can use to distinguish between otherwise identical calls to StartBackupJob. Retrying a successful request with the same idempotency token results in a success message with no action taken. @@ -2422,18 +3314,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. 
Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by - resource table. Backup ignores this expression for other resource types. + resource table. Backup ignores this expression for other resource types. This parameter has + a maximum value of 100 years (36,500 days). - `"RecoveryPointTags"`: To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair. - `"StartWindowMinutes"`: A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 - hours. If this value is included, it must be at least 60 minutes to avoid errors. During - the start window, the backup job status remains in CREATED status until it has successfully - begun or until the start window time has run out. If within the start window time Backup - receives an error that allows the job to be retried, Backup will automatically retry to - begin the job at least every 10 minutes until the backup successfully begins (the job - status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to - occur when the start window time is over). + hours. If this value is included, it must be at least 60 minutes to avoid errors. This + parameter has a maximum value of 100 years (52,560,000 minutes). During the start window, + the backup job status remains in CREATED status until it has successfully begun or until + the start window time has run out. If within the start window time Backup receives an error + that allows the job to be retried, Backup will automatically retry to begin the job at + least every 10 minutes until the backup successfully begins (the job status changes to + RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the + start window time is over). """ function start_backup_job( BackupVaultName, @@ -2685,8 +3579,8 @@ end Attempts to cancel a job to create a one-time backup of a resource. This action is not supported for the following services: Amazon FSx for Windows File Server, Amazon FSx for -Lustre, FSx for ONTAP , Amazon FSx for OpenZFS, Amazon DocumentDB (with MongoDB -compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune. +Lustre, Amazon FSx for NetApp ONTAP , Amazon FSx for OpenZFS, Amazon DocumentDB (with +MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune. # Arguments - `backup_job_id`: Uniquely identifies a request to Backup to back up a resource. @@ -2977,11 +3871,8 @@ end update_region_settings() update_region_settings(params::Dict{String,<:Any}) -Updates the current service opt-in settings for the Region. If service-opt-in is enabled -for a service, Backup tries to protect that service's resources in this Region, when the -resource is included in an on-demand backup or scheduled backup plan. Otherwise, Backup -does not try to protect that service's resources in this Region. Use the -DescribeRegionSettings API to determine the resource types that are supported. +Updates the current service opt-in settings for the Region. Use the DescribeRegionSettings +API to determine the resource types that are supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2990,7 +3881,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys Backup's advanced DynamoDB backup features, follow the procedure to enable advanced DynamoDB backup programmatically. - `"ResourceTypeOptInPreference"`: Updates the list of services along with the opt-in - preferences for the Region. + preferences for the Region. If resource assignments are only based on tags, then service + opt-in settings are applied. If a resource type is explicitly assigned to a backup plan, + such as Amazon S3, Amazon EC2, or Amazon RDS, it will be included in the backup even if the + opt-in is not enabled for that particular service. If both a resource type and tags are + specified in a resource assignment, the resource type specified in the backup plan takes + priority over the tag condition. Service opt-in settings are disregarded in this situation. """ function update_region_settings(; aws_config::AbstractAWSConfig=global_aws_config()) return backup( @@ -3065,3 +3961,105 @@ function update_report_plan( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_restore_testing_plan(restore_testing_plan, restore_testing_plan_name) + update_restore_testing_plan(restore_testing_plan, restore_testing_plan_name, params::Dict{String,<:Any}) + +This request will send changes to your specified restore testing plan. +RestoreTestingPlanName cannot be updated after it is created. RecoveryPointSelection can +contain: Algorithm ExcludeVaults IncludeVaults RecoveryPointTypes +SelectionWindowDays + +# Arguments +- `restore_testing_plan`: Specifies the body of a restore testing plan. +- `restore_testing_plan_name`: This is the restore testing plan name you wish to update. + +""" +function update_restore_testing_plan( + RestoreTestingPlan, + RestoreTestingPlanName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-testing/plans/$(RestoreTestingPlanName)", + Dict{String,Any}("RestoreTestingPlan" => RestoreTestingPlan); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_restore_testing_plan( + RestoreTestingPlan, + RestoreTestingPlanName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-testing/plans/$(RestoreTestingPlanName)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RestoreTestingPlan" => RestoreTestingPlan), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_restore_testing_selection(restore_testing_plan_name, restore_testing_selection, restore_testing_selection_name) + update_restore_testing_selection(restore_testing_plan_name, restore_testing_selection, restore_testing_selection_name, params::Dict{String,<:Any}) + +Most elements except the RestoreTestingSelectionName can be updated with this request. +RestoreTestingSelection can use either protected resource ARNs or conditions, but not both. +That is, if your selection has ProtectedResourceArns, requesting an update with the +parameter ProtectedResourceConditions will be unsuccessful. + +# Arguments +- `restore_testing_plan_name`: The restore testing plan name is required to update the + indicated testing plan. +- `restore_testing_selection`: To update your restore testing selection, you can use either + protected resource ARNs or conditions, but not both. That is, if your selection has + ProtectedResourceArns, requesting an update with the parameter ProtectedResourceConditions + will be unsuccessful. 
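# Editorial sketch combining the restore-testing operations defined above: list the
# existing plans, inspect their selections, then update one plan. Response key names
# ("RestoreTestingPlans", "RestoreTestingPlanName") and the RecoveryPointSelection values
# ("LATEST_WITHIN_WINDOW", "SNAPSHOT") are assumptions on my part; only the field names
# come from the docstrings. Credentials and region are assumed to be configured.
using AWS
@service Backup

plans = get(Backup.list_restore_testing_plans(), "RestoreTestingPlans", [])
for plan in plans
    name = plan["RestoreTestingPlanName"]
    selections = Backup.list_restore_testing_selections(name)
    @info "restore testing plan" name selections
end

# Hypothetical update of a single plan's recovery-point selection window.
plan_update = Dict(
    "RecoveryPointSelection" => Dict(
        "Algorithm" => "LATEST_WITHIN_WINDOW",   # assumed algorithm value
        "IncludeVaults" => ["*"],
        "RecoveryPointTypes" => ["SNAPSHOT"],    # assumed recovery point type
        "SelectionWindowDays" => 7,
    ),
)
Backup.update_restore_testing_plan(plan_update, "MyRestoreTestingPlan")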
+- `restore_testing_selection_name`: This is the required restore testing selection name of + the restore testing selection you wish to update. + +""" +function update_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelection, + RestoreTestingSelectionName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections/$(RestoreTestingSelectionName)", + Dict{String,Any}("RestoreTestingSelection" => RestoreTestingSelection); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_restore_testing_selection( + RestoreTestingPlanName, + RestoreTestingSelection, + RestoreTestingSelectionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return backup( + "PUT", + "/restore-testing/plans/$(RestoreTestingPlanName)/selections/$(RestoreTestingSelectionName)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("RestoreTestingSelection" => RestoreTestingSelection), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/backupstorage.jl b/src/services/backupstorage.jl deleted file mode 100644 index 6358a00454..0000000000 --- a/src/services/backupstorage.jl +++ /dev/null @@ -1,403 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: backupstorage -using AWS.Compat -using AWS.UUIDs - -""" - delete_object(job_id, object_name) - delete_object(job_id, object_name, params::Dict{String,<:Any}) - -Delete Object from the incremental base Backup. - -# Arguments -- `job_id`: Backup job Id for the in-progress backup. -- `object_name`: The name of the Object. - -""" -function delete_object(jobId, objectName; aws_config::AbstractAWSConfig=global_aws_config()) - return backupstorage( - "DELETE", - "/backup-jobs/$(jobId)/object/$(objectName)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_object( - jobId, - objectName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "DELETE", - "/backup-jobs/$(jobId)/object/$(objectName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_chunk(chunk_token, job_id) - get_chunk(chunk_token, job_id, params::Dict{String,<:Any}) - -Gets the specified object's chunk. - -# Arguments -- `chunk_token`: Chunk token -- `job_id`: Storage job id - -""" -function get_chunk(chunkToken, jobId; aws_config::AbstractAWSConfig=global_aws_config()) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/chunk/$(chunkToken)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_chunk( - chunkToken, - jobId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/chunk/$(chunkToken)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_object_metadata(job_id, object_token) - get_object_metadata(job_id, object_token, params::Dict{String,<:Any}) - -Get metadata associated with an Object. - -# Arguments -- `job_id`: Backup job id for the in-progress backup. -- `object_token`: Object token. 
- -""" -function get_object_metadata( - jobId, objectToken; aws_config::AbstractAWSConfig=global_aws_config() -) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/object/$(objectToken)/metadata"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_object_metadata( - jobId, - objectToken, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/object/$(objectToken)/metadata", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_chunks(job_id, object_token) - list_chunks(job_id, object_token, params::Dict{String,<:Any}) - -List chunks in a given Object - -# Arguments -- `job_id`: Storage job id -- `object_token`: Object token - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"max-results"`: Maximum number of chunks -- `"next-token"`: Pagination token -""" -function list_chunks(jobId, objectToken; aws_config::AbstractAWSConfig=global_aws_config()) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/chunks/$(objectToken)/list"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_chunks( - jobId, - objectToken, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/chunks/$(objectToken)/list", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_objects(job_id) - list_objects(job_id, params::Dict{String,<:Any}) - -List all Objects in a given Backup. - -# Arguments -- `job_id`: Storage job id - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"created-after"`: (Optional) Created after filter -- `"created-before"`: (Optional) Created before filter -- `"max-results"`: Maximum objects count -- `"next-token"`: Pagination token -- `"starting-object-name"`: Optional, specifies the starting Object name to list from. - Ignored if NextToken is not NULL -- `"starting-object-prefix"`: Optional, specifies the starting Object prefix to list from. - Ignored if NextToken is not NULL -""" -function list_objects(jobId; aws_config::AbstractAWSConfig=global_aws_config()) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/objects/list"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_objects( - jobId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return backupstorage( - "GET", - "/restore-jobs/$(jobId)/objects/list", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - notify_object_complete(checksum, checksum-algorithm, job_id, upload_id) - notify_object_complete(checksum, checksum-algorithm, job_id, upload_id, params::Dict{String,<:Any}) - -Complete upload - -# Arguments -- `checksum`: Object checksum -- `checksum-algorithm`: Checksum algorithm -- `job_id`: Backup job Id for the in-progress backup -- `upload_id`: Upload Id for the in-progress upload - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MetadataBlob"`: Optional metadata associated with an Object. Maximum length is 4MB. -- `"metadata-blob-length"`: The size of MetadataBlob. -- `"metadata-checksum"`: Checksum of MetadataBlob. -- `"metadata-checksum-algorithm"`: Checksum algorithm. 
-- `"metadata-string"`: Optional metadata associated with an Object. Maximum string length - is 256 bytes. -""" -function notify_object_complete( - checksum, - checksum_algorithm, - jobId, - uploadId; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/object/$(uploadId)/complete", - Dict{String,Any}( - "checksum" => checksum, "checksum-algorithm" => checksum_algorithm - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function notify_object_complete( - checksum, - checksum_algorithm, - jobId, - uploadId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/object/$(uploadId)/complete", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "checksum" => checksum, "checksum-algorithm" => checksum_algorithm - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - put_chunk(data, checksum, checksum-algorithm, chunk_index, job_id, length, upload_id) - put_chunk(data, checksum, checksum-algorithm, chunk_index, job_id, length, upload_id, params::Dict{String,<:Any}) - -Upload chunk. - -# Arguments -- `data`: Data to be uploaded -- `checksum`: Data checksum -- `checksum-algorithm`: Checksum algorithm -- `chunk_index`: Describes this chunk's position relative to the other chunks -- `job_id`: Backup job Id for the in-progress backup. -- `length`: Data length -- `upload_id`: Upload Id for the in-progress upload. - -""" -function put_chunk( - Data, - checksum, - checksum_algorithm, - chunkIndex, - jobId, - length, - uploadId; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/chunk/$(uploadId)/$(chunkIndex)", - Dict{String,Any}( - "Data" => Data, - "checksum" => checksum, - "checksum-algorithm" => checksum_algorithm, - "length" => length, - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function put_chunk( - Data, - checksum, - checksum_algorithm, - chunkIndex, - jobId, - length, - uploadId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/chunk/$(uploadId)/$(chunkIndex)", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "Data" => Data, - "checksum" => checksum, - "checksum-algorithm" => checksum_algorithm, - "length" => length, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - put_object(job_id, object_name) - put_object(job_id, object_name, params::Dict{String,<:Any}) - -Upload object that can store object metadata String and data blob in single API call using -inline chunk field. - -# Arguments -- `job_id`: Backup job Id for the in-progress backup. -- `object_name`: The name of the Object to be uploaded. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"InlineChunk"`: Inline chunk data to be uploaded. -- `"checksum"`: Inline chunk checksum -- `"checksum-algorithm"`: Inline chunk checksum algorithm -- `"length"`: Length of the inline chunk data. -- `"metadata-string"`: Store user defined metadata like backup checksum, disk ids, restore - metadata etc. -- `"object-checksum"`: object checksum -- `"object-checksum-algorithm"`: object checksum algorithm -- `"throwOnDuplicate"`: Throw an exception if Object name is already exist. 
-""" -function put_object(jobId, objectName; aws_config::AbstractAWSConfig=global_aws_config()) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/object/$(objectName)/put-object"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function put_object( - jobId, - objectName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/object/$(objectName)/put-object", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - start_object(job_id, object_name) - start_object(job_id, object_name, params::Dict{String,<:Any}) - -Start upload containing one or many chunks. - -# Arguments -- `job_id`: Backup job Id for the in-progress backup -- `object_name`: Name for the object. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ThrowOnDuplicate"`: Throw an exception if Object name is already exist. -""" -function start_object(jobId, objectName; aws_config::AbstractAWSConfig=global_aws_config()) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/object/$(objectName)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function start_object( - jobId, - objectName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return backupstorage( - "PUT", - "/backup-jobs/$(jobId)/object/$(objectName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/batch.jl b/src/services/batch.jl index ea6bb61249..a9a4cb5563 100644 --- a/src/services/batch.jl +++ b/src/services/batch.jl @@ -10,9 +10,13 @@ using AWS.UUIDs Cancels a job in an Batch job queue. Jobs that are in the SUBMITTED or PENDING are canceled. A job inRUNNABLE remains in RUNNABLE until it reaches the head of the job queue. -Then the job status is updated to FAILED. Jobs that progressed to the STARTING or RUNNING -state aren't canceled. However, the API operation still succeeds, even if no job is -canceled. These jobs must be terminated with the TerminateJob operation. +Then the job status is updated to FAILED. A PENDING job is canceled after all dependency +jobs are completed. Therefore, it may take longer than expected to cancel a job in PENDING +status. When you try to cancel an array parent job in PENDING, Batch attempts to cancel all +child jobs. The array parent job is canceled when all child jobs are completed. Jobs that +progressed to the STARTING or RUNNING state aren't canceled. However, the API operation +still succeeds, even if no job is canceled. These jobs must be terminated with the +TerminateJob operation. # Arguments - `job_id`: The Batch job ID of the job to cancel. @@ -86,13 +90,15 @@ compute environment. In April 2022, Batch added enhanced support for updating environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules: Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked -role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE -or SPOT_CAPACITY_OPTIMIZED. Set the update to latest image version -(updateToLatestImageVersion) parameter to true. Don't specify an AMI ID in imageId, -imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). 
In -that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at -the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID -in the imageId or imageIdOverride parameters, or the launch template identified by the +role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, +SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED. Set the update to latest image +version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion +parameter is used when you update a compute environment. This parameter is ignored when you +create a compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in +ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects +the latest Amazon ECS optimized AMI that's supported by Batch at the time the +infrastructure update is initiated. Alternatively, you can specify the AMI ID in the +imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be @@ -225,6 +231,9 @@ preference for scheduling jobs to that compute environment. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"jobStateTimeLimitActions"`: The set of actions that Batch performs on jobs that remain + at the head of the job queue in the specified state longer than specified times. Batch will + perform each action after maxTimeSeconds has passed. - `"schedulingPolicyArn"`: The Amazon Resource Name (ARN) of the fair share scheduling policy. If this parameter is specified, the job queue uses a fair share scheduling policy. If this parameter isn't specified, the job queue uses a first in, first out (FIFO) @@ -536,7 +545,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"jobDefinitions"`: A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format arn:aws:batch:{Region}:{Account}:job-definition/{JobDefinitionName}:{Revision} or a short - version using the form {JobDefinitionName}:{Revision}. + version using the form {JobDefinitionName}:{Revision}. This parameter can't be used with + other parameters. - `"maxResults"`: The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page and a nextToken response element. The remaining results @@ -679,6 +689,41 @@ function describe_scheduling_policies( ) end +""" + get_job_queue_snapshot(job_queue) + get_job_queue_snapshot(job_queue, params::Dict{String,<:Any}) + +Provides a list of the first 100 RUNNABLE jobs associated to a single job queue. + +# Arguments +- `job_queue`: The job queue’s name or full queue Amazon Resource Name (ARN). 
+ +""" +function get_job_queue_snapshot(jobQueue; aws_config::AbstractAWSConfig=global_aws_config()) + return batch( + "POST", + "/v1/getjobqueuesnapshot", + Dict{String,Any}("jobQueue" => jobQueue); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_job_queue_snapshot( + jobQueue, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return batch( + "POST", + "/v1/getjobqueuesnapshot", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("jobQueue" => jobQueue), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_jobs() list_jobs(params::Dict{String,<:Any}) @@ -723,12 +768,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"jobStatus"`: The job status used to filter jobs in the specified queue. If the filters parameter is specified, the jobStatus parameter is ignored and jobs with any status are returned. If you don't specify a status, only RUNNING jobs are returned. -- `"maxResults"`: The maximum number of results returned by ListJobs in paginated output. - When this parameter is used, ListJobs only returns maxResults results in a single page and - a nextToken response element. The remaining results of the initial request can be seen by - sending another ListJobs request with the returned nextToken value. This value can be - between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results - and a nextToken value if applicable. +- `"maxResults"`: The maximum number of results returned by ListJobs in a paginated output. + When this parameter is used, ListJobs returns up to maxResults results in a single page and + a nextToken response element, if applicable. The remaining results of the initial request + can be seen by sending another ListJobs request with the returned nextToken value. The + following outlines key parameters and limitations: The minimum value is 1. When + --job-status is used, Batch returns up to 1000 values. When --filters is used, Batch + returns up to 100 values. If neither parameter is used, then ListJobs returns up to 1000 + results (jobs that are in the RUNNING status) and a nextToken value, if applicable. - `"multiNodeJobId"`: The job ID for a multi-node parallel job. Specifying a multi-node parallel job ID with this parameter lists all nodes that are associated with the specified job. @@ -848,25 +895,28 @@ Registers an Batch job definition. letters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). - `type`: The type of job definition. For more information about multi-node parallel jobs, - see Creating a multi-node parallel job definition in the Batch User Guide. If the job is - run on Fargate resources, then multinode isn't supported. + see Creating a multi-node parallel job definition in the Batch User Guide. If the value + is container, then one of the following is required: containerProperties, ecsProperties, or + eksProperties. If the value is multinode, then nodeProperties is required. If the job + is run on Fargate resources, then multinode isn't supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"containerProperties"`: An object with various properties specific to Amazon ECS based +- `"containerProperties"`: An object with properties specific to Amazon ECS-based single-node container-based jobs. 
If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties. This must not be specified - for Amazon EKS based job definitions. If the job runs on Fargate resources, then you must + for Amazon EKS-based job definitions. If the job runs on Fargate resources, then you must not specify nodeProperties; use only containerProperties. -- `"eksProperties"`: An object with various properties that are specific to Amazon EKS - based jobs. This must not be specified for Amazon ECS based job definitions. -- `"nodeProperties"`: An object with various properties specific to multi-node parallel - jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For - more information, see Multi-node Parallel Jobs in the Batch User Guide. If the job - definition's type parameter is container, then you must specify either containerProperties - or nodeProperties. If the job runs on Fargate resources, then you must not specify - nodeProperties; use containerProperties instead. If the job runs on Amazon EKS resources, - then you must not specify nodeProperties. +- `"ecsProperties"`: An object with properties that are specific to Amazon ECS-based jobs. + This must not be specified for Amazon EKS-based job definitions. +- `"eksProperties"`: An object with properties that are specific to Amazon EKS-based jobs. + This must not be specified for Amazon ECS based job definitions. +- `"nodeProperties"`: An object with properties specific to multi-node parallel jobs. If + you specify node properties for a job, it becomes a multi-node parallel job. For more + information, see Multi-node Parallel Jobs in the Batch User Guide. If the job runs on + Fargate resources, then you must not specify nodeProperties; use containerProperties + instead. If the job runs on Amazon EKS resources, then you must not specify + nodeProperties. - `"parameters"`: Default parameter substitution placeholders to set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from the job definition. @@ -961,20 +1011,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"arrayProperties"`: The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. For more information, see Array Jobs in the Batch User Guide. -- `"containerOverrides"`: An object with various properties that override the defaults for - the job definition that specify the name of a container in the specified job definition and - the overrides it should receive. You can override the default command for a container, - which is specified in the job definition or the Docker image, with a command override. You - can also override existing environment variables on a container or add new environment - variables to it with an environment override. +- `"containerOverrides"`: An object with properties that override the defaults for the job + definition that specify the name of a container in the specified job definition and the + overrides it should receive. You can override the default command for a container, which is + specified in the job definition or the Docker image, with a command override. You can also + override existing environment variables on a container or add new environment variables to + it with an environment override. 
- `"dependsOn"`: A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each dependency to complete before it can begin. -- `"eksPropertiesOverride"`: An object that can only be specified for jobs that are run on - Amazon EKS resources with various properties that override defaults for the job definition. +- `"ecsPropertiesOverride"`: An object, with properties that override defaults for the job + definition, can only be specified for jobs that are run on Amazon ECS resources. +- `"eksPropertiesOverride"`: An object, with properties that override defaults for the job + definition, can only be specified for jobs that are run on Amazon EKS resources. - `"nodeOverrides"`: A list of node overrides in JSON format that specify the node range to target and the container overrides for that node range. This parameter isn't applicable to jobs that are running on Fargate resources; use containerOverrides instead. @@ -994,11 +1046,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"schedulingPriorityOverride"`: The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling - priority in the job definition. The minimum supported value is 0 and the maximum supported - value is 9999. -- `"shareIdentifier"`: The share identifier for the job. If the job queue doesn't have a - scheduling policy, then this parameter must not be specified. If the job queue has a - scheduling policy, then this parameter must be specified. + priority in the job definition and works only within a single share identifier. The minimum + supported value is 0 and the maximum supported value is 9999. +- `"shareIdentifier"`: The share identifier for the job. Don't specify this parameter if + the job queue doesn't have a scheduling policy. If the job queue has a scheduling policy, + then this parameter must be specified. This string is limited to 255 alphanumeric + characters, and can be followed by an asterisk (*). - `"tags"`: The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference. @@ -1277,6 +1330,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys EC2 and Fargate compute environments can't be mixed. All compute environments that are associated with a job queue must share the same architecture. Batch doesn't support mixing compute environment architecture types in a single job queue. +- `"jobStateTimeLimitActions"`: The set of actions that Batch perform on jobs that remain + at the head of the job queue in the specified state longer than specified times. Batch will + perform each action after maxTimeSeconds has passed. - `"priority"`: The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. 
Priority is determined in descending order. For example, a diff --git a/src/services/bcm_data_exports.jl b/src/services/bcm_data_exports.jl new file mode 100644 index 0000000000..0f1878e4b1 --- /dev/null +++ b/src/services/bcm_data_exports.jl @@ -0,0 +1,452 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: bcm_data_exports +using AWS.Compat +using AWS.UUIDs + +""" + create_export(export) + create_export(export, params::Dict{String,<:Any}) + +Creates a data export and specifies the data query, the delivery preference, and any +optional resource tags. A DataQuery consists of both a QueryStatement and +TableConfigurations. The QueryStatement is an SQL statement. Data Exports only supports a +limited subset of the SQL syntax. For more information on the SQL syntax that is supported, +see Data query. To view the available tables and columns, see the Data Exports table +dictionary. The TableConfigurations is a collection of specified TableProperties for the +table being queried in the QueryStatement. TableProperties are additional configurations +you can provide to change the data and schema of a table. Each table can have different +TableProperties. However, tables are not required to have any TableProperties. Each table +property has a default value that it assumes if not specified. For more information on +table configurations, see Data query. To view the table properties available for each +table, see the Data Exports table dictionary or use the ListTables API to get a response of +all tables and their available properties. + +# Arguments +- `export`: The details of the export, including data query, name, description, and + destination configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceTags"`: An optional list of tags to associate with the specified export. Each + tag consists of a key and a value, and each key must be unique for the resource. +""" +function create_export(Export; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "CreateExport", + Dict{String,Any}("Export" => Export); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_export( + Export, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bcm_data_exports( + "CreateExport", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Export" => Export), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_export(export_arn) + delete_export(export_arn, params::Dict{String,<:Any}) + +Deletes an existing data export. + +# Arguments +- `export_arn`: The Amazon Resource Name (ARN) for this export. + +""" +function delete_export(ExportArn; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "DeleteExport", + Dict{String,Any}("ExportArn" => ExportArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_export( + ExportArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "DeleteExport", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ExportArn" => ExportArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_execution(execution_id, export_arn) + get_execution(execution_id, export_arn, params::Dict{String,<:Any}) + +Exports data based on the source data update. 
+ +# Arguments +- `execution_id`: The ID for this specific execution. +- `export_arn`: The Amazon Resource Name (ARN) of the Export object that generated this + specific execution. + +""" +function get_execution( + ExecutionId, ExportArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return bcm_data_exports( + "GetExecution", + Dict{String,Any}("ExecutionId" => ExecutionId, "ExportArn" => ExportArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_execution( + ExecutionId, + ExportArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "GetExecution", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ExecutionId" => ExecutionId, "ExportArn" => ExportArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_export(export_arn) + get_export(export_arn, params::Dict{String,<:Any}) + +Views the definition of an existing data export. + +# Arguments +- `export_arn`: The Amazon Resource Name (ARN) for this export. + +""" +function get_export(ExportArn; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "GetExport", + Dict{String,Any}("ExportArn" => ExportArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_export( + ExportArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "GetExport", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ExportArn" => ExportArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_table(table_name) + get_table(table_name, params::Dict{String,<:Any}) + +Returns the metadata for the specified table and table properties. This includes the list +of columns in the table schema, their data types, and column descriptions. + +# Arguments +- `table_name`: The name of the table. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"TableProperties"`: TableProperties are additional configurations you can provide to + change the data and schema of a table. Each table can have different TableProperties. + Tables are not required to have any TableProperties. Each table property has a default + value that it assumes if not specified. +""" +function get_table(TableName; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "GetTable", + Dict{String,Any}("TableName" => TableName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_table( + TableName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "GetTable", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("TableName" => TableName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_executions(export_arn) + list_executions(export_arn, params::Dict{String,<:Any}) + +Lists the historical executions for the export. + +# Arguments +- `export_arn`: The Amazon Resource Name (ARN) for this export. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of objects that are returned for the request. +- `"NextToken"`: The token to retrieve the next set of results. 
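MaxResults and NextToken follow the usual AWS pagination pattern. A minimal paging sketch, assuming the service module is loaded with AWS.jl's `@service` macro (the module name lowercases to this file's name), that the default feature set returns the parsed response body, and that the response carries `Executions`/`NextToken` keys; the ARN is a placeholder:

```julia
using AWS
@service BCM_Data_Exports

# Placeholder ARN; use the value returned by create_export.
const EXPORT_ARN = "arn:aws:bcm-data-exports:us-east-1:111122223333:export/ExampleExport"

function all_executions(export_arn)
    executions = Any[]
    params = Dict{String,Any}("MaxResults" => 25)
    while true
        page = BCM_Data_Exports.list_executions(export_arn, params)
        append!(executions, get(page, "Executions", Any[]))  # response key assumed
        token = get(page, "NextToken", nothing)
        token === nothing && return executions
        params["NextToken"] = token                          # request the next page
    end
end

all_executions(EXPORT_ARN)
```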
+""" +function list_executions(ExportArn; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "ListExecutions", + Dict{String,Any}("ExportArn" => ExportArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_executions( + ExportArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "ListExecutions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ExportArn" => ExportArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_exports() + list_exports(params::Dict{String,<:Any}) + +Lists all data export definitions. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of objects that are returned for the request. +- `"NextToken"`: The token to retrieve the next set of results. +""" +function list_exports(; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "ListExports"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_exports( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bcm_data_exports( + "ListExports", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tables() + list_tables(params::Dict{String,<:Any}) + +Lists all available tables in data exports. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of objects that are returned for the request. +- `"NextToken"`: The token to retrieve the next set of results. +""" +function list_tables(; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "ListTables"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_tables( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bcm_data_exports( + "ListTables", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +List tags associated with an existing data export. + +# Arguments +- `resource_arn`: The unique identifier for the resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of objects that are returned for the request. +- `"NextToken"`: The token to retrieve the next set of results. +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return bcm_data_exports( + "ListTagsForResource", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, resource_tags) + tag_resource(resource_arn, resource_tags, params::Dict{String,<:Any}) + +Adds tags for an existing data export definition. + +# Arguments +- `resource_arn`: The unique identifier for the resource. 
+- `resource_tags`: The tags to associate with the resource. Each tag consists of a key and + a value, and each key must be unique for the resource. + +""" +function tag_resource( + ResourceArn, ResourceTags; aws_config::AbstractAWSConfig=global_aws_config() +) + return bcm_data_exports( + "TagResource", + Dict{String,Any}("ResourceArn" => ResourceArn, "ResourceTags" => ResourceTags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + ResourceTags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceArn" => ResourceArn, "ResourceTags" => ResourceTags + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, resource_tag_keys) + untag_resource(resource_arn, resource_tag_keys, params::Dict{String,<:Any}) + +Deletes tags associated with an existing data export definition. + +# Arguments +- `resource_arn`: The unique identifier for the resource. +- `resource_tag_keys`: The tag keys that are associated with the resource ARN. + +""" +function untag_resource( + ResourceArn, ResourceTagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return bcm_data_exports( + "UntagResource", + Dict{String,Any}( + "ResourceArn" => ResourceArn, "ResourceTagKeys" => ResourceTagKeys + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + ResourceTagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceArn" => ResourceArn, "ResourceTagKeys" => ResourceTagKeys + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_export(export, export_arn) + update_export(export, export_arn, params::Dict{String,<:Any}) + +Updates an existing data export by overwriting all export parameters. All export parameters +must be provided in the UpdateExport request. + +# Arguments +- `export`: The name and query details for the export. +- `export_arn`: The Amazon Resource Name (ARN) for this export. 
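Because UpdateExport overwrites every export parameter, a read-modify-write flow is the natural usage. A rough sketch, assuming `@service` loading and that GetExport returns the parsed body with an `Export` object containing a `Description` field (names taken from the service documentation, not from this file); the ARN is a placeholder:

```julia
using AWS
@service BCM_Data_Exports

arn = "arn:aws:bcm-data-exports:us-east-1:111122223333:export/ExampleExport"  # placeholder

current = BCM_Data_Exports.get_export(arn)
export_def = current["Export"]                        # response key assumed
export_def["Description"] = "Nightly CUR 2.0 export"  # field name assumed
BCM_Data_Exports.update_export(export_def, arn)       # resend the full definition
```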
+ +""" +function update_export(Export, ExportArn; aws_config::AbstractAWSConfig=global_aws_config()) + return bcm_data_exports( + "UpdateExport", + Dict{String,Any}("Export" => Export, "ExportArn" => ExportArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_export( + Export, + ExportArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bcm_data_exports( + "UpdateExport", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Export" => Export, "ExportArn" => ExportArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/bedrock.jl b/src/services/bedrock.jl new file mode 100644 index 0000000000..d4d5f6aad1 --- /dev/null +++ b/src/services/bedrock.jl @@ -0,0 +1,1394 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: bedrock +using AWS.Compat +using AWS.UUIDs + +""" + create_evaluation_job(evaluation_config, inference_config, job_name, output_data_config, role_arn) + create_evaluation_job(evaluation_config, inference_config, job_name, output_data_config, role_arn, params::Dict{String,<:Any}) + +API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and +model evaluation jobs that use human workers. To learn more about the requirements for +creating a model evaluation job see, Model evaluations. + +# Arguments +- `evaluation_config`: Specifies whether the model evaluation job is automatic or uses + human worker. +- `inference_config`: Specify the models you want to use in your model evaluation job. + Automatic model evaluation jobs support a single model, and model evaluation job that use + human workers support two models. +- `job_name`: The name of the model evaluation job. Model evaluation job names must unique + with your AWS account, and your account's AWS region. +- `output_data_config`: An object that defines where the results of model evaluation job + will be saved in Amazon S3. +- `role_arn`: The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock can + assume to perform tasks on your behalf. The service role must have Amazon Bedrock as the + service principal, and provide access to any Amazon S3 buckets specified in the + EvaluationConfig object. To pass this role to Amazon Bedrock, the caller of this API must + have the iam:PassRole permission. To learn more about the required permissions, see + Required permissions. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API + request completes no more than one time. If this token matches a previous request, Amazon + Bedrock ignores the request, but does not return an error. For more information, see + Ensuring idempotency. +- `"customerEncryptionKeyId"`: Specify your customer managed key ARN that will be used to + encrypt your model evaluation job. +- `"jobDescription"`: A description of the model evaluation job. +- `"jobTags"`: Tags to attach to the model evaluation job. 
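A sketch of the call shape only: the nested EvaluationConfig and EvaluationInferenceConfig payloads are reduced to placeholder dicts (their real members are in the Amazon Bedrock API reference), and the role ARN, bucket, and job name are invented. The generated wrapper below already injects a random `clientRequestToken` via `string(uuid4())`, so one only needs to be supplied through the params dict when retrying with the same token:

```julia
using AWS
@service Bedrock

# Placeholder shapes; fill in the members documented for the Bedrock evaluation APIs.
evaluation_config = Dict{String,Any}("automated" => Dict{String,Any}())
inference_config  = Dict{String,Any}("models" => Any[])
output_config     = Dict{String,Any}("s3Uri" => "s3://example-bucket/eval-output/")
role_arn          = "arn:aws:iam::111122223333:role/BedrockEvaluationRole"

Bedrock.create_evaluation_job(
    evaluation_config,
    inference_config,
    "example-evaluation-job",
    output_config,
    role_arn,
    Dict{String,Any}("jobDescription" => "Illustrative request only"),
)
```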
+""" +function create_evaluation_job( + evaluationConfig, + inferenceConfig, + jobName, + outputDataConfig, + roleArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/evaluation-jobs", + Dict{String,Any}( + "evaluationConfig" => evaluationConfig, + "inferenceConfig" => inferenceConfig, + "jobName" => jobName, + "outputDataConfig" => outputDataConfig, + "roleArn" => roleArn, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_evaluation_job( + evaluationConfig, + inferenceConfig, + jobName, + outputDataConfig, + roleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/evaluation-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "evaluationConfig" => evaluationConfig, + "inferenceConfig" => inferenceConfig, + "jobName" => jobName, + "outputDataConfig" => outputDataConfig, + "roleArn" => roleArn, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name) + create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name, params::Dict{String,<:Any}) + +Creates a guardrail to block topics and to filter out harmful content. Specify a name and +optional description. Specify messages for when the guardrail successfully blocks a +prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields. + Specify topics for the guardrail to deny in the topicPolicyConfig object. Each +GuardrailTopicConfig object in the topicsConfig list pertains to one topic. Give a name +and description so that the guardrail can properly identify the topic. Specify DENY in +the type field. (Optional) Provide up to five prompts that you would categorize as +belonging to the topic in the examples list. Specify filter strengths for the harmful +categories defined in Amazon Bedrock in the contentPolicyConfig object. Each +GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful +category. For more information, see Content filters. For more information about the fields +in a content filter, see GuardrailContentFilterConfig. Specify the category in the type +field. Specify the strength of the filter for prompts in the inputStrength field and for +model responses in the strength field of the GuardrailContentFilterConfig. (Optional) +For security, include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any +tags to the guardrail in the tags object. For more information, see Tag resources. + +# Arguments +- `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. +- `blocked_outputs_messaging`: The message to return when the guardrail blocks a model + response. +- `name`: The name to give the guardrail. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API + request completes no more than once. If this token matches a previous request, Amazon + Bedrock ignores the request, but does not return an error. For more information, see + Ensuring idempotency in the Amazon S3 User Guide. +- `"contentPolicyConfig"`: The content filter policies to configure for the guardrail. +- `"description"`: A description of the guardrail. 
+- `"kmsKeyId"`: The ARN of the KMS key that you use to encrypt the guardrail. +- `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for + the guardrail. +- `"tags"`: The tags that you want to attach to the guardrail. +- `"topicPolicyConfig"`: The topic policies to configure for the guardrail. +- `"wordPolicyConfig"`: The word policy you configure for the guardrail. +""" +function create_guardrail( + blockedInputMessaging, + blockedOutputsMessaging, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/guardrails", + Dict{String,Any}( + "blockedInputMessaging" => blockedInputMessaging, + "blockedOutputsMessaging" => blockedOutputsMessaging, + "name" => name, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_guardrail( + blockedInputMessaging, + blockedOutputsMessaging, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/guardrails", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "blockedInputMessaging" => blockedInputMessaging, + "blockedOutputsMessaging" => blockedOutputsMessaging, + "name" => name, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_guardrail_version(guardrail_identifier) + create_guardrail_version(guardrail_identifier, params::Dict{String,<:Any}) + +Creates a version of the guardrail. Use this API to create a snapshot of the guardrail when +you are satisfied with a configuration, or to compare the configuration with another +version. + +# Arguments +- `guardrail_identifier`: The unique identifier of the guardrail. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API + request completes no more than once. If this token matches a previous request, Amazon + Bedrock ignores the request, but does not return an error. For more information, see + Ensuring idempotency in the Amazon S3 User Guide. +- `"description"`: A description of the guardrail version. +""" +function create_guardrail_version( + guardrailIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/guardrails/$(guardrailIdentifier)", + Dict{String,Any}("clientRequestToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_guardrail_version( + guardrailIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/guardrails/$(guardrailIdentifier)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("clientRequestToken" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_model_customization_job(base_model_identifier, custom_model_name, hyper_parameters, job_name, output_data_config, role_arn, training_data_config) + create_model_customization_job(base_model_identifier, custom_model_name, hyper_parameters, job_name, output_data_config, role_arn, training_data_config, params::Dict{String,<:Any}) + +Creates a fine-tuning job to customize a base model. You specify the base foundation model +and the location of the training data. 
After the model-customization job completes +successfully, your custom model resource will be ready to use. Amazon Bedrock returns +validation loss metrics and output generations after the job completes. For information on +the format of training and validation data, see Prepare the datasets. Model-customization +jobs are asynchronous and the completion time depends on the base model and the +training/validation data size. To monitor a job, use the GetModelCustomizationJob operation +to retrieve the job status. For more information, see Custom models in the Amazon Bedrock +User Guide. + +# Arguments +- `base_model_identifier`: Name of the base model. +- `custom_model_name`: A name for the resulting custom model. +- `hyper_parameters`: Parameters related to tuning the model. For details on the format for + different models, see Custom model hyperparameters. +- `job_name`: A name for the fine-tuning job. +- `output_data_config`: S3 location for the output data. +- `role_arn`: The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock can + assume to perform tasks on your behalf. For example, during model training, Amazon Bedrock + needs your permission to read input data from an S3 bucket, write model artifacts to an S3 + bucket. To pass this role to Amazon Bedrock, the caller of this API must have the + iam:PassRole permission. +- `training_data_config`: Information about the training dataset. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API + request completes no more than one time. If this token matches a previous request, Amazon + Bedrock ignores the request, but does not return an error. For more information, see + Ensuring idempotency. +- `"customModelKmsKeyId"`: The custom model is encrypted at rest using this key. +- `"customModelTags"`: Tags to attach to the resulting custom model. +- `"customizationType"`: The customization type. +- `"jobTags"`: Tags to attach to the job. +- `"validationDataConfig"`: Information about the validation dataset. +- `"vpcConfig"`: VPC configuration (optional). Configuration parameters for the private + Virtual Private Cloud (VPC) that contains the resources you are using for this job. 
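A minimal fine-tuning request might look like the sketch below; every model identifier, bucket, role, and hyperparameter value is a placeholder, and the `s3Uri` config shape and hyperparameter names follow the Bedrock custom-models documentation rather than anything defined in this file:

```julia
using AWS
@service Bedrock

Bedrock.create_model_customization_job(
    "amazon.titan-text-express-v1",                              # baseModelIdentifier (placeholder)
    "example-custom-model",                                      # customModelName
    Dict("epochCount" => "1", "batchSize" => "1", "learningRate" => "0.00001"),
    "example-finetune-job",                                      # jobName
    Dict("s3Uri" => "s3://example-bucket/output/"),              # outputDataConfig
    "arn:aws:iam::111122223333:role/BedrockCustomizationRole",   # roleArn (placeholder)
    Dict("s3Uri" => "s3://example-bucket/train.jsonl"),          # trainingDataConfig
)
```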
+""" +function create_model_customization_job( + baseModelIdentifier, + customModelName, + hyperParameters, + jobName, + outputDataConfig, + roleArn, + trainingDataConfig; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-customization-jobs", + Dict{String,Any}( + "baseModelIdentifier" => baseModelIdentifier, + "customModelName" => customModelName, + "hyperParameters" => hyperParameters, + "jobName" => jobName, + "outputDataConfig" => outputDataConfig, + "roleArn" => roleArn, + "trainingDataConfig" => trainingDataConfig, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_model_customization_job( + baseModelIdentifier, + customModelName, + hyperParameters, + jobName, + outputDataConfig, + roleArn, + trainingDataConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-customization-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "baseModelIdentifier" => baseModelIdentifier, + "customModelName" => customModelName, + "hyperParameters" => hyperParameters, + "jobName" => jobName, + "outputDataConfig" => outputDataConfig, + "roleArn" => roleArn, + "trainingDataConfig" => trainingDataConfig, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_provisioned_model_throughput(model_id, model_units, provisioned_model_name) + create_provisioned_model_throughput(model_id, model_units, provisioned_model_name, params::Dict{String,<:Any}) + +Creates dedicated throughput for a base or custom model with the model units and for the +duration that you specify. For pricing details, see Amazon Bedrock Pricing. For more +information, see Provisioned Throughput in the Amazon Bedrock User Guide. + +# Arguments +- `model_id`: The Amazon Resource Name (ARN) or name of the model to associate with this + Provisioned Throughput. For a list of models for which you can purchase Provisioned + Throughput, see Amazon Bedrock model IDs for purchasing Provisioned Throughput in the + Amazon Bedrock User Guide. +- `model_units`: Number of model units to allocate. A model unit delivers a specific + throughput level for the specified model. The throughput level of a model unit specifies + the total number of input and output tokens that it can process and generate within a span + of one minute. By default, your account has no model units for purchasing Provisioned + Throughputs with commitment. You must first visit the Amazon Web Services support center to + request MUs. For model unit quotas, see Provisioned Throughput quotas in the Amazon Bedrock + User Guide. For more information about what an MU specifies, contact your Amazon Web + Services account manager. +- `provisioned_model_name`: The name for this Provisioned Throughput. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API + request completes no more than one time. If this token matches a previous request, Amazon + Bedrock ignores the request, but does not return an error. For more information, see + Ensuring idempotency in the Amazon S3 User Guide. +- `"commitmentDuration"`: The commitment duration requested for the Provisioned Throughput. 
+ Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit + Provisioned Throughput, omit this field. Custom models support all levels of commitment. To + see which base models support no commitment, see Supported regions and models for + Provisioned Throughput in the Amazon Bedrock User Guide +- `"tags"`: Tags to associate with this Provisioned Throughput. +""" +function create_provisioned_model_throughput( + modelId, + modelUnits, + provisionedModelName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/provisioned-model-throughput", + Dict{String,Any}( + "modelId" => modelId, + "modelUnits" => modelUnits, + "provisionedModelName" => provisionedModelName, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_provisioned_model_throughput( + modelId, + modelUnits, + provisionedModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/provisioned-model-throughput", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "modelId" => modelId, + "modelUnits" => modelUnits, + "provisionedModelName" => provisionedModelName, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_custom_model(model_identifier) + delete_custom_model(model_identifier, params::Dict{String,<:Any}) + +Deletes a custom model that you created earlier. For more information, see Custom models in +the Amazon Bedrock User Guide. + +# Arguments +- `model_identifier`: Name of the model to delete. + +""" +function delete_custom_model( + modelIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "DELETE", + "/custom-models/$(modelIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_custom_model( + modelIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "DELETE", + "/custom-models/$(modelIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_guardrail(guardrail_identifier) + delete_guardrail(guardrail_identifier, params::Dict{String,<:Any}) + +Deletes a guardrail. To delete a guardrail, only specify the ARN of the guardrail in the +guardrailIdentifier field. If you delete a guardrail, all of its versions will be deleted. + To delete a version of a guardrail, specify the ARN of the guardrail in the +guardrailIdentifier field and the version in the guardrailVersion field. + +# Arguments +- `guardrail_identifier`: The unique identifier of the guardrail. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"guardrailVersion"`: The version of the guardrail. 
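Deleting a single version rather than the whole guardrail is just a matter of passing `guardrailVersion` in the optional params dict; the identifier below is a placeholder:

```julia
using AWS
@service Bedrock

guardrail_id = "arn:aws:bedrock:us-east-1:111122223333:guardrail/abc123"  # placeholder

# Delete only version 1; omit the params dict to delete the guardrail and all its versions.
Bedrock.delete_guardrail(guardrail_id, Dict("guardrailVersion" => "1"))
```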
+""" +function delete_guardrail( + guardrailIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "DELETE", + "/guardrails/$(guardrailIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_guardrail( + guardrailIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "DELETE", + "/guardrails/$(guardrailIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_model_invocation_logging_configuration() + delete_model_invocation_logging_configuration(params::Dict{String,<:Any}) + +Delete the invocation logging. + +""" +function delete_model_invocation_logging_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "DELETE", + "/logging/modelinvocations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_model_invocation_logging_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "DELETE", + "/logging/modelinvocations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_provisioned_model_throughput(provisioned_model_id) + delete_provisioned_model_throughput(provisioned_model_id, params::Dict{String,<:Any}) + +Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the +commitment term is over. For more information, see Provisioned Throughput in the Amazon +Bedrock User Guide. + +# Arguments +- `provisioned_model_id`: The Amazon Resource Name (ARN) or name of the Provisioned + Throughput. + +""" +function delete_provisioned_model_throughput( + provisionedModelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "DELETE", + "/provisioned-model-throughput/$(provisionedModelId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_provisioned_model_throughput( + provisionedModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "DELETE", + "/provisioned-model-throughput/$(provisionedModelId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_custom_model(model_identifier) + get_custom_model(model_identifier, params::Dict{String,<:Any}) + +Get the properties associated with a Amazon Bedrock custom model that you have created.For +more information, see Custom models in the Amazon Bedrock User Guide. + +# Arguments +- `model_identifier`: Name or Amazon Resource Name (ARN) of the custom model. + +""" +function get_custom_model( + modelIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/custom-models/$(modelIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_custom_model( + modelIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "GET", + "/custom-models/$(modelIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_evaluation_job(job_identifier) + get_evaluation_job(job_identifier, params::Dict{String,<:Any}) + +Retrieves the properties associated with a model evaluation job, including the status of +the job. For more information, see Model evaluations. + +# Arguments +- `job_identifier`: The Amazon Resource Name (ARN) of the model evaluation job. 
+ +""" +function get_evaluation_job( + jobIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/evaluation-jobs/$(jobIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_evaluation_job( + jobIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "GET", + "/evaluation-jobs/$(jobIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_foundation_model(model_identifier) + get_foundation_model(model_identifier, params::Dict{String,<:Any}) + +Get details about a Amazon Bedrock foundation model. + +# Arguments +- `model_identifier`: The model identifier. + +""" +function get_foundation_model( + modelIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/foundation-models/$(modelIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_foundation_model( + modelIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "GET", + "/foundation-models/$(modelIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_guardrail(guardrail_identifier) + get_guardrail(guardrail_identifier, params::Dict{String,<:Any}) + +Gets details about a guardrail. If you don't specify a version, the response returns +details for the DRAFT version. + +# Arguments +- `guardrail_identifier`: The unique identifier of the guardrail for which to get details. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"guardrailVersion"`: The version of the guardrail for which to get details. If you don't + specify a version, the response returns details for the DRAFT version. +""" +function get_guardrail( + guardrailIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/guardrails/$(guardrailIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_guardrail( + guardrailIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "GET", + "/guardrails/$(guardrailIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_model_customization_job(job_identifier) + get_model_customization_job(job_identifier, params::Dict{String,<:Any}) + +Retrieves the properties associated with a model-customization job, including the status of +the job. For more information, see Custom models in the Amazon Bedrock User Guide. + +# Arguments +- `job_identifier`: Identifier for the customization job. 
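Because customization jobs are asynchronous, a common pattern is to poll this operation until the job leaves the InProgress state. A rough sketch, assuming the parsed response body exposes a `status` field (per the Bedrock API reference) and using a placeholder job ARN:

```julia
using AWS
@service Bedrock

function wait_for_customization(job_arn; poll_seconds=60)
    while true
        job = Bedrock.get_model_customization_job(job_arn)
        status = get(job, "status", "Unknown")    # response field assumed
        status == "InProgress" || return status   # e.g. Completed, Failed, Stopped
        sleep(poll_seconds)
    end
end

# Placeholder ARN returned by create_model_customization_job.
wait_for_customization("arn:aws:bedrock:us-east-1:111122223333:model-customization-job/example")
```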
+ +""" +function get_model_customization_job( + jobIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/model-customization-jobs/$(jobIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_model_customization_job( + jobIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "GET", + "/model-customization-jobs/$(jobIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_model_invocation_logging_configuration() + get_model_invocation_logging_configuration(params::Dict{String,<:Any}) + +Get the current configuration values for model invocation logging. + +""" +function get_model_invocation_logging_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/logging/modelinvocations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_model_invocation_logging_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/logging/modelinvocations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_provisioned_model_throughput(provisioned_model_id) + get_provisioned_model_throughput(provisioned_model_id, params::Dict{String,<:Any}) + +Returns details for a Provisioned Throughput. For more information, see Provisioned +Throughput in the Amazon Bedrock User Guide. + +# Arguments +- `provisioned_model_id`: The Amazon Resource Name (ARN) or name of the Provisioned + Throughput. + +""" +function get_provisioned_model_throughput( + provisionedModelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/provisioned-model-throughput/$(provisionedModelId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_provisioned_model_throughput( + provisionedModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "GET", + "/provisioned-model-throughput/$(provisionedModelId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_custom_models() + list_custom_models(params::Dict{String,<:Any}) + +Returns a list of the custom models that you have created with the +CreateModelCustomizationJob operation. For more information, see Custom models in the +Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"baseModelArnEquals"`: Return custom models only if the base model Amazon Resource Name + (ARN) matches this parameter. +- `"creationTimeAfter"`: Return custom models created after the specified time. +- `"creationTimeBefore"`: Return custom models created before the specified time. +- `"foundationModelArnEquals"`: Return custom models only if the foundation model Amazon + Resource Name (ARN) matches this parameter. +- `"maxResults"`: Maximum number of results to return in the response. +- `"nameContains"`: Return custom models only if the job name contains these characters. +- `"nextToken"`: Continuation token from the previous response, for Amazon Bedrock to list + the next set of results. +- `"sortBy"`: The field to sort by in the returned list of models. +- `"sortOrder"`: The sort order of the results. 
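All of these filters are plain keys in the optional params dict. A small sketch, assuming `@service` loading; the sort enum values shown follow the Bedrock API reference:

```julia
using AWS
@service Bedrock

# Newest custom models whose name contains "titan", ten per page.
Bedrock.list_custom_models(Dict(
    "nameContains" => "titan",
    "sortBy" => "CreationTime",
    "sortOrder" => "Descending",
    "maxResults" => 10,
))
```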
+""" +function list_custom_models(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/custom-models"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_custom_models( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/custom-models", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_evaluation_jobs() + list_evaluation_jobs(params::Dict{String,<:Any}) + +Lists model evaluation jobs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"creationTimeAfter"`: A filter that includes model evaluation jobs created after the + time specified. +- `"creationTimeBefore"`: A filter that includes model evaluation jobs created prior to the + time specified. +- `"maxResults"`: The maximum number of results to return. +- `"nameContains"`: Query parameter string for model evaluation job names. +- `"nextToken"`: Continuation token from the previous response, for Amazon Bedrock to list + the next set of results. +- `"sortBy"`: Allows you to sort model evaluation jobs by when they were created. +- `"sortOrder"`: How you want the order of jobs sorted. +- `"statusEquals"`: Only return jobs where the status condition is met. +""" +function list_evaluation_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/evaluation-jobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_evaluation_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/evaluation-jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_foundation_models() + list_foundation_models(params::Dict{String,<:Any}) + +Lists Amazon Bedrock foundation models that you can use. You can filter the results with +the request parameters. For more information, see Foundation models in the Amazon Bedrock +User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"byCustomizationType"`: Return models that support the customization type that you + specify. For more information, see Custom models in the Amazon Bedrock User Guide. +- `"byInferenceType"`: Return models that support the inference type that you specify. For + more information, see Provisioned Throughput in the Amazon Bedrock User Guide. +- `"byOutputModality"`: Return models that support the output modality that you specify. +- `"byProvider"`: Return models belonging to the model provider that you specify. +""" +function list_foundation_models(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/foundation-models"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_foundation_models( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/foundation-models", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_guardrails() + list_guardrails(params::Dict{String,<:Any}) + +Lists details about all the guardrails in an account. To list the DRAFT version of all your +guardrails, don't specify the guardrailIdentifier field. To list all versions of a +guardrail, specify the ARN of the guardrail in the guardrailIdentifier field. 
You can set +the maximum number of results to return in a response in the maxResults field. If there are +more results than the number you set, the response returns a nextToken that you can send in +another ListGuardrails request to see the next batch of results. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"guardrailIdentifier"`: The unique identifier of the guardrail. +- `"maxResults"`: The maximum number of results to return in the response. +- `"nextToken"`: If there are more results than were returned in the response, the response + returns a nextToken that you can send in another ListGuardrails request to see the next + batch of results. +""" +function list_guardrails(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/guardrails"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_guardrails( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", "/guardrails", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_model_customization_jobs() + list_model_customization_jobs(params::Dict{String,<:Any}) + +Returns a list of model customization jobs that you have submitted. You can filter the jobs +to return based on one or more criteria. For more information, see Custom models in the +Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"creationTimeAfter"`: Return customization jobs created after the specified time. +- `"creationTimeBefore"`: Return customization jobs created before the specified time. +- `"maxResults"`: Maximum number of results to return in the response. +- `"nameContains"`: Return customization jobs only if the job name contains these + characters. +- `"nextToken"`: Continuation token from the previous response, for Amazon Bedrock to list + the next set of results. +- `"sortBy"`: The field to sort by in the returned list of jobs. +- `"sortOrder"`: The sort order of the results. +- `"statusEquals"`: Return customization jobs with the specified status. +""" +function list_model_customization_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", + "/model-customization-jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_model_customization_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/model-customization-jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_provisioned_model_throughputs() + list_provisioned_model_throughputs(params::Dict{String,<:Any}) + +Lists the Provisioned Throughputs in the account. For more information, see Provisioned +Throughput in the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"creationTimeAfter"`: A filter that returns Provisioned Throughputs created after the + specified time. +- `"creationTimeBefore"`: A filter that returns Provisioned Throughputs created before the + specified time. +- `"maxResults"`: THe maximum number of results to return in the response. If there are + more results than the number you specified, the response returns a nextToken value. To see + the next batch of results, send the nextToken value in another list request. 
+- `"modelArnEquals"`: A filter that returns Provisioned Throughputs whose model Amazon + Resource Name (ARN) is equal to the value that you specify. +- `"nameContains"`: A filter that returns Provisioned Throughputs if their name contains + the expression that you specify. +- `"nextToken"`: If there are more results than the number you specified in the maxResults + field, the response returns a nextToken value. To see the next batch of results, specify + the nextToken value in this field. +- `"sortBy"`: The field by which to sort the returned list of Provisioned Throughputs. +- `"sortOrder"`: The sort order of the results. +- `"statusEquals"`: A filter that returns Provisioned Throughputs if their statuses matches + the value that you specify. +""" +function list_provisioned_model_throughputs(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/provisioned-model-throughputs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_provisioned_model_throughputs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/provisioned-model-throughputs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +List the tags associated with the specified resource. For more information, see Tagging +resources in the Amazon Bedrock User Guide. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. + +""" +function list_tags_for_resource( + resourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/listTagsForResource", + Dict{String,Any}("resourceARN" => resourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/listTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("resourceARN" => resourceARN), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_model_invocation_logging_configuration(logging_config) + put_model_invocation_logging_configuration(logging_config, params::Dict{String,<:Any}) + +Set the configuration values for model invocation logging. + +# Arguments +- `logging_config`: The logging configuration values to set. + +""" +function put_model_invocation_logging_configuration( + loggingConfig; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "PUT", + "/logging/modelinvocations", + Dict{String,Any}("loggingConfig" => loggingConfig); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_model_invocation_logging_configuration( + loggingConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "PUT", + "/logging/modelinvocations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("loggingConfig" => loggingConfig), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_evaluation_job(job_identifier) + stop_evaluation_job(job_identifier, params::Dict{String,<:Any}) + +Stops an in progress model evaluation job. + +# Arguments +- `job_identifier`: The ARN of the model evaluation job you want to stop. 
+ +""" +function stop_evaluation_job( + jobIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/evaluation-job/$(jobIdentifier)/stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_evaluation_job( + jobIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/evaluation-job/$(jobIdentifier)/stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_model_customization_job(job_identifier) + stop_model_customization_job(job_identifier, params::Dict{String,<:Any}) + +Stops an active model customization job. For more information, see Custom models in the +Amazon Bedrock User Guide. + +# Arguments +- `job_identifier`: Job identifier of the job to stop. + +""" +function stop_model_customization_job( + jobIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/model-customization-jobs/$(jobIdentifier)/stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_model_customization_job( + jobIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-customization-jobs/$(jobIdentifier)/stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Associate tags with a resource. For more information, see Tagging resources in the Amazon +Bedrock User Guide. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to tag. +- `tags`: Tags to associate with the resource. + +""" +function tag_resource(resourceARN, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "POST", + "/tagResource", + Dict{String,Any}("resourceARN" => resourceARN, "tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceARN, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/tagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceARN" => resourceARN, "tags" => tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Remove one or more tags from a resource. For more information, see Tagging resources in the +Amazon Bedrock User Guide. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to untag. +- `tag_keys`: Tag keys of the tags to remove from the resource. 
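Tagging and untagging are symmetric. A sketch with a placeholder ARN; the lowercase `key`/`value` members of each tag are an assumption drawn from the Bedrock tag structure, not from this file:

```julia
using AWS
@service Bedrock

resource_arn = "arn:aws:bedrock:us-east-1:111122223333:provisioned-model/example"  # placeholder

# Attach a tag ...
Bedrock.tag_resource(resource_arn, [Dict("key" => "team", "value" => "ml-platform")])

# ... and remove it again by key.
Bedrock.untag_resource(resource_arn, ["team"])
```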
+ +""" +function untag_resource( + resourceARN, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/untagResource", + Dict{String,Any}("resourceARN" => resourceARN, "tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceARN, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/untagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceARN" => resourceARN, "tagKeys" => tagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_guardrail(blocked_input_messaging, blocked_outputs_messaging, guardrail_identifier, name) + update_guardrail(blocked_input_messaging, blocked_outputs_messaging, guardrail_identifier, name, params::Dict{String,<:Any}) + +Updates a guardrail with the values you specify. Specify a name and optional description. + Specify messages for when the guardrail successfully blocks a prompt or a model response +in the blockedInputMessaging and blockedOutputsMessaging fields. Specify topics for the +guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the +topicsConfig list pertains to one topic. Give a name and description so that the +guardrail can properly identify the topic. Specify DENY in the type field. (Optional) +Provide up to five prompts that you would categorize as belonging to the topic in the +examples list. Specify filter strengths for the harmful categories defined in Amazon +Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the +filtersConfig list pertains to a harmful category. For more information, see Content +filters. For more information about the fields in a content filter, see +GuardrailContentFilterConfig. Specify the category in the type field. Specify the +strength of the filter for prompts in the inputStrength field and for model responses in +the strength field of the GuardrailContentFilterConfig. (Optional) For security, +include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any tags to the +guardrail in the tags object. For more information, see Tag resources. + +# Arguments +- `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. +- `blocked_outputs_messaging`: The message to return when the guardrail blocks a model + response. +- `guardrail_identifier`: The unique identifier of the guardrail +- `name`: A name for the guardrail. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"contentPolicyConfig"`: The content policy to configure for the guardrail. +- `"description"`: A description of the guardrail. +- `"kmsKeyId"`: The ARN of the KMS key with which to encrypt the guardrail. +- `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for + the guardrail. +- `"topicPolicyConfig"`: The topic policy to configure for the guardrail. +- `"wordPolicyConfig"`: The word policy to configure for the guardrail. 
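As with the create call, the two blocked-message strings and the name are positional, and every policy change goes through the optional params dict. A sketch with placeholder values:

```julia
using AWS
@service Bedrock

guardrail_id = "arn:aws:bedrock:us-east-1:111122223333:guardrail/abc123"  # placeholder

Bedrock.update_guardrail(
    "Sorry, I can't help with that request.",    # blockedInputMessaging
    "Sorry, I can't provide that response.",     # blockedOutputsMessaging
    guardrail_id,
    "example-guardrail",                         # name
    Dict{String,Any}("description" => "Tightened content filters"),
)
```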
+""" +function update_guardrail( + blockedInputMessaging, + blockedOutputsMessaging, + guardrailIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "PUT", + "/guardrails/$(guardrailIdentifier)", + Dict{String,Any}( + "blockedInputMessaging" => blockedInputMessaging, + "blockedOutputsMessaging" => blockedOutputsMessaging, + "name" => name, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_guardrail( + blockedInputMessaging, + blockedOutputsMessaging, + guardrailIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "PUT", + "/guardrails/$(guardrailIdentifier)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "blockedInputMessaging" => blockedInputMessaging, + "blockedOutputsMessaging" => blockedOutputsMessaging, + "name" => name, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_provisioned_model_throughput(provisioned_model_id) + update_provisioned_model_throughput(provisioned_model_id, params::Dict{String,<:Any}) + +Updates the name or associated model for a Provisioned Throughput. For more information, +see Provisioned Throughput in the Amazon Bedrock User Guide. + +# Arguments +- `provisioned_model_id`: The Amazon Resource Name (ARN) or name of the Provisioned + Throughput to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"desiredModelId"`: The Amazon Resource Name (ARN) of the new model to associate with + this Provisioned Throughput. You can't specify this field if this Provisioned Throughput is + associated with a base model. If this Provisioned Throughput is associated with a custom + model, you can specify one of the following options: The base model from which the custom + model was customized. Another custom model that was customized from the same base model + as the custom model. +- `"desiredProvisionedModelName"`: The new name for this Provisioned Throughput. +""" +function update_provisioned_model_throughput( + provisionedModelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "PATCH", + "/provisioned-model-throughput/$(provisionedModelId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_provisioned_model_throughput( + provisionedModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "PATCH", + "/provisioned-model-throughput/$(provisionedModelId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/bedrock_agent.jl b/src/services/bedrock_agent.jl new file mode 100644 index 0000000000..5e087e7dd7 --- /dev/null +++ b/src/services/bedrock_agent.jl @@ -0,0 +1,1895 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: bedrock_agent +using AWS.Compat +using AWS.UUIDs + +""" + associate_agent_knowledge_base(agent_id, agent_version, description, knowledge_base_id) + associate_agent_knowledge_base(agent_id, agent_version, description, knowledge_base_id, params::Dict{String,<:Any}) + +Associates a knowledge base with an agent. If a knowledge base is associated and its +indexState is set to Enabled, the agent queries the knowledge base for information to +augment its response to the user. 
+
+# Arguments
+- `agent_id`: The unique identifier of the agent with which you want to associate the
+  knowledge base.
+- `agent_version`: The version of the agent with which you want to associate the knowledge
+  base.
+- `description`: A description of what the agent should use the knowledge base for.
+- `knowledge_base_id`: The unique identifier of the knowledge base to associate with the
+  agent.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"knowledgeBaseState"`: Specifies whether to use the knowledge base or not when sending
+  an InvokeAgent request.
+"""
+function associate_agent_knowledge_base(
+    agentId,
+    agentVersion,
+    description,
+    knowledgeBaseId;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "PUT",
+        "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/",
+        Dict{String,Any}(
+            "description" => description, "knowledgeBaseId" => knowledgeBaseId
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function associate_agent_knowledge_base(
+    agentId,
+    agentVersion,
+    description,
+    knowledgeBaseId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "PUT",
+        "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "description" => description, "knowledgeBaseId" => knowledgeBaseId
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    create_agent(agent_name)
+    create_agent(agent_name, params::Dict{String,<:Any})
+
+Creates an agent that orchestrates interactions between foundation models, data sources,
+software applications, user conversations, and APIs to carry out tasks to help customers.
+Specify the following fields for security purposes. agentResourceRoleArn – The Amazon
+Resource Name (ARN) of the role with permissions to invoke API operations on an agent.
+(Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to
+encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the
+number of seconds for which the agent should maintain session information. After this time
+expires, the subsequent InvokeAgent request begins a new session. To override the
+default prompt behavior for agent orchestration and to use advanced prompts, include a
+promptOverrideConfiguration object. For more information, see Advanced prompts. If your
+agent fails to be created, the response returns a list of failureReasons alongside a list
+of recommendedActions for you to troubleshoot.
+
+# Arguments
+- `agent_name`: A name for the agent that you create.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"agentResourceRoleArn"`: The Amazon Resource Name (ARN) of the IAM role with permissions
+  to invoke API operations on the agent.
+- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request
+  completes no more than one time. If this token matches a previous request, Amazon Bedrock
+  ignores the request, but does not return an error. For more information, see Ensuring
+  idempotency.
+- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key with which to
+  encrypt the agent.
+- `"description"`: A description of the agent.
+- `"foundationModel"`: The foundation model to be used for orchestration by the agent you
+  create.
+- `"guardrailConfiguration"`: The unique Guardrail configuration assigned to the agent when + it is created. +- `"idleSessionTTLInSeconds"`: The number of seconds for which Amazon Bedrock keeps + information about a user's conversation with the agent. A user interaction remains active + for the amount of time specified. If no conversation occurs during this time, the session + expires and Amazon Bedrock deletes any data provided before the timeout. +- `"instruction"`: Instructions that tell the agent what it should do and how it should + interact with users. +- `"promptOverrideConfiguration"`: Contains configurations to override prompts in different + parts of an agent sequence. For more information, see Advanced prompts. +- `"tags"`: Any tags that you want to attach to the agent. +""" +function create_agent(agentName; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "PUT", + "/agents/", + Dict{String,Any}("agentName" => agentName, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_agent( + agentName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "agentName" => agentName, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_agent_action_group(action_group_name, agent_id, agent_version) + create_agent_action_group(action_group_name, agent_id, agent_version, params::Dict{String,<:Any}) + +Creates an action group for an agent. An action group represents the actions that an agent +can carry out for the customer by defining the APIs that an agent can call and the logic +for calling them. To allow your agent to request the user for additional information when +trying to complete a task, add an action group with the parentActionGroupSignature field +set to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor +fields blank for this action group. During orchestration, if your agent determines that it +needs to invoke an API in an action group, but doesn't have enough information to complete +the API request, it will invoke this action group instead and return an Observation +reprompting the user for more information. + +# Arguments +- `action_group_name`: The name to give the action group. +- `agent_id`: The unique identifier of the agent for which to create the action group. +- `agent_version`: The version of the agent for which to create the action group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"actionGroupExecutor"`: The Amazon Resource Name (ARN) of the Lambda function containing + the business logic that is carried out upon invoking the action or the custom control + method for handling the information elicited from the user. +- `"actionGroupState"`: Specifies whether the action group is available for the agent to + invoke or not when sending an InvokeAgent request. +- `"apiSchema"`: Contains either details about the S3 object containing the OpenAPI schema + for the action group or the JSON or YAML-formatted payload defining the schema. For more + information, see Action group OpenAPI schemas. +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. 
If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description of the action group. +- `"functionSchema"`: Contains details about the function schema for the action group or + the JSON or YAML-formatted payload defining the schema. +- `"parentActionGroupSignature"`: To allow your agent to request the user for additional + information when trying to complete a task, set this field to AMAZON.UserInput. You must + leave the description, apiSchema, and actionGroupExecutor fields blank for this action + group. During orchestration, if your agent determines that it needs to invoke an API in an + action group, but doesn't have enough information to complete the API request, it will + invoke this action group instead and return an Observation reprompting the user for more + information. +""" +function create_agent_action_group( + actionGroupName, + agentId, + agentVersion; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/", + Dict{String,Any}( + "actionGroupName" => actionGroupName, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_agent_action_group( + actionGroupName, + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "actionGroupName" => actionGroupName, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_agent_alias(agent_alias_name, agent_id) + create_agent_alias(agent_alias_name, agent_id, params::Dict{String,<:Any}) + +Creates an alias of an agent that can be used to deploy the agent. + +# Arguments +- `agent_alias_name`: The name of the alias. +- `agent_id`: The unique identifier of the agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description of the alias of the agent. +- `"routingConfiguration"`: Contains details about the routing configuration of the alias. +- `"tags"`: Any tags that you want to attach to the alias of the agent. 
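+
+# Example
+A minimal illustrative sketch; the agent identifier and names below are placeholders:
+```julia
+resp = create_agent_alias(
+    "prod",          # agent_alias_name
+    "AGENT1234",     # agent_id (placeholder)
+    Dict{String,Any}("description" => "Production alias"),
+)
+```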
+""" +function create_agent_alias( + agentAliasName, agentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentaliases/", + Dict{String,Any}( + "agentAliasName" => agentAliasName, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_agent_alias( + agentAliasName, + agentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentaliases/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "agentAliasName" => agentAliasName, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_data_source(data_source_configuration, knowledge_base_id, name) + create_data_source(data_source_configuration, knowledge_base_id, name, params::Dict{String,<:Any}) + +Sets up a data source to be added to a knowledge base. You can't change the +chunkingConfiguration after you create the data source. + +# Arguments +- `data_source_configuration`: Contains metadata about where the data source is stored. +- `knowledge_base_id`: The unique identifier of the knowledge base to which to add the data + source. +- `name`: The name of the data source. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"dataDeletionPolicy"`: The data deletion policy assigned to the data source. +- `"description"`: A description of the data source. +- `"serverSideEncryptionConfiguration"`: Contains details about the server-side encryption + for the data source. +- `"vectorIngestionConfiguration"`: Contains details about how to ingest the documents in + the data source. +""" +function create_data_source( + dataSourceConfiguration, + knowledgeBaseId, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)/datasources/", + Dict{String,Any}( + "dataSourceConfiguration" => dataSourceConfiguration, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_source( + dataSourceConfiguration, + knowledgeBaseId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)/datasources/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "dataSourceConfiguration" => dataSourceConfiguration, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration) + create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration, params::Dict{String,<:Any}) + +Creates a knowledge base that contains data sources from which information can be queried +and used by LLMs. To create a knowledge base, you must first set up your data sources and +configure a supported vector store. 
For more information, see Set up your data for +ingestion. If you prefer to let Amazon Bedrock create and manage a vector store for you in +Amazon OpenSearch Service, use the console. For more information, see Create a knowledge +base. Provide the name and an optional description. Provide the Amazon Resource Name +(ARN) with permissions to create a knowledge base in the roleArn field. Provide the +embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration +object. Provide the configuration for your vector store in the storageConfiguration +object. For an Amazon OpenSearch Service database, use the +opensearchServerlessConfiguration object. For more information, see Create a vector store +in Amazon OpenSearch Service. For an Amazon Aurora database, use the RdsConfiguration +object. For more information, see Create a vector store in Amazon Aurora. For a Pinecone +database, use the pineconeConfiguration object. For more information, see Create a vector +store in Pinecone. For a Redis Enterprise Cloud database, use the +redisEnterpriseCloudConfiguration object. For more information, see Create a vector store +in Redis Enterprise Cloud. + +# Arguments +- `knowledge_base_configuration`: Contains details about the embeddings model used for the + knowledge base. +- `name`: A name for the knowledge base. +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API + operations on the knowledge base. +- `storage_configuration`: Contains details about the configuration of the vector database + used for the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description of the knowledge base. +- `"tags"`: Specify the key-value pairs for the tags that you want to attach to your + knowledge base in this object. +""" +function create_knowledge_base( + knowledgeBaseConfiguration, + name, + roleArn, + storageConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/", + Dict{String,Any}( + "knowledgeBaseConfiguration" => knowledgeBaseConfiguration, + "name" => name, + "roleArn" => roleArn, + "storageConfiguration" => storageConfiguration, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_knowledge_base( + knowledgeBaseConfiguration, + name, + roleArn, + storageConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "knowledgeBaseConfiguration" => knowledgeBaseConfiguration, + "name" => name, + "roleArn" => roleArn, + "storageConfiguration" => storageConfiguration, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_agent(agent_id) + delete_agent(agent_id, params::Dict{String,<:Any}) + +Deletes an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. +""" +function delete_agent(agentId; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_agent( + agentId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_agent_action_group(action_group_id, agent_id, agent_version) + delete_agent_action_group(action_group_id, agent_id, agent_version, params::Dict{String,<:Any}) + +Deletes an action group in an agent. + +# Arguments +- `action_group_id`: The unique identifier of the action group to delete. +- `agent_id`: The unique identifier of the agent that the action group belongs to. +- `agent_version`: The version of the agent that the action group belongs to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. +""" +function delete_agent_action_group( + actionGroupId, agentId, agentVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/$(actionGroupId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_agent_action_group( + actionGroupId, + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/$(actionGroupId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_agent_alias(agent_alias_id, agent_id) + delete_agent_alias(agent_alias_id, agent_id, params::Dict{String,<:Any}) + +Deletes an alias of an agent. + +# Arguments +- `agent_alias_id`: The unique identifier of the alias to delete. +- `agent_id`: The unique identifier of the agent that the alias belongs to. + +""" +function delete_agent_alias( + agentAliasId, agentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentaliases/$(agentAliasId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_agent_alias( + agentAliasId, + agentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentaliases/$(agentAliasId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_agent_version(agent_id, agent_version) + delete_agent_version(agent_id, agent_version, params::Dict{String,<:Any}) + +Deletes a version of an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent that the version belongs to. +- `agent_version`: The version of the agent to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. +""" +function delete_agent_version( + agentId, agentVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentversions/$(agentVersion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_agent_version( + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentversions/$(agentVersion)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_data_source(data_source_id, knowledge_base_id) + delete_data_source(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes a data source from a knowledge base. + +# Arguments +- `data_source_id`: The unique identifier of the data source to delete. +- `knowledge_base_id`: The unique identifier of the knowledge base from which to delete the + data source. + +""" +function delete_data_source( + dataSourceId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_data_source( + dataSourceId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_knowledge_base(knowledge_base_id) + delete_knowledge_base(knowledge_base_id, params::Dict{String,<:Any}) + +Deletes a knowledge base. Before deleting a knowledge base, you should disassociate the +knowledge base from any agents that it is associated with by making a +DisassociateAgentKnowledgeBase request. + +# Arguments +- `knowledge_base_id`: The unique identifier of the knowledge base to delete. + +""" +function delete_knowledge_base( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/knowledgebases/$(knowledgeBaseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_knowledge_base( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/knowledgebases/$(knowledgeBaseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id) + disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id, params::Dict{String,<:Any}) + +Disassociates a knowledge base from an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent from which to disassociate the knowledge + base. +- `agent_version`: The version of the agent from which to disassociate the knowledge base. +- `knowledge_base_id`: The unique identifier of the knowledge base to disassociate. 
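+
+# Example
+A minimal illustrative sketch; all identifiers below are placeholders:
+```julia
+disassociate_agent_knowledge_base(
+    "AGENT1234",     # agent_id (placeholder)
+    "DRAFT",         # agent_version (placeholder)
+    "KB12345678",    # knowledge_base_id (placeholder)
+)
+```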
+ +""" +function disassociate_agent_knowledge_base( + agentId, + agentVersion, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/$(knowledgeBaseId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_agent_knowledge_base( + agentId, + agentVersion, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/$(knowledgeBaseId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent(agent_id) + get_agent(agent_id, params::Dict{String,<:Any}) + +Gets information about an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent. + +""" +function get_agent(agentId; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", "/agents/$(agentId)/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_agent( + agentId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent_action_group(action_group_id, agent_id, agent_version) + get_agent_action_group(action_group_id, agent_id, agent_version, params::Dict{String,<:Any}) + +Gets information about an action group for an agent. + +# Arguments +- `action_group_id`: The unique identifier of the action group for which to get information. +- `agent_id`: The unique identifier of the agent that the action group belongs to. +- `agent_version`: The version of the agent that the action group belongs to. + +""" +function get_agent_action_group( + actionGroupId, agentId, agentVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/$(actionGroupId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agent_action_group( + actionGroupId, + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/$(actionGroupId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent_alias(agent_alias_id, agent_id) + get_agent_alias(agent_alias_id, agent_id, params::Dict{String,<:Any}) + +Gets information about an alias of an agent. + +# Arguments +- `agent_alias_id`: The unique identifier of the alias for which to get information. +- `agent_id`: The unique identifier of the agent to which the alias to get information + belongs. 
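+
+# Example
+A minimal illustrative sketch; the identifiers below are placeholders:
+```julia
+resp = get_agent_alias("ALIAS1234", "AGENT1234")
+```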
+ +""" +function get_agent_alias( + agentAliasId, agentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentaliases/$(agentAliasId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agent_alias( + agentAliasId, + agentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentaliases/$(agentAliasId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent_knowledge_base(agent_id, agent_version, knowledge_base_id) + get_agent_knowledge_base(agent_id, agent_version, knowledge_base_id, params::Dict{String,<:Any}) + +Gets information about a knowledge base associated with an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent with which the knowledge base is + associated. +- `agent_version`: The version of the agent with which the knowledge base is associated. +- `knowledge_base_id`: The unique identifier of the knowledge base associated with the + agent. + +""" +function get_agent_knowledge_base( + agentId, + agentVersion, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/$(knowledgeBaseId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agent_knowledge_base( + agentId, + agentVersion, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/$(knowledgeBaseId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent_version(agent_id, agent_version) + get_agent_version(agent_id, agent_version, params::Dict{String,<:Any}) + +Gets details about a version of an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent. +- `agent_version`: The version of the agent. + +""" +function get_agent_version( + agentId, agentVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentversions/$(agentVersion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agent_version( + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/agents/$(agentId)/agentversions/$(agentVersion)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_data_source(data_source_id, knowledge_base_id) + get_data_source(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) + +Gets information about a data source. + +# Arguments +- `data_source_id`: The unique identifier of the data source. +- `knowledge_base_id`: The unique identifier of the knowledge base that the data source was + added to. 
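+
+# Example
+A minimal illustrative sketch; the identifiers are placeholders and the `dataSource`
+response field name is an assumption to be checked against the Amazon Bedrock API
+reference:
+```julia
+resp = get_data_source("DS12345678", "KB12345678")
+status = resp["dataSource"]["status"]   # assumed response shape
+```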
+
+"""
+function get_data_source(
+    dataSourceId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "GET",
+        "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_data_source(
+    dataSourceId,
+    knowledgeBaseId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_ingestion_job(data_source_id, ingestion_job_id, knowledge_base_id)
+    get_ingestion_job(data_source_id, ingestion_job_id, knowledge_base_id, params::Dict{String,<:Any})
+
+Gets information about an ingestion job, in which a data source is added to a knowledge base.
+
+# Arguments
+- `data_source_id`: The unique identifier of the data source in the ingestion job.
+- `ingestion_job_id`: The unique identifier of the ingestion job.
+- `knowledge_base_id`: The unique identifier of the knowledge base for which the ingestion
+  job applies.
+
+"""
+function get_ingestion_job(
+    dataSourceId,
+    ingestionJobId,
+    knowledgeBaseId;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)/ingestionjobs/$(ingestionJobId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_ingestion_job(
+    dataSourceId,
+    ingestionJobId,
+    knowledgeBaseId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)/ingestionjobs/$(ingestionJobId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_knowledge_base(knowledge_base_id)
+    get_knowledge_base(knowledge_base_id, params::Dict{String,<:Any})
+
+Gets information about a knowledge base.
+
+# Arguments
+- `knowledge_base_id`: The unique identifier of the knowledge base for which to get
+  information.
+
+"""
+function get_knowledge_base(
+    knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "GET",
+        "/knowledgebases/$(knowledgeBaseId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_knowledge_base(
+    knowledgeBaseId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/knowledgebases/$(knowledgeBaseId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_agent_action_groups(agent_id, agent_version)
+    list_agent_action_groups(agent_id, agent_version, params::Dict{String,<:Any})
+
+Lists the action groups for an agent and information about each one.
+
+# Arguments
+- `agent_id`: The unique identifier of the agent.
+- `agent_version`: The version of the agent.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of results to return in the response. If the total
+  number of results is greater than this value, use the token returned in the response in
+  the nextToken field when making another request to return the next batch of results.
+- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_agent_action_groups( + agentId, agentVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_agent_action_groups( + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_agent_aliases(agent_id) + list_agent_aliases(agent_id, params::Dict{String,<:Any}) + +Lists the aliases of an agent and information about each one. + +# Arguments +- `agent_id`: The unique identifier of the agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_agent_aliases(agentId; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentaliases/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_agent_aliases( + agentId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentaliases/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_agent_knowledge_bases(agent_id, agent_version) + list_agent_knowledge_bases(agent_id, agent_version, params::Dict{String,<:Any}) + +Lists knowledge bases associated with an agent and information about each one. + +# Arguments +- `agent_id`: The unique identifier of the agent for which to return information about + knowledge bases associated with it. +- `agent_version`: The version of the agent for which to return information about knowledge + bases associated with it. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. 
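+
+# Example
+A sketch of paging through all results. The identifiers are placeholders, the default
+feature set is assumed to return the parsed JSON body as a dictionary, and the
+`agentKnowledgeBaseSummaries` field name is an assumption to be checked against the Amazon
+Bedrock API reference:
+```julia
+function list_all_agent_knowledge_bases(agent_id, agent_version)
+    summaries = Any[]
+    token = nothing
+    while true
+        params = Dict{String,Any}("maxResults" => 10)
+        if token !== nothing
+            params["nextToken"] = token
+        end
+        resp = list_agent_knowledge_bases(agent_id, agent_version, params)
+        # collect this page's summaries (assumed response field name)
+        append!(summaries, get(resp, "agentKnowledgeBaseSummaries", []))
+        token = get(resp, "nextToken", nothing)
+        token === nothing && break
+    end
+    return summaries
+end
+```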
+""" +function list_agent_knowledge_bases( + agentId, agentVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_agent_knowledge_bases( + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_agent_versions(agent_id) + list_agent_versions(agent_id, params::Dict{String,<:Any}) + +Lists the versions of an agent and information about each version. + +# Arguments +- `agent_id`: The unique identifier of the agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_agent_versions(agentId; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentversions/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_agent_versions( + agentId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/agents/$(agentId)/agentversions/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_agents() + list_agents(params::Dict{String,<:Any}) + +Lists the agents belonging to an account and information about each agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_agents(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", "/agents/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_agents( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", "/agents/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_data_sources(knowledge_base_id) + list_data_sources(knowledge_base_id, params::Dict{String,<:Any}) + +Lists the data sources in a knowledge base and information about each one. + +# Arguments +- `knowledge_base_id`: The unique identifier of the knowledge base for which to return a + list of information. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_data_sources( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/knowledgebases/$(knowledgeBaseId)/datasources/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_sources( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/knowledgebases/$(knowledgeBaseId)/datasources/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_ingestion_jobs(data_source_id, knowledge_base_id) + list_ingestion_jobs(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) + +Lists the ingestion jobs for a data source and information about each of them. + +# Arguments +- `data_source_id`: The unique identifier of the data source for which to return ingestion + jobs. +- `knowledge_base_id`: The unique identifier of the knowledge base for which to return + ingestion jobs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: Contains a definition of a filter for which to filter the results. +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +- `"sortBy"`: Contains details about how to sort the results. +""" +function list_ingestion_jobs( + dataSourceId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)/ingestionjobs/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_ingestion_jobs( + dataSourceId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)/ingestionjobs/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_knowledge_bases() + list_knowledge_bases(params::Dict{String,<:Any}) + +Lists the knowledge bases in an account and information about each of them. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. 
If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_knowledge_bases(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", "/knowledgebases/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_knowledge_bases( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/knowledgebases/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +List all the tags for the resource you specify. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which to list tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + prepare_agent(agent_id) + prepare_agent(agent_id, params::Dict{String,<:Any}) + +Creates a DRAFT version of the agent that can be used for internal testing. + +# Arguments +- `agent_id`: The unique identifier of the agent for which to create a DRAFT version. + +""" +function prepare_agent(agentId; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/agents/$(agentId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function prepare_agent( + agentId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/agents/$(agentId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_ingestion_job(data_source_id, knowledge_base_id) + start_ingestion_job(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) + +Begins an ingestion job, in which a data source is added to a knowledge base. + +# Arguments +- `data_source_id`: The unique identifier of the data source to ingest. +- `knowledge_base_id`: The unique identifier of the knowledge base to which to add the data + source. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description of the ingestion job. 
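+
+# Example
+A sketch of starting a sync and then checking on it. The identifiers are placeholders, and
+the `ingestionJob`/`ingestionJobId` response fields are assumptions to be checked against
+the Amazon Bedrock API reference:
+```julia
+resp = start_ingestion_job(
+    "DS12345678",    # data_source_id (placeholder)
+    "KB12345678",    # knowledge_base_id (placeholder)
+    Dict{String,Any}("description" => "Nightly sync"),
+)
+job_id = resp["ingestionJob"]["ingestionJobId"]               # assumed response shape
+job = get_ingestion_job("DS12345678", job_id, "KB12345678")   # poll for status
+```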
+""" +function start_ingestion_job( + dataSourceId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)/ingestionjobs/", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_ingestion_job( + dataSourceId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)/ingestionjobs/", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Associate tags with a resource. For more information, see Tagging resources in the Amazon +Bedrock User Guide. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to tag. +- `tags`: An object containing key-value pairs that define the tags to attach to the + resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Remove tags from a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource from which to remove tags. +- `tag_keys`: A list of keys of the tags to remove from the resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_agent(agent_id, agent_name, agent_resource_role_arn, foundation_model) + update_agent(agent_id, agent_name, agent_resource_role_arn, foundation_model, params::Dict{String,<:Any}) + +Updates the configuration of an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent. +- `agent_name`: Specifies a new name for the agent. +- `agent_resource_role_arn`: The Amazon Resource Name (ARN) of the IAM role with + permissions to invoke API operations on the agent. +- `foundation_model`: Specifies a new foundation model to be used for orchestration by the + agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
+- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key with which to
+  encrypt the agent.
+- `"description"`: Specifies a new description of the agent.
+- `"guardrailConfiguration"`: The unique Guardrail configuration assigned to the agent when
+  it is updated.
+- `"idleSessionTTLInSeconds"`: The number of seconds for which Amazon Bedrock keeps
+  information about a user's conversation with the agent. A user interaction remains active
+  for the amount of time specified. If no conversation occurs during this time, the session
+  expires and Amazon Bedrock deletes any data provided before the timeout.
+- `"instruction"`: Specifies new instructions that tell the agent what it should do and how
+  it should interact with users.
+- `"promptOverrideConfiguration"`: Contains configurations to override prompts in different
+  parts of an agent sequence. For more information, see Advanced prompts.
+"""
+function update_agent(
+    agentId,
+    agentName,
+    agentResourceRoleArn,
+    foundationModel;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "PUT",
+        "/agents/$(agentId)/",
+        Dict{String,Any}(
+            "agentName" => agentName,
+            "agentResourceRoleArn" => agentResourceRoleArn,
+            "foundationModel" => foundationModel,
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_agent(
+    agentId,
+    agentName,
+    agentResourceRoleArn,
+    foundationModel,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "PUT",
+        "/agents/$(agentId)/",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "agentName" => agentName,
+                    "agentResourceRoleArn" => agentResourceRoleArn,
+                    "foundationModel" => foundationModel,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    update_agent_action_group(action_group_id, action_group_name, agent_id, agent_version)
+    update_agent_action_group(action_group_id, action_group_name, agent_id, agent_version, params::Dict{String,<:Any})
+
+Updates the configuration for an action group for an agent.
+
+# Arguments
+- `action_group_id`: The unique identifier of the action group.
+- `action_group_name`: Specifies a new name for the action group.
+- `agent_id`: The unique identifier of the agent for which to update the action group.
+- `agent_version`: The unique identifier of the agent version for which to update the
+  action group.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"actionGroupExecutor"`: The Amazon Resource Name (ARN) of the Lambda function containing
+  the business logic that is carried out upon invoking the action.
+- `"actionGroupState"`: Specifies whether the action group is available for the agent to
+  invoke or not when sending an InvokeAgent request.
+- `"apiSchema"`: Contains either details about the S3 object containing the OpenAPI schema
+  for the action group or the JSON or YAML-formatted payload defining the schema. For more
+  information, see Action group OpenAPI schemas.
+- `"description"`: Specifies a new description for the action group.
+- `"functionSchema"`: Contains details about the function schema for the action group or
+  the JSON or YAML-formatted payload defining the schema.
+- `"parentActionGroupSignature"`: To allow your agent to request the user for additional
+  information when trying to complete a task, set this field to AMAZON.UserInput.
You must + leave the description, apiSchema, and actionGroupExecutor fields blank for this action + group. During orchestration, if your agent determines that it needs to invoke an API in an + action group, but doesn't have enough information to complete the API request, it will + invoke this action group instead and return an Observation reprompting the user for more + information. +""" +function update_agent_action_group( + actionGroupId, + actionGroupName, + agentId, + agentVersion; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/$(actionGroupId)/", + Dict{String,Any}("actionGroupName" => actionGroupName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_agent_action_group( + actionGroupId, + actionGroupName, + agentId, + agentVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentversions/$(agentVersion)/actiongroups/$(actionGroupId)/", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("actionGroupName" => actionGroupName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_agent_alias(agent_alias_id, agent_alias_name, agent_id) + update_agent_alias(agent_alias_id, agent_alias_name, agent_id, params::Dict{String,<:Any}) + +Updates configurations for an alias of an agent. + +# Arguments +- `agent_alias_id`: The unique identifier of the alias. +- `agent_alias_name`: Specifies a new name for the alias. +- `agent_id`: The unique identifier of the agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: Specifies a new description for the alias. +- `"routingConfiguration"`: Contains details about the routing configuration of the alias. +""" +function update_agent_alias( + agentAliasId, agentAliasName, agentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentaliases/$(agentAliasId)/", + Dict{String,Any}("agentAliasName" => agentAliasName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_agent_alias( + agentAliasId, + agentAliasName, + agentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentaliases/$(agentAliasId)/", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("agentAliasName" => agentAliasName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_agent_knowledge_base(agent_id, agent_version, knowledge_base_id) + update_agent_knowledge_base(agent_id, agent_version, knowledge_base_id, params::Dict{String,<:Any}) + +Updates the configuration for a knowledge base that has been associated with an agent. + +# Arguments +- `agent_id`: The unique identifier of the agent associated with the knowledge base that + you want to update. +- `agent_version`: The version of the agent associated with the knowledge base that you + want to update. +- `knowledge_base_id`: The unique identifier of the knowledge base that has been associated + with an agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: Specifies a new description for the knowledge base associated with an + agent. 
+- `"knowledgeBaseState"`: Specifies whether the agent uses the knowledge base or not when + sending an InvokeAgent request. +""" +function update_agent_knowledge_base( + agentId, + agentVersion, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/$(knowledgeBaseId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_agent_knowledge_base( + agentId, + agentVersion, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/agents/$(agentId)/agentversions/$(agentVersion)/knowledgebases/$(knowledgeBaseId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name) + update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name, params::Dict{String,<:Any}) + +Updates configurations for a data source. You can't change the chunkingConfiguration after +you create the data source. Specify the existing chunkingConfiguration. + +# Arguments +- `data_source_configuration`: Contains details about the storage configuration of the data + source. +- `data_source_id`: The unique identifier of the data source. +- `knowledge_base_id`: The unique identifier of the knowledge base to which the data source + belongs. +- `name`: Specifies a new name for the data source. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataDeletionPolicy"`: The data deletion policy of the updated data source. +- `"description"`: Specifies a new description for the data source. +- `"serverSideEncryptionConfiguration"`: Contains details about server-side encryption of + the data source. +- `"vectorIngestionConfiguration"`: Contains details about how to ingest the documents in + the data source. +""" +function update_data_source( + dataSourceConfiguration, + dataSourceId, + knowledgeBaseId, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)", + Dict{String,Any}( + "dataSourceConfiguration" => dataSourceConfiguration, "name" => name + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_data_source( + dataSourceConfiguration, + dataSourceId, + knowledgeBaseId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "dataSourceConfiguration" => dataSourceConfiguration, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration) + update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration, params::Dict{String,<:Any}) + +Updates the configuration of a knowledge base with the fields that you specify. Because all +fields will be overwritten, you must include the same values for fields that you want to +keep the same. 
You can change the following fields: name description roleArn +You can't change the knowledgeBaseConfiguration or storageConfiguration fields, so you must +specify the same configurations as when you created the knowledge base. You can send a +GetKnowledgeBase request and copy the same configurations. + +# Arguments +- `knowledge_base_configuration`: Specifies the configuration for the embeddings model used + for the knowledge base. You must use the same configuration as when the knowledge base was + created. +- `knowledge_base_id`: The unique identifier of the knowledge base to update. +- `name`: Specifies a new name for the knowledge base. +- `role_arn`: Specifies a different Amazon Resource Name (ARN) of the IAM role with + permissions to invoke API operations on the knowledge base. +- `storage_configuration`: Specifies the configuration for the vector store used for the + knowledge base. You must use the same configuration as when the knowledge base was created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: Specifies a new description for the knowledge base. +""" +function update_knowledge_base( + knowledgeBaseConfiguration, + knowledgeBaseId, + name, + roleArn, + storageConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)", + Dict{String,Any}( + "knowledgeBaseConfiguration" => knowledgeBaseConfiguration, + "name" => name, + "roleArn" => roleArn, + "storageConfiguration" => storageConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_knowledge_base( + knowledgeBaseConfiguration, + knowledgeBaseId, + name, + roleArn, + storageConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/knowledgebases/$(knowledgeBaseId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "knowledgeBaseConfiguration" => knowledgeBaseConfiguration, + "name" => name, + "roleArn" => roleArn, + "storageConfiguration" => storageConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/bedrock_agent_runtime.jl b/src/services/bedrock_agent_runtime.jl new file mode 100644 index 0000000000..1468f14ec2 --- /dev/null +++ b/src/services/bedrock_agent_runtime.jl @@ -0,0 +1,154 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: bedrock_agent_runtime +using AWS.Compat +using AWS.UUIDs + +""" + invoke_agent(agent_alias_id, agent_id, session_id) + invoke_agent(agent_alias_id, agent_id, session_id, params::Dict{String,<:Any}) + + The CLI doesn't support InvokeAgent. Sends a prompt for the agent to process and respond +to. Note the following fields for the request: To continue the same conversation with an +agent, use the same sessionId value in the request. To activate trace enablement, turn +enableTrace to true. Trace enablement helps you follow the agent's reasoning process that +led it to the information it processed, the actions it took, and the final result it +yielded. For more information, see Trace enablement. End a conversation by setting +endSession to true. In the sessionState object, you can include attributes for the +session or prompt or, if you configured an action group to return control, results from +invocation of the action group. 
The response is returned in the bytes field of the chunk +object. The attribution object contains citations for parts of the response. If you set +enableTrace to true in the request, you can trace the agent's steps and reasoning process +that led it to the response. If the action predicted was configured to return control, +the response returns parameters for the action, elicited from the user, in the +returnControl field. Errors are also surfaced in the response. + +# Arguments +- `agent_alias_id`: The alias of the agent to use. +- `agent_id`: The unique identifier of the agent to use. +- `session_id`: The unique identifier of the session. Use the same value across requests to + continue the same conversation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"enableTrace"`: Specifies whether to turn on the trace or not to track the agent's + reasoning process. For more information, see Trace enablement. +- `"endSession"`: Specifies whether to end the session with the agent or not. +- `"inputText"`: The prompt text to send the agent. If you include + returnControlInvocationResults in the sessionState field, the inputText field will be + ignored. +- `"sessionState"`: Contains parameters that specify various attributes of the session. For + more information, see Control session context. If you include + returnControlInvocationResults in the sessionState field, the inputText field will be + ignored. +""" +function invoke_agent( + agentAliasId, agentId, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent_runtime( + "POST", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/sessions/$(sessionId)/text"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function invoke_agent( + agentAliasId, + agentId, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/sessions/$(sessionId)/text", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + retrieve(knowledge_base_id, retrieval_query) + retrieve(knowledge_base_id, retrieval_query, params::Dict{String,<:Any}) + +Queries a knowledge base and retrieves information from it. + +# Arguments +- `knowledge_base_id`: The unique identifier of the knowledge base to query. +- `retrieval_query`: Contains the query to send the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"nextToken"`: If there are more results than can fit in the response, the response + returns a nextToken. Use this token in the nextToken field of another request to retrieve + the next batch of results. +- `"retrievalConfiguration"`: Contains configurations for the knowledge base query and + retrieval process. For more information, see Query configurations. 
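+
+# Example
+An illustrative sketch, not part of the generated definition: it assumes the service
+module has been loaded (for example via AWS.jl's `@service` macro) and that credentials
+are configured for `global_aws_config()`. The knowledge base ID is a placeholder, and the
+`"text"` shape of `retrievalQuery` is an assumption about the query structure.
+
+    using AWS
+    # Query a knowledge base; "KB123EXAMPLE" is a placeholder identifier.
+    resp = retrieve("KB123EXAMPLE", Dict("text" => "What is our refund policy?"))
+    # Optional keys such as a retrieval configuration go in the params dictionary.
+    resp = retrieve(
+        "KB123EXAMPLE",
+        Dict("text" => "What is our refund policy?"),
+        Dict{String,Any}(
+            "retrievalConfiguration" =>
+                Dict("vectorSearchConfiguration" => Dict("numberOfResults" => 3)),
+        ),
+    )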
+""" +function retrieve( + knowledgeBaseId, retrievalQuery; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent_runtime( + "POST", + "/knowledgebases/$(knowledgeBaseId)/retrieve", + Dict{String,Any}("retrievalQuery" => retrievalQuery); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function retrieve( + knowledgeBaseId, + retrievalQuery, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/knowledgebases/$(knowledgeBaseId)/retrieve", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("retrievalQuery" => retrievalQuery), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + retrieve_and_generate(input) + retrieve_and_generate(input, params::Dict{String,<:Any}) + +Queries a knowledge base and generates responses based on the retrieved results. The +response only cites sources that are relevant to the query. + +# Arguments +- `input`: Contains the query to be made to the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"retrieveAndGenerateConfiguration"`: Contains configurations for the knowledge base + query and retrieval process. For more information, see Query configurations. +- `"sessionConfiguration"`: Contains details about the session with the knowledge base. +- `"sessionId"`: The unique identifier of the session. Reuse the same value to continue the + same session with the knowledge base. +""" +function retrieve_and_generate(input; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent_runtime( + "POST", + "/retrieveAndGenerate", + Dict{String,Any}("input" => input); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function retrieve_and_generate( + input, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent_runtime( + "POST", + "/retrieveAndGenerate", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("input" => input), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/bedrock_runtime.jl b/src/services/bedrock_runtime.jl new file mode 100644 index 0000000000..3f4cc4d7c6 --- /dev/null +++ b/src/services/bedrock_runtime.jl @@ -0,0 +1,292 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: bedrock_runtime +using AWS.Compat +using AWS.UUIDs + +""" + converse(messages, model_id) + converse(messages, model_id, params::Dict{String,<:Any}) + +Sends messages to the specified Amazon Bedrock model. Converse provides a consistent +interface that works with all models that support messages. This allows you to write code +once and use it with different models. Should a model have unique inference parameters, you +can also pass those unique parameters to the model. For information about the Converse API, +see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a +guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a +model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, +see Converse API examples in the Amazon Bedrock User Guide. This operation requires +permission for the bedrock:InvokeModel action. + +# Arguments +- `messages`: The messages that you want to send to the model. +- `model_id`: The identifier for the model that you want to call. 
The modelId to provide + depends on the type of model that you use: If you use a base model, specify the model ID + or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs + (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model, + specify the ARN of the Provisioned Throughput. For more information, see Run inference + using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom + model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting + provisioned model. For more information, see Use a custom model in Amazon Bedrock in the + Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"additionalModelRequestFields"`: Additional inference parameters that the model + supports, beyond the base set of inference parameters that Converse supports in the + inferenceConfig field. For more information, see Model parameters. +- `"additionalModelResponseFieldPaths"`: Additional model parameters field paths to return + in the response. Converse returns the requested fields as a JSON Pointer object in the + additionalModelResponseFields field. The following is example JSON for + additionalModelResponseFieldPaths. [ \"/stop_sequence\" ] For information about the JSON + Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. Converse + rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. + if the JSON Pointer is valid, but the requested field is not in the model response, it is + ignored by Converse. +- `"guardrailConfig"`: Configuration information for a guardrail that you want to use in + the request. +- `"inferenceConfig"`: Inference parameters to pass to the model. Converse supports a base + set of inference parameters. If you need to pass additional parameters that the model + supports, use the additionalModelRequestFields request field. +- `"system"`: A system prompt to pass to the model. +- `"toolConfig"`: Configuration information for the tools that the model can use when + generating a response. This field is only supported by Anthropic Claude 3, Cohere Command + R, Cohere Command R+, and Mistral Large models. +""" +function converse(messages, modelId; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_runtime( + "POST", + "/model/$(modelId)/converse", + Dict{String,Any}("messages" => messages); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function converse( + messages, + modelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/model/$(modelId)/converse", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("messages" => messages), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + converse_stream(messages, model_id) + converse_stream(messages, model_id, params::Dict{String,<:Any}) + +Sends messages to the specified Amazon Bedrock model and returns the response in a stream. +ConverseStream provides a consistent API that works with all Amazon Bedrock models that +support messages. This allows you to write code once and use it with different models. +Should a model have unique inference parameters, you can also pass those unique parameters +to the model. 
To find out if a model supports streaming, call GetFoundationModel and check +the responseStreamingSupported field in the response. For information about the Converse +API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use +a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a +model, see Tool use (Function calling) in the Amazon Bedrock User Guide. For example code, +see Conversation streaming example in the Amazon Bedrock User Guide. This operation +requires permission for the bedrock:InvokeModelWithResponseStream action. + +# Arguments +- `messages`: The messages that you want to send to the model. +- `model_id`: The ID for the model. The modelId to provide depends on the type of model + that you use: If you use a base model, specify the model ID or its ARN. For a list of + model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the + Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the + Provisioned Throughput. For more information, see Run inference using a Provisioned + Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase + Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For + more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User + Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"additionalModelRequestFields"`: Additional inference parameters that the model + supports, beyond the base set of inference parameters that ConverseStream supports in the + inferenceConfig field. +- `"additionalModelResponseFieldPaths"`: Additional model parameters field paths to return + in the response. ConverseStream returns the requested fields as a JSON Pointer object in + the additionalModelResponseFields field. The following is example JSON for + additionalModelResponseFieldPaths. [ \"/stop_sequence\" ] For information about the JSON + Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. + ConverseStream rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a + 400 error code. If the JSON Pointer is valid, but the requested field is not in the model + response, it is ignored by ConverseStream. +- `"guardrailConfig"`: Configuration information for a guardrail that you want to use in + the request. +- `"inferenceConfig"`: Inference parameters to pass to the model. ConverseStream supports a + base set of inference parameters. If you need to pass additional parameters that the model + supports, use the additionalModelRequestFields request field. +- `"system"`: A system prompt to send to the model. +- `"toolConfig"`: Configuration information for the tools that the model can use when + generating a response. This field is only supported by Anthropic Claude 3 models. 
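+
+# Example
+An illustrative sketch rather than part of the generated definition: the model ID is a
+placeholder and the message and `inferenceConfig` shapes are assumptions about the
+Converse API request structure. `converse` accepts the same positional arguments and
+returns a complete response, while `converse_stream` returns the response as an event
+stream.
+
+    using AWS
+    messages = [
+        Dict("role" => "user", "content" => [Dict("text" => "Write a haiku about Julia.")]),
+    ]
+    params = Dict{String,Any}(
+        "system" => [Dict("text" => "You are a concise assistant.")],
+        "inferenceConfig" => Dict("maxTokens" => 256, "temperature" => 0.5),
+    )
+    resp = converse_stream(messages, "anthropic.claude-3-haiku-20240307-v1:0", params)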
+""" +function converse_stream( + messages, modelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_runtime( + "POST", + "/model/$(modelId)/converse-stream", + Dict{String,Any}("messages" => messages); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function converse_stream( + messages, + modelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/model/$(modelId)/converse-stream", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("messages" => messages), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + invoke_model(body, model_id) + invoke_model(body, model_id, params::Dict{String,<:Any}) + +Invokes the specified Amazon Bedrock model to run inference using the prompt and inference +parameters provided in the request body. You use model inference to generate text, images, +and embeddings. For example code, see Invoke model code examples in the Amazon Bedrock User +Guide. This operation requires permission for the bedrock:InvokeModel action. + +# Arguments +- `body`: The prompt and inference parameters in the format specified in the contentType in + the header. You must provide the body in JSON format. To see the format and content of the + request and response bodies for different models, refer to Inference parameters. For more + information, see Run inference in the Bedrock User Guide. +- `model_id`: The unique identifier of the model to invoke to run inference. The modelId to + provide depends on the type of model that you use: If you use a base model, specify the + model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model + IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned + model, specify the ARN of the Provisioned Throughput. For more information, see Run + inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a + custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the + resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock + in the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Accept"`: The desired MIME type of the inference body in the response. The default + value is application/json. +- `"Content-Type"`: The MIME type of the input data in the request. You must specify + application/json. +- `"X-Amzn-Bedrock-GuardrailIdentifier"`: The unique identifier of the guardrail that you + want to use. If you don't provide a value, no guardrail is applied to the invocation. An + error will be thrown in the following situations. You don't provide a guardrail + identifier but you specify the amazon-bedrock-guardrailConfig field in the request body. + You enable the guardrail but the contentType isn't application/json. You provide a + guardrail identifier, but guardrailVersion isn't specified. +- `"X-Amzn-Bedrock-GuardrailVersion"`: The version number for the guardrail. The value can + also be DRAFT. +- `"X-Amzn-Bedrock-Trace"`: Specifies whether to enable or disable the Bedrock trace. If + enabled, you can see the full Bedrock trace. 
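+
+# Example
+A hedged sketch, not part of the generated definition: it assumes JSON.jl is available
+for encoding the body, and the model ID and request-body fields are placeholders for
+whatever the chosen model expects (see Inference parameters).
+
+    using AWS
+    using JSON
+    body = JSON.json(Dict(
+        "anthropic_version" => "bedrock-2023-05-31",
+        "max_tokens" => 256,
+        "messages" => [Dict("role" => "user", "content" => "Hello, model!")],
+    ))
+    resp = invoke_model(
+        body,
+        "anthropic.claude-3-haiku-20240307-v1:0",   # placeholder model ID
+        Dict{String,Any}("Content-Type" => "application/json", "Accept" => "application/json"),
+    )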
+""" +function invoke_model(body, modelId; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_runtime( + "POST", + "/model/$(modelId)/invoke", + Dict{String,Any}("body" => body); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function invoke_model( + body, + modelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/model/$(modelId)/invoke", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("body" => body), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + invoke_model_with_response_stream(body, model_id) + invoke_model_with_response_stream(body, model_id, params::Dict{String,<:Any}) + +Invoke the specified Amazon Bedrock model to run inference using the prompt and inference +parameters provided in the request body. The response is returned in a stream. To see if a +model supports streaming, call GetFoundationModel and check the responseStreamingSupported +field in the response. The CLI doesn't support InvokeModelWithResponseStream. For example +code, see Invoke model with streaming code example in the Amazon Bedrock User Guide. This +operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action. + +# Arguments +- `body`: The prompt and inference parameters in the format specified in the contentType in + the header. You must provide the body in JSON format. To see the format and content of the + request and response bodies for different models, refer to Inference parameters. For more + information, see Run inference in the Bedrock User Guide. +- `model_id`: The unique identifier of the model to invoke to run inference. The modelId to + provide depends on the type of model that you use: If you use a base model, specify the + model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model + IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned + model, specify the ARN of the Provisioned Throughput. For more information, see Run + inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a + custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the + resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock + in the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Content-Type"`: The MIME type of the input data in the request. You must specify + application/json. +- `"X-Amzn-Bedrock-Accept"`: The desired MIME type of the inference body in the response. + The default value is application/json. +- `"X-Amzn-Bedrock-GuardrailIdentifier"`: The unique identifier of the guardrail that you + want to use. If you don't provide a value, no guardrail is applied to the invocation. An + error is thrown in the following situations. You don't provide a guardrail identifier but + you specify the amazon-bedrock-guardrailConfig field in the request body. You enable the + guardrail but the contentType isn't application/json. You provide a guardrail identifier, + but guardrailVersion isn't specified. +- `"X-Amzn-Bedrock-GuardrailVersion"`: The version number for the guardrail. The value can + also be DRAFT. +- `"X-Amzn-Bedrock-Trace"`: Specifies whether to enable or disable the Bedrock trace. If + enabled, you can see the full Bedrock trace. 
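+
+# Example
+A sketch mirroring the `invoke_model` example: the body contents are model-specific
+placeholders rather than values documented here, and JSON.jl is assumed to be available
+for encoding the payload.
+
+    using AWS
+    using JSON
+    body = JSON.json(Dict("inputText" => "Summarize this sentence."))  # model-specific payload (placeholder)
+    resp = invoke_model_with_response_stream(
+        body,
+        "amazon.titan-text-express-v1",             # placeholder model ID
+        Dict{String,Any}("Content-Type" => "application/json"),
+    )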
+""" +function invoke_model_with_response_stream( + body, modelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_runtime( + "POST", + "/model/$(modelId)/invoke-with-response-stream", + Dict{String,Any}("body" => body); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function invoke_model_with_response_stream( + body, + modelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/model/$(modelId)/invoke-with-response-stream", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("body" => body), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/billingconductor.jl b/src/services/billingconductor.jl index 64b8311620..cbb982d6fa 100644 --- a/src/services/billingconductor.jl +++ b/src/services/billingconductor.jl @@ -194,7 +194,7 @@ Services charges, based off of the predefined pricing plan computation. # Arguments - `account_grouping`: The set of accounts that will be under the billing group. The set of - accounts resemble the linked accounts in a consolidated family. + accounts resemble the linked accounts in a consolidated billing family. - `computation_preference`: The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group. - `name`: The billing group name. The names must be unique. @@ -258,7 +258,7 @@ end create_custom_line_item(billing_group_arn, charge_details, description, name) create_custom_line_item(billing_group_arn, charge_details, description, name, params::Dict{String,<:Any}) - Creates a custom line item that can be used to create a one-time fixed charge that can be +Creates a custom line item that can be used to create a one-time fixed charge that can be applied to a single billing group for the current or previous billing period. The one-time fixed charge is either a fee or discount. @@ -273,6 +273,8 @@ fixed charge is either a fee or discount. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The Amazon Web Services account in which this custom line item will be + applied to. - `"BillingPeriodRange"`: A time range for which the custom line item is effective. - `"Tags"`: A map that contains tag keys and tag values that are attached to a custom line item. @@ -670,6 +672,49 @@ function disassociate_pricing_rules( ) end +""" + get_billing_group_cost_report(arn) + get_billing_group_cost_report(arn, params::Dict{String,<:Any}) + +Retrieves the margin summary report, which includes the Amazon Web Services cost and +charged amount (pro forma cost) by Amazon Web Service for a specific billing group. + +# Arguments +- `arn`: The Amazon Resource Number (ARN) that uniquely identifies the billing group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"BillingPeriodRange"`: A time range for which the margin summary is effective. You can + specify up to 12 months. +- `"GroupBy"`: A list of strings that specify the attributes that are used to break down + costs in the margin summary reports for the billing group. For example, you can view your + costs by the Amazon Web Service name or the billing period. +- `"MaxResults"`: The maximum number of margin summary reports to retrieve. +- `"NextToken"`: The pagination token used on subsequent calls to get reports. 
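+
+# Example
+An illustrative call, not part of the generated definition: the billing group ARN is a
+placeholder and the `GroupBy` and `BillingPeriodRange` values are assumptions modelled on
+the optional parameters above rather than a verified request shape.
+
+    using AWS
+    arn = "arn:aws:billingconductor::123456789012:billinggroup/EXAMPLE"  # placeholder ARN
+    report = get_billing_group_cost_report(arn)
+    # Break the margin summary down by service for a range of billing periods.
+    report = get_billing_group_cost_report(
+        arn,
+        Dict{String,Any}(
+            "GroupBy" => ["PRODUCT_NAME"],
+            "BillingPeriodRange" => Dict(
+                "InclusiveStartBillingPeriod" => "2024-01",
+                "ExclusiveEndBillingPeriod" => "2024-04",
+            ),
+        ),
+    )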
+""" +function get_billing_group_cost_report( + Arn; aws_config::AbstractAWSConfig=global_aws_config() +) + return billingconductor( + "POST", + "/get-billing-group-cost-report", + Dict{String,Any}("Arn" => Arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_billing_group_cost_report( + Arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return billingconductor( + "POST", + "/get-billing-group-cost-report", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Arn" => Arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_account_associations() list_account_associations(params::Dict{String,<:Any}) @@ -1178,6 +1223,8 @@ This updates an existing billing group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountGrouping"`: Specifies if the billing group has automatic account association + (AutoAssociate) enabled. - `"ComputationPreference"`: The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group. - `"Description"`: A description of the billing group. diff --git a/src/services/braket.jl b/src/services/braket.jl index 67ab1f7661..9df683dfcb 100644 --- a/src/services/braket.jl +++ b/src/services/braket.jl @@ -97,6 +97,7 @@ Creates an Amazon Braket job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"associations"`: The list of Amazon Braket resources associated with the hybrid job. - `"checkpointConfig"`: Information about the output locations for job checkpoint data. - `"hyperParameters"`: Algorithm-specific parameters used by an Amazon Braket job that influence the quality of the training job. The values are set with a string of JSON @@ -185,6 +186,7 @@ Creates a quantum task. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"associations"`: The list of Amazon Braket resources associated with the quantum task. - `"deviceParameters"`: The parameters for the device to run the task on. - `"jobToken"`: The token for an Amazon Braket job that associates it with the quantum task. - `"tags"`: Tags to be added to the quantum task you're creating. @@ -293,6 +295,9 @@ Retrieves the specified Amazon Braket job. # Arguments - `job_arn`: The ARN of the job to retrieve. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"additionalAttributeNames"`: A list of attributes to return information for. """ function get_job(jobArn; aws_config::AbstractAWSConfig=global_aws_config()) return braket( @@ -318,8 +323,11 @@ end Retrieves the specified quantum task. # Arguments -- `quantum_task_arn`: the ARN of the task to retrieve. +- `quantum_task_arn`: The ARN of the task to retrieve. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"additionalAttributeNames"`: A list of attributes to return information for. """ function get_quantum_task(quantumTaskArn; aws_config::AbstractAWSConfig=global_aws_config()) return braket( diff --git a/src/services/budgets.jl b/src/services/budgets.jl index 2025e4b919..b6d85f1ff5 100644 --- a/src/services/budgets.jl +++ b/src/services/budgets.jl @@ -24,6 +24,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys subscriber and up to 10 email subscribers. 
If you include notifications and subscribers in your CreateBudget call, Amazon Web Services creates the notifications and subscribers for you. +- `"ResourceTags"`: An optional list of tags to associate with the specified budget. Each + tag consists of a key and a value, and each key must be unique for the resource. """ function create_budget(AccountId, Budget; aws_config::AbstractAWSConfig=global_aws_config()) return budgets( @@ -72,6 +74,10 @@ end - `notification_type`: - `subscribers`: +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceTags"`: An optional list of tags to associate with the specified budget action. + Each tag consists of a key and a value, and each key must be unique for the resource. """ function create_budget_action( AccountId, @@ -710,8 +716,8 @@ end # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: An integer that shows how many budget name entries a paginated response - contains. +- `"MaxResults"`: An integer that represents how many budgets a paginated response + contains. The default is 50. - `"NextToken"`: """ function describe_budget_notifications_for_account( @@ -795,13 +801,12 @@ Lists the budgets that are associated with an account. The Request Syntax secti the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples section. # Arguments -- `account_id`: The accountId that is associated with the budgets that you want - descriptions of. +- `account_id`: The accountId that is associated with the budgets that you want to describe. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: An optional integer that represents how many entries a paginated response - contains. The maximum is 100. +- `"MaxResults"`: An integer that represents how many budgets a paginated response + contains. The default is 100. - `"NextToken"`: The pagination token that you include in your request to indicate the next set of results that you want to retrieve. """ @@ -842,7 +847,7 @@ Lists the notifications that are associated with a budget. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: An optional integer that represents how many entries a paginated response - contains. The maximum is 100. + contains. - `"NextToken"`: The pagination token that you include in your request to indicate the next set of results that you want to retrieve. """ @@ -891,7 +896,7 @@ Lists the subscribers that are associated with a notification. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: An optional integer that represents how many entries a paginated response - contains. The maximum is 100. + contains. - `"NextToken"`: The pagination token that you include in your request to indicate the next set of results that you want to retrieve. """ @@ -993,6 +998,129 @@ function execute_budget_action( ) end +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists tags associated with a budget or budget action resource. + +# Arguments +- `resource_arn`: The unique identifier for the resource. 
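+
+# Example
+A brief sketch; the budget ARN below is a placeholder.
+
+    using AWS
+    tags = list_tags_for_resource(
+        "arn:aws:budgets::123456789012:budget/my-monthly-budget",  # placeholder ARN
+    )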
+ +""" +function list_tags_for_resource( + ResourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return budgets( + "ListTagsForResource", + Dict{String,Any}("ResourceARN" => ResourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return budgets( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceARN" => ResourceARN), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, resource_tags) + tag_resource(resource_arn, resource_tags, params::Dict{String,<:Any}) + +Creates tags for a budget or budget action resource. + +# Arguments +- `resource_arn`: The unique identifier for the resource. +- `resource_tags`: The tags associated with the resource. + +""" +function tag_resource( + ResourceARN, ResourceTags; aws_config::AbstractAWSConfig=global_aws_config() +) + return budgets( + "TagResource", + Dict{String,Any}("ResourceARN" => ResourceARN, "ResourceTags" => ResourceTags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceARN, + ResourceTags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return budgets( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceARN" => ResourceARN, "ResourceTags" => ResourceTags + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, resource_tag_keys) + untag_resource(resource_arn, resource_tag_keys, params::Dict{String,<:Any}) + +Deletes tags associated with a budget or budget action resource. + +# Arguments +- `resource_arn`: The unique identifier for the resource. +- `resource_tag_keys`: The key that's associated with the tag. + +""" +function untag_resource( + ResourceARN, ResourceTagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return budgets( + "UntagResource", + Dict{String,Any}( + "ResourceARN" => ResourceARN, "ResourceTagKeys" => ResourceTagKeys + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceARN, + ResourceTagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return budgets( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceARN" => ResourceARN, "ResourceTagKeys" => ResourceTagKeys + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_budget(account_id, new_budget) update_budget(account_id, new_budget, params::Dict{String,<:Any}) diff --git a/src/services/chatbot.jl b/src/services/chatbot.jl new file mode 100644 index 0000000000..40054a1731 --- /dev/null +++ b/src/services/chatbot.jl @@ -0,0 +1,1210 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: chatbot +using AWS.Compat +using AWS.UUIDs + +""" + create_chime_webhook_configuration(configuration_name, iam_role_arn, sns_topic_arns, webhook_description, webhook_url) + create_chime_webhook_configuration(configuration_name, iam_role_arn, sns_topic_arns, webhook_description, webhook_url, params::Dict{String,<:Any}) + +Creates Chime Webhook Configuration + +# Arguments +- `configuration_name`: The name of the configuration. 
+- `iam_role_arn`: This is a user-defined role that AWS Chatbot will assume. This is not the + service-linked role. For more information, see IAM Policies for AWS Chatbot. +- `sns_topic_arns`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. +- `webhook_description`: Description of the webhook. Recommend using the convention + `RoomName/WebhookName`. See Chime setup tutorial for more details: + https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html. +- `webhook_url`: URL for the Chime webhook. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. +- `"Tags"`: A list of tags to apply to the configuration. +""" +function create_chime_webhook_configuration( + ConfigurationName, + IamRoleArn, + SnsTopicArns, + WebhookDescription, + WebhookUrl; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/create-chime-webhook-configuration", + Dict{String,Any}( + "ConfigurationName" => ConfigurationName, + "IamRoleArn" => IamRoleArn, + "SnsTopicArns" => SnsTopicArns, + "WebhookDescription" => WebhookDescription, + "WebhookUrl" => WebhookUrl, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_chime_webhook_configuration( + ConfigurationName, + IamRoleArn, + SnsTopicArns, + WebhookDescription, + WebhookUrl, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/create-chime-webhook-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationName" => ConfigurationName, + "IamRoleArn" => IamRoleArn, + "SnsTopicArns" => SnsTopicArns, + "WebhookDescription" => WebhookDescription, + "WebhookUrl" => WebhookUrl, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_microsoft_teams_channel_configuration(channel_id, configuration_name, iam_role_arn, team_id, tenant_id) + create_microsoft_teams_channel_configuration(channel_id, configuration_name, iam_role_arn, team_id, tenant_id, params::Dict{String,<:Any}) + +Creates MS Teams Channel Configuration + +# Arguments +- `channel_id`: The ID of the Microsoft Teams channel. +- `configuration_name`: The name of the configuration. +- `iam_role_arn`: The ARN of the IAM role that defines the permissions for AWS Chatbot. + This is a user-defined role that AWS Chatbot will assume. This is not the service-linked + role. For more information, see IAM Policies for AWS Chatbot. +- `team_id`: The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, + you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot + console. Then you can copy and paste the team ID from the console. For more details, see + steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide. +- `tenant_id`: The ID of the Microsoft Teams tenant. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChannelName"`: The name of the Microsoft Teams channel. +- `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel + guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is + not set. +- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. +- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. 
+- `"Tags"`: A list of tags to apply to the configuration. +- `"TeamName"`: The name of the Microsoft Teams Team. +- `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat + configuration. +""" +function create_microsoft_teams_channel_configuration( + ChannelId, + ConfigurationName, + IamRoleArn, + TeamId, + TenantId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/create-ms-teams-channel-configuration", + Dict{String,Any}( + "ChannelId" => ChannelId, + "ConfigurationName" => ConfigurationName, + "IamRoleArn" => IamRoleArn, + "TeamId" => TeamId, + "TenantId" => TenantId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_microsoft_teams_channel_configuration( + ChannelId, + ConfigurationName, + IamRoleArn, + TeamId, + TenantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/create-ms-teams-channel-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ChannelId" => ChannelId, + "ConfigurationName" => ConfigurationName, + "IamRoleArn" => IamRoleArn, + "TeamId" => TeamId, + "TenantId" => TenantId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_slack_channel_configuration(configuration_name, iam_role_arn, slack_channel_id, slack_team_id) + create_slack_channel_configuration(configuration_name, iam_role_arn, slack_channel_id, slack_team_id, params::Dict{String,<:Any}) + +Creates Slack Channel Configuration + +# Arguments +- `configuration_name`: The name of the configuration. +- `iam_role_arn`: The ARN of the IAM role that defines the permissions for AWS Chatbot. + This is a user-defined role that AWS Chatbot will assume. This is not the service-linked + role. For more information, see IAM Policies for AWS Chatbot. +- `slack_channel_id`: The ID of the Slack channel. To get the ID, open Slack, right click + on the channel name in the left pane, then choose Copy Link. The channel ID is the + 9-character string at the end of the URL. For example, ABCBBLZZZ. +- `slack_team_id`: The ID of the Slack workspace authorized with AWS Chatbot. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel + guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is + not set. +- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. +- `"SlackChannelName"`: The name of the Slack Channel. +- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. +- `"Tags"`: A list of tags to apply to the configuration. +- `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat + configuration. 
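+
+# Example
+An illustrative sketch; every identifier below (role ARN, channel ID, workspace ID, SNS
+topic ARN) is a placeholder.
+
+    using AWS
+    resp = create_slack_channel_configuration(
+        "my-slack-config",                                        # ConfigurationName
+        "arn:aws:iam::123456789012:role/chatbot-channel-role",    # IamRoleArn (placeholder)
+        "C0123456789",                                            # SlackChannelId (placeholder)
+        "T0123456789",                                            # SlackTeamId (placeholder)
+        Dict{String,Any}(
+            "SnsTopicArns" => ["arn:aws:sns:us-east-1:123456789012:alerts"],
+            "LoggingLevel" => "ERROR",
+        ),
+    )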
+""" +function create_slack_channel_configuration( + ConfigurationName, + IamRoleArn, + SlackChannelId, + SlackTeamId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/create-slack-channel-configuration", + Dict{String,Any}( + "ConfigurationName" => ConfigurationName, + "IamRoleArn" => IamRoleArn, + "SlackChannelId" => SlackChannelId, + "SlackTeamId" => SlackTeamId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_slack_channel_configuration( + ConfigurationName, + IamRoleArn, + SlackChannelId, + SlackTeamId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/create-slack-channel-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationName" => ConfigurationName, + "IamRoleArn" => IamRoleArn, + "SlackChannelId" => SlackChannelId, + "SlackTeamId" => SlackTeamId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_chime_webhook_configuration(chat_configuration_arn) + delete_chime_webhook_configuration(chat_configuration_arn, params::Dict{String,<:Any}) + +Deletes a Chime Webhook Configuration + +# Arguments +- `chat_configuration_arn`: The ARN of the ChimeWebhookConfiguration to delete. + +""" +function delete_chime_webhook_configuration( + ChatConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/delete-chime-webhook-configuration", + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_chime_webhook_configuration( + ChatConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/delete-chime-webhook-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_microsoft_teams_channel_configuration(chat_configuration_arn) + delete_microsoft_teams_channel_configuration(chat_configuration_arn, params::Dict{String,<:Any}) + +Deletes MS Teams Channel Configuration + +# Arguments +- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration to delete. + +""" +function delete_microsoft_teams_channel_configuration( + ChatConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/delete-ms-teams-channel-configuration", + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_microsoft_teams_channel_configuration( + ChatConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/delete-ms-teams-channel-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_microsoft_teams_configured_team(team_id) + delete_microsoft_teams_configured_team(team_id, params::Dict{String,<:Any}) + +Deletes the Microsoft Teams team authorization allowing for channels to be configured in +that Microsoft Teams team. 
Note that the Microsoft Teams team must have no channels +configured to remove it. + +# Arguments +- `team_id`: The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, + you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot + console. Then you can copy and paste the team ID from the console. For more details, see + steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide. + +""" +function delete_microsoft_teams_configured_team( + TeamId; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/delete-ms-teams-configured-teams", + Dict{String,Any}("TeamId" => TeamId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_microsoft_teams_configured_team( + TeamId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/delete-ms-teams-configured-teams", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("TeamId" => TeamId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_microsoft_teams_user_identity(chat_configuration_arn, user_id) + delete_microsoft_teams_user_identity(chat_configuration_arn, user_id, params::Dict{String,<:Any}) + +Deletes a Teams user identity + +# Arguments +- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration associated + with the user identity to delete. +- `user_id`: Id from Microsoft Teams for user. + +""" +function delete_microsoft_teams_user_identity( + ChatConfigurationArn, UserId; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/delete-ms-teams-user-identity", + Dict{String,Any}( + "ChatConfigurationArn" => ChatConfigurationArn, "UserId" => UserId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_microsoft_teams_user_identity( + ChatConfigurationArn, + UserId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/delete-ms-teams-user-identity", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ChatConfigurationArn" => ChatConfigurationArn, "UserId" => UserId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_slack_channel_configuration(chat_configuration_arn) + delete_slack_channel_configuration(chat_configuration_arn, params::Dict{String,<:Any}) + +Deletes Slack Channel Configuration + +# Arguments +- `chat_configuration_arn`: The ARN of the SlackChannelConfiguration to delete. 
+ +""" +function delete_slack_channel_configuration( + ChatConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/delete-slack-channel-configuration", + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_slack_channel_configuration( + ChatConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/delete-slack-channel-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_slack_user_identity(chat_configuration_arn, slack_team_id, slack_user_id) + delete_slack_user_identity(chat_configuration_arn, slack_team_id, slack_user_id, params::Dict{String,<:Any}) + +Deletes a Slack user identity + +# Arguments +- `chat_configuration_arn`: The ARN of the SlackChannelConfiguration associated with the + user identity to delete. +- `slack_team_id`: The ID of the Slack workspace authorized with AWS Chatbot. +- `slack_user_id`: The ID of the user in Slack. + +""" +function delete_slack_user_identity( + ChatConfigurationArn, + SlackTeamId, + SlackUserId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/delete-slack-user-identity", + Dict{String,Any}( + "ChatConfigurationArn" => ChatConfigurationArn, + "SlackTeamId" => SlackTeamId, + "SlackUserId" => SlackUserId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_slack_user_identity( + ChatConfigurationArn, + SlackTeamId, + SlackUserId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/delete-slack-user-identity", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ChatConfigurationArn" => ChatConfigurationArn, + "SlackTeamId" => SlackTeamId, + "SlackUserId" => SlackUserId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_slack_workspace_authorization(slack_team_id) + delete_slack_workspace_authorization(slack_team_id, params::Dict{String,<:Any}) + +Deletes the Slack workspace authorization that allows channels to be configured in that +workspace. This requires all configured channels in the workspace to be deleted. + +# Arguments +- `slack_team_id`: The ID of the Slack workspace authorized with AWS Chatbot. 
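+
+# Example
+A sketch of removing a workspace authorization; the workspace ID is a placeholder, and
+all Slack channel configurations in the workspace are assumed to have been deleted
+already, as required above.
+
+    using AWS
+    delete_slack_workspace_authorization("T0123456789")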
+ +""" +function delete_slack_workspace_authorization( + SlackTeamId; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/delete-slack-workspace-authorization", + Dict{String,Any}("SlackTeamId" => SlackTeamId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_slack_workspace_authorization( + SlackTeamId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/delete-slack-workspace-authorization", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SlackTeamId" => SlackTeamId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_chime_webhook_configurations() + describe_chime_webhook_configurations(params::Dict{String,<:Any}) + +Lists Chime Webhook Configurations optionally filtered by ChatConfigurationArn + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChatConfigurationArn"`: An optional ARN of a ChimeWebhookConfiguration to describe. +- `"MaxResults"`: The maximum number of results to include in the response. If more results + exist than the specified MaxResults value, a token is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional token returned from a prior request. Use this token for + pagination of results from this action. If this parameter is specified, the response + includes only results beyond the token, up to the value specified by MaxResults. +""" +function describe_chime_webhook_configurations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/describe-chime-webhook-configurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_chime_webhook_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/describe-chime-webhook-configurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_slack_channel_configurations() + describe_slack_channel_configurations(params::Dict{String,<:Any}) + +Lists Slack Channel Configurations optionally filtered by ChatConfigurationArn + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChatConfigurationArn"`: An optional ARN of a SlackChannelConfiguration to describe. +- `"MaxResults"`: The maximum number of results to include in the response. If more results + exist than the specified MaxResults value, a token is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional token returned from a prior request. Use this token for + pagination of results from this action. If this parameter is specified, the response + includes only results beyond the token, up to the value specified by MaxResults. 
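+
+# Example
+A hedged pagination sketch: the response keys (`"SlackChannelConfigurations"`,
+`"NextToken"`) are assumptions about the service response, and the parsed result is
+assumed to behave like a dictionary under the default feature set.
+
+    using AWS
+    configs = Any[]
+    params = Dict{String,Any}("MaxResults" => 10)
+    while true
+        page = describe_slack_channel_configurations(params)
+        append!(configs, get(page, "SlackChannelConfigurations", []))
+        token = get(page, "NextToken", nothing)
+        token === nothing && break
+        params["NextToken"] = token
+    end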
+""" +function describe_slack_channel_configurations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/describe-slack-channel-configurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_slack_channel_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/describe-slack-channel-configurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_slack_user_identities() + describe_slack_user_identities(params::Dict{String,<:Any}) + +Lists all Slack user identities with a mapped role. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChatConfigurationArn"`: The ARN of the SlackChannelConfiguration associated with the + user identities to describe. +- `"MaxResults"`: The maximum number of results to include in the response. If more results + exist than the specified MaxResults value, a token is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional token returned from a prior request. Use this token for + pagination of results from this action. If this parameter is specified, the response + includes only results beyond the token, up to the value specified by MaxResults. +""" +function describe_slack_user_identities(; aws_config::AbstractAWSConfig=global_aws_config()) + return chatbot( + "POST", + "/describe-slack-user-identities"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_slack_user_identities( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/describe-slack-user-identities", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_slack_workspaces() + describe_slack_workspaces(params::Dict{String,<:Any}) + +Lists all authorized Slack Workspaces for AWS Account + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to include in the response. If more results + exist than the specified MaxResults value, a token is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional token returned from a prior request. Use this token for + pagination of results from this action. If this parameter is specified, the response + includes only results beyond the token, up to the value specified by MaxResults. 
+""" +function describe_slack_workspaces(; aws_config::AbstractAWSConfig=global_aws_config()) + return chatbot( + "POST", + "/describe-slack-workspaces"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_slack_workspaces( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/describe-slack-workspaces", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_account_preferences() + get_account_preferences(params::Dict{String,<:Any}) + +Get Chatbot account level preferences + +""" +function get_account_preferences(; aws_config::AbstractAWSConfig=global_aws_config()) + return chatbot( + "POST", + "/get-account-preferences"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_account_preferences( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/get-account-preferences", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_microsoft_teams_channel_configuration(chat_configuration_arn) + get_microsoft_teams_channel_configuration(chat_configuration_arn, params::Dict{String,<:Any}) + +Get a single MS Teams Channel Configurations + +# Arguments +- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration to retrieve. + +""" +function get_microsoft_teams_channel_configuration( + ChatConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/get-ms-teams-channel-configuration", + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_microsoft_teams_channel_configuration( + ChatConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/get-ms-teams-channel-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_microsoft_teams_channel_configurations() + list_microsoft_teams_channel_configurations(params::Dict{String,<:Any}) + +Lists MS Teams Channel Configurations optionally filtered by TeamId + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to include in the response. If more results + exist than the specified MaxResults value, a token is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional token returned from a prior request. Use this token for + pagination of results from this action. If this parameter is specified, the response + includes only results beyond the token, up to the value specified by MaxResults. +- `"TeamId"`: The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, + you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot + console. Then you can copy and paste the team ID from the console. For more details, see + steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide. 
+""" +function list_microsoft_teams_channel_configurations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/list-ms-teams-channel-configurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_microsoft_teams_channel_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/list-ms-teams-channel-configurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_microsoft_teams_configured_teams() + list_microsoft_teams_configured_teams(params::Dict{String,<:Any}) + +Lists all authorized MS teams for AWS Account + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to include in the response. If more results + exist than the specified MaxResults value, a token is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional token returned from a prior request. Use this token for + pagination of results from this action. If this parameter is specified, the response + includes only results beyond the token, up to the value specified by MaxResults. +""" +function list_microsoft_teams_configured_teams(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/list-ms-teams-configured-teams"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_microsoft_teams_configured_teams( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/list-ms-teams-configured-teams", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_microsoft_teams_user_identities() + list_microsoft_teams_user_identities(params::Dict{String,<:Any}) + +Lists all Microsoft Teams user identities with a mapped role. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChatConfigurationArn"`: The ARN of the MicrosoftTeamsChannelConfiguration associated + with the user identities to list. +- `"MaxResults"`: The maximum number of results to include in the response. If more results + exist than the specified MaxResults value, a token is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional token returned from a prior request. Use this token for + pagination of results from this action. If this parameter is specified, the response + includes only results beyond the token, up to the value specified by MaxResults. +""" +function list_microsoft_teams_user_identities(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/list-ms-teams-user-identities"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_microsoft_teams_user_identities( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/list-ms-teams-user-identities", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Retrieves the list of tags applied to a configuration. + +# Arguments +- `resource_arn`: The ARN of the configuration. 
+ +""" +function list_tags_for_resource( + ResourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/list-tags-for-resource", + Dict{String,Any}("ResourceARN" => ResourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/list-tags-for-resource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceARN" => ResourceARN), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Applies the supplied tags to a configuration. + +# Arguments +- `resource_arn`: The ARN of the configuration. +- `tags`: A list of tags to apply to the configuration. + +""" +function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return chatbot( + "POST", + "/tag-resource", + Dict{String,Any}("ResourceARN" => ResourceARN, "Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceARN, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/tag-resource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceARN" => ResourceARN, "Tags" => Tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes the supplied tags from a configuration + +# Arguments +- `resource_arn`: The ARN of the configuration. +- `tag_keys`: A list of tag keys to remove from the configuration. + +""" +function untag_resource( + ResourceARN, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/untag-resource", + Dict{String,Any}("ResourceARN" => ResourceARN, "TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceARN, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/untag-resource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceARN" => ResourceARN, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_account_preferences() + update_account_preferences(params::Dict{String,<:Any}) + +Update Chatbot account level preferences + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"TrainingDataCollectionEnabled"`: Turns on training data collection. This helps improve + the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer + information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot + generated responses, and interaction data. This data helps us to continuously improve and + develop Artificial Intelligence (AI) technologies. Your data is not shared with any third + parties and is protected using sophisticated controls to prevent unauthorized access and + misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for + training AWS Chatbot’s AI technologies. 
+- `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat + configuration. +""" +function update_account_preferences(; aws_config::AbstractAWSConfig=global_aws_config()) + return chatbot( + "POST", + "/update-account-preferences"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_account_preferences( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/update-account-preferences", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_chime_webhook_configuration(chat_configuration_arn) + update_chime_webhook_configuration(chat_configuration_arn, params::Dict{String,<:Any}) + +Updates a Chime Webhook Configuration + +# Arguments +- `chat_configuration_arn`: The ARN of the ChimeWebhookConfiguration to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IamRoleArn"`: The ARN of the IAM role that defines the permissions for AWS Chatbot. + This is a user-defined role that AWS Chatbot will assume. This is not the service-linked + role. For more information, see IAM Policies for AWS Chatbot. +- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. +- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. +- `"WebhookDescription"`: Description of the webhook. Recommend using the convention + `RoomName/WebhookName`. See Chime setup tutorial for more details: + https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html. +- `"WebhookUrl"`: URL for the Chime webhook. +""" +function update_chime_webhook_configuration( + ChatConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/update-chime-webhook-configuration", + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_chime_webhook_configuration( + ChatConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/update-chime-webhook-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ChatConfigurationArn" => ChatConfigurationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_microsoft_teams_channel_configuration(channel_id, chat_configuration_arn) + update_microsoft_teams_channel_configuration(channel_id, chat_configuration_arn, params::Dict{String,<:Any}) + +Updates MS Teams Channel Configuration + +# Arguments +- `channel_id`: The ID of the Microsoft Teams channel. +- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChannelName"`: The name of the Microsoft Teams channel. +- `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel + guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is + not set. +- `"IamRoleArn"`: The ARN of the IAM role that defines the permissions for AWS Chatbot. + This is a user-defined role that AWS Chatbot will assume. This is not the service-linked + role. For more information, see IAM Policies for AWS Chatbot. +- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. 
+- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. +- `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat + configuration. +""" +function update_microsoft_teams_channel_configuration( + ChannelId, ChatConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/update-ms-teams-channel-configuration", + Dict{String,Any}( + "ChannelId" => ChannelId, "ChatConfigurationArn" => ChatConfigurationArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_microsoft_teams_channel_configuration( + ChannelId, + ChatConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/update-ms-teams-channel-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ChannelId" => ChannelId, "ChatConfigurationArn" => ChatConfigurationArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_slack_channel_configuration(chat_configuration_arn, slack_channel_id) + update_slack_channel_configuration(chat_configuration_arn, slack_channel_id, params::Dict{String,<:Any}) + +Updates Slack Channel Configuration + +# Arguments +- `chat_configuration_arn`: The ARN of the SlackChannelConfiguration to update. +- `slack_channel_id`: The ID of the Slack channel. To get the ID, open Slack, right click + on the channel name in the left pane, then choose Copy Link. The channel ID is the + 9-character string at the end of the URL. For example, ABCBBLZZZ. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel + guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is + not set. +- `"IamRoleArn"`: The ARN of the IAM role that defines the permissions for AWS Chatbot. + This is a user-defined role that AWS Chatbot will assume. This is not the service-linked + role. For more information, see IAM Policies for AWS Chatbot. +- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. +- `"SlackChannelName"`: The name of the Slack Channel. +- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. +- `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat + configuration. 
+""" +function update_slack_channel_configuration( + ChatConfigurationArn, SlackChannelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return chatbot( + "POST", + "/update-slack-channel-configuration", + Dict{String,Any}( + "ChatConfigurationArn" => ChatConfigurationArn, + "SlackChannelId" => SlackChannelId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_slack_channel_configuration( + ChatConfigurationArn, + SlackChannelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chatbot( + "POST", + "/update-slack-channel-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ChatConfigurationArn" => ChatConfigurationArn, + "SlackChannelId" => SlackChannelId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/chime.jl b/src/services/chime.jl index cd7d74bd2a..099728e69e 100644 --- a/src/services/chime.jl +++ b/src/services/chime.jl @@ -51,7 +51,11 @@ end associate_phone_numbers_with_voice_connector(e164_phone_numbers, voice_connector_id) associate_phone_numbers_with_voice_connector(e164_phone_numbers, voice_connector_id, params::Dict{String,<:Any}) -Associates phone numbers with the specified Amazon Chime Voice Connector. +Associates phone numbers with the specified Amazon Chime Voice Connector. This API is is +no longer supported and will not be updated. We recommend using the latest version, +AssociatePhoneNumbersWithVoiceConnector, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. @@ -97,7 +101,11 @@ end associate_phone_numbers_with_voice_connector_group(e164_phone_numbers, voice_connector_group_id) associate_phone_numbers_with_voice_connector_group(e164_phone_numbers, voice_connector_group_id, params::Dict{String,<:Any}) -Associates phone numbers with the specified Amazon Chime Voice Connector group. +Associates phone numbers with the specified Amazon Chime Voice Connector group. This API +is is no longer supported and will not be updated. We recommend using the latest version, +AssociatePhoneNumbersWithVoiceConnectorGroup, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. @@ -188,9 +196,12 @@ end batch_create_attendee(attendees, meeting_id) batch_create_attendee(attendees, meeting_id, params::Dict{String,<:Any}) - Creates up to 100 new attendees for an active Amazon Chime SDK meeting. For more -information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime -SDK Developer Guide. +Creates up to 100 new attendees for an active Amazon Chime SDK meeting. This API is is no +longer supported and will not be updated. We recommend using the latest version, +BatchCreateAttendee, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. For more information about the Amazon +Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide. 
# Arguments - `attendees`: The request containing the attendees to create. @@ -229,7 +240,11 @@ end batch_create_channel_membership(member_arns, channel_arn) batch_create_channel_membership(member_arns, channel_arn, params::Dict{String,<:Any}) -Adds a specified number of users to a channel. +Adds a specified number of users to a channel. This API is is no longer supported and +will not be updated. We recommend using the latest version, BatchCreateChannelMembership, +in the Amazon Chime SDK. Using the latest version requires migrating to a dedicated +namespace. For more information, refer to Migrating from the Amazon Chime namespace in the +Amazon Chime SDK Developer Guide. # Arguments - `member_arns`: The ARNs of the members you want to add to the channel. @@ -409,9 +424,9 @@ end Removes the suspension from up to 50 previously suspended users for the specified Amazon Chime EnterpriseLWA account. Only users on EnterpriseLWA accounts can be unsuspended using this action. For more information about different account types, see Managing Your Amazon -Chime Accounts in the account types, in the Amazon Chime Administration Guide. -Previously suspended users who are unsuspended using this action are returned to Registered -status. Users who are not previously suspended are ignored. +Chime Accounts in the account types, in the Amazon Chime Administration Guide. Previously +suspended users who are unsuspended using this action are returned to Registered status. +Users who are not previously suspended are ignored. # Arguments - `user_id_list`: The request containing the user IDs to unsuspend. @@ -580,7 +595,10 @@ end Creates an Amazon Chime SDK messaging AppInstance under an AWS account. Only SDK messaging customers use this API. CreateAppInstance supports idempotency behavior as described in the -AWS API Standard. +AWS API Standard. This API is is no longer supported and will not be updated. We +recommend using the latest version, CreateAppInstance, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The ClientRequestToken of the AppInstance. @@ -630,9 +648,12 @@ end create_app_instance_admin(app_instance_admin_arn, app_instance_arn, params::Dict{String,<:Any}) Promotes an AppInstanceUser to an AppInstanceAdmin. The promoted user can perform the -following actions. ChannelModerator actions across all channels in the AppInstance. -DeleteChannelMessage actions. Only an AppInstanceUser can be promoted to an -AppInstanceAdmin role. +following actions. This API is is no longer supported and will not be updated. We +recommend using the latest version, CreateAppInstanceAdmin, in the Amazon Chime SDK. Using +the latest version requires migrating to a dedicated namespace. For more information, refer +to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. +ChannelModerator actions across all channels in the AppInstance. DeleteChannelMessage +actions. Only an AppInstanceUser can be promoted to an AppInstanceAdmin role. # Arguments - `app_instance_admin_arn`: The ARN of the administrator of the current AppInstance. @@ -676,7 +697,11 @@ end create_app_instance_user(app_instance_arn, app_instance_user_id, client_request_token, name, params::Dict{String,<:Any}) Creates a user under an Amazon Chime AppInstance. The request consists of a unique -appInstanceUserId and Name for that user. 
+appInstanceUserId and Name for that user. This API is is no longer supported and will not +be updated. We recommend using the latest version, CreateAppInstanceUser, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance request. @@ -743,7 +768,10 @@ end Creates a new attendee for an active Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer -Guide. +Guide. This API is is no longer supported and will not be updated. We recommend using +the latest version, CreateAttendee, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `external_user_id`: The Amazon Chime SDK external user ID. An idempotency token. Links @@ -830,7 +858,11 @@ end Creates a channel to which you can add users and send messages. Restriction: You can't change a channel's privacy. The x-amz-chime-bearer request header is mandatory. Use the -AppInstanceUserArn of the user that makes the API call as the value in the header. +AppInstanceUserArn of the user that makes the API call as the value in the header. This +API is is no longer supported and will not be updated. We recommend using the latest +version, CreateChannel, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the channel request. @@ -902,7 +934,10 @@ To undo a ban, you first have to DeleteChannelBan, and then CreateChannelMembers are cleaned up when you delete users or channels. If you ban a user who is already part of a channel, that user is automatically kicked from the channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call -as the value in the header. +as the value in the header. This API is is no longer supported and will not be updated. +We recommend using the latest version, CreateChannelBan, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `member_arn`: The ARN of the member being banned. @@ -950,7 +985,11 @@ messages Leave the channel Privacy settings impact this action as follows: Channels: You do not need to be a member to list messages, but you must be a member to send messages. Private Channels: You must be a member to list or send messages. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, CreateChannelMembership, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `member_arn`: The ARN of the member you want to add to the channel. 
@@ -1003,7 +1042,11 @@ Creates a new ChannelModerator. A channel moderator can: Add and remove other the channel. Add and remove other moderators of the channel. Add and remove user bans for the channel. Redact messages in the channel. List messages in the channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, CreateChannelModerator, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `channel_moderator_arn`: The ARN of the moderator. @@ -1049,7 +1092,11 @@ end create_media_capture_pipeline(sink_arn, sink_type, source_arn, source_type) create_media_capture_pipeline(sink_arn, sink_type, source_arn, source_type, params::Dict{String,<:Any}) -Creates a media capture pipeline. +Creates a media capture pipeline. This API is is no longer supported and will not be +updated. We recommend using the latest version, CreateMediaCapturePipeline, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `sink_arn`: The ARN of the sink type. @@ -1120,10 +1167,14 @@ end create_meeting(client_request_token) create_meeting(client_request_token, params::Dict{String,<:Any}) - Creates a new Amazon Chime SDK meeting in the specified media Region with no initial +Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime SDK Developer Guide . For more information about the Amazon -Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide . +Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide. This +API is is no longer supported and will not be updated. We recommend using the latest +version, CreateMeeting, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The unique identifier for the client request. Use a different @@ -1180,6 +1231,7 @@ forth) to initiate an outbound call to a public switched telephone network (PSTN them into a Chime meeting. Also ensures that the From number belongs to the customer. To play welcome audio or implement an interactive voice response (IVR), use the CreateSipMediaApplicationCall action with the corresponding SIP media application ID. +This API is is not available in a dedicated namespace. # Arguments - `from_phone_number`: Phone number used as the caller ID when the remote party receives a @@ -1243,7 +1295,11 @@ end Creates a new Amazon Chime SDK meeting in the specified media Region, with attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime SDK Developer Guide . For more information about the Amazon Chime SDK, see -Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide . 
+Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide . This API is is no +longer supported and will not be updated. We recommend using the latest version, +CreateMeetingWithAttendees, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The unique identifier for the client request. Use a different @@ -1347,7 +1403,10 @@ end create_proxy_session(capabilities, participant_phone_numbers, voice_connector_id, params::Dict{String,<:Any}) Creates a proxy session on the specified Amazon Chime Voice Connector for the specified -participant phone numbers. +participant phone numbers. This API is is no longer supported and will not be updated. We +recommend using the latest version, CreateProxySession, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `capabilities`: The proxy session capabilities. @@ -1500,7 +1559,11 @@ end create_sip_media_application(aws_region, endpoints, name) create_sip_media_application(aws_region, endpoints, name, params::Dict{String,<:Any}) -Creates a SIP media application. +Creates a SIP media application. This API is is no longer supported and will not be +updated. We recommend using the latest version, CreateSipMediaApplication, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `aws_region`: The AWS Region assigned to the SIP media application. @@ -1551,7 +1614,11 @@ end create_sip_media_application_call(from_phone_number, to_phone_number, sip_media_application_id, params::Dict{String,<:Any}) Creates an outbound call to a phone number from the phone number specified in the request, -and it invokes the endpoint of the specified sipMediaApplicationId. +and it invokes the endpoint of the specified sipMediaApplicationId. This API is is no +longer supported and will not be updated. We recommend using the latest version, +CreateSipMediaApplicationCall, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `from_phone_number`: The phone number that a user calls from. This is a phone number in @@ -1608,7 +1675,10 @@ end create_sip_rule(name, target_applications, trigger_type, trigger_value, params::Dict{String,<:Any}) Creates a SIP rule which can be used to run a SIP media application as a target for a -specific trigger type. +specific trigger type. This API is is no longer supported and will not be updated. We +recommend using the latest version, CreateSipRule, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the SIP rule. @@ -1718,10 +1788,14 @@ end create_voice_connector(name, require_encryption, params::Dict{String,<:Any}) Creates an Amazon Chime Voice Connector under the administrator's AWS account. 
You can -choose to create an Amazon Chime Voice Connector in a specific AWS Region. Enabling +choose to create an Amazon Chime Voice Connector in a specific AWS Region. Enabling CreateVoiceConnectorRequestRequireEncryption configures your Amazon Chime Voice Connector to use TLS transport for SIP signaling and Secure RTP (SRTP) for media. Inbound calls use -TLS transport, and unencrypted outbound calls are blocked. +TLS transport, and unencrypted outbound calls are blocked. This API is is no longer +supported and will not be updated. We recommend using the latest version, +CreateVoiceConnector, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector. @@ -1773,7 +1847,11 @@ Creates an Amazon Chime Voice Connector group under the administrator's AWS acco can associate Amazon Chime Voice Connectors with the Amazon Chime Voice Connector group by including VoiceConnectorItems in the request. You can include Amazon Chime Voice Connectors from different AWS Regions in your group. This creates a fault tolerant mechanism for -fallback in case of availability events. +fallback in case of availability events. This API is is no longer supported and will not +be updated. We recommend using the latest version, CreateVoiceConnectorGroup, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector group. @@ -1847,7 +1925,11 @@ end delete_app_instance(app_instance_arn) delete_app_instance(app_instance_arn, params::Dict{String,<:Any}) -Deletes an AppInstance and all associated data asynchronously. +Deletes an AppInstance and all associated data asynchronously. This API is is no longer +supported and will not be updated. We recommend using the latest version, +DeleteAppInstance, in the Amazon Chime SDK. Using the latest version requires migrating to +a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -1882,6 +1964,10 @@ end delete_app_instance_admin(app_instance_admin_arn, app_instance_arn, params::Dict{String,<:Any}) Demotes an AppInstanceAdmin to an AppInstanceUser. This action does not delete the user. +This API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteAppInstanceAdmin, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_admin_arn`: The ARN of the AppInstance's administrator. @@ -1917,7 +2003,11 @@ end delete_app_instance_streaming_configurations(app_instance_arn) delete_app_instance_streaming_configurations(app_instance_arn, params::Dict{String,<:Any}) -Deletes the streaming configurations of an AppInstance. +Deletes the streaming configurations of an AppInstance. This API is is no longer +supported and will not be updated. We recommend using the latest version, +DeleteAppInstanceStreamingConfigurations, in the Amazon Chime SDK. 
Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the streaming configurations being deleted. @@ -1951,7 +2041,10 @@ end delete_app_instance_user(app_instance_user_arn) delete_app_instance_user(app_instance_user_arn, params::Dict{String,<:Any}) -Deletes an AppInstanceUser. +Deletes an AppInstanceUser. This API is is no longer supported and will not be updated. +We recommend using the latest version, DeleteAppInstanceUser, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_user_arn`: The ARN of the user request being deleted. @@ -1988,7 +2081,10 @@ end Deletes an attendee from the specified Amazon Chime SDK meeting and deletes their JoinToken. Attendees are automatically deleted when a Amazon Chime SDK meeting is deleted. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the -Amazon Chime SDK Developer Guide. +Amazon Chime SDK Developer Guide. This API is is no longer supported and will not be +updated. We recommend using the latest version, DeleteAttendee, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `attendee_id`: The Amazon Chime SDK attendee ID. @@ -2027,6 +2123,10 @@ end Immediately makes a channel and its memberships inaccessible and marks them for deletion. This is an irreversible process. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. +This API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannel, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel being deleted. @@ -2063,7 +2163,10 @@ end Removes a user from a channel's ban list. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, DeleteChannelBan, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel from which the AppInstanceUser was banned. @@ -2104,6 +2207,10 @@ end Removes a member from a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. +This API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannelMembership, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. 
# Arguments - `channel_arn`: The ARN of the channel from which you want to remove the user. @@ -2145,7 +2252,11 @@ end Deletes a channel message. Only admins can perform this action. Deletion makes messages inaccessible immediately. A background process deletes any revisions created by UpdateChannelMessage. The x-amz-chime-bearer request header is mandatory. Use the -AppInstanceUserArn of the user that makes the API call as the value in the header. +AppInstanceUserArn of the user that makes the API call as the value in the header. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannelMessage, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -2185,7 +2296,11 @@ end delete_channel_moderator(channel_arn, channel_moderator_arn, params::Dict{String,<:Any}) Deletes a channel moderator. The x-amz-chime-bearer request header is mandatory. Use the -AppInstanceUserArn of the user that makes the API call as the value in the header. +AppInstanceUserArn of the user that makes the API call as the value in the header. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannelModerator, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -2260,7 +2375,11 @@ end delete_media_capture_pipeline(media_pipeline_id) delete_media_capture_pipeline(media_pipeline_id, params::Dict{String,<:Any}) -Deletes the media capture pipeline. +Deletes the media capture pipeline. This API is is no longer supported and will not be +updated. We recommend using the latest version, DeleteMediaCapturePipeline, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `media_pipeline_id`: The ID of the media capture pipeline being deleted. @@ -2297,7 +2416,10 @@ end Deletes the specified Amazon Chime SDK meeting. The operation deletes all attendees, disconnects all clients, and prevents new clients from joining the meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime -SDK Developer Guide. +SDK Developer Guide. This API is is no longer supported and will not be updated. We +recommend using the latest version, DeleteMeeting, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. @@ -2366,7 +2488,11 @@ end delete_proxy_session(proxy_session_id, voice_connector_id) delete_proxy_session(proxy_session_id, voice_connector_id, params::Dict{String,<:Any}) -Deletes the specified proxy session from the specified Amazon Chime Voice Connector. +Deletes the specified proxy session from the specified Amazon Chime Voice Connector. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteProxySession, in the Amazon Chime SDK. 
Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `proxy_session_id`: The proxy session ID. @@ -2474,7 +2600,11 @@ end delete_sip_media_application(sip_media_application_id) delete_sip_media_application(sip_media_application_id, params::Dict{String,<:Any}) -Deletes a SIP media application. +Deletes a SIP media application. This API is is no longer supported and will not be +updated. We recommend using the latest version, DeleteSipMediaApplication, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -2508,7 +2638,11 @@ end delete_sip_rule(sip_rule_id) delete_sip_rule(sip_rule_id, params::Dict{String,<:Any}) -Deletes a SIP rule. You must disable a SIP rule before you can delete it. +Deletes a SIP rule. You must disable a SIP rule before you can delete it. This API is is +no longer supported and will not be updated. We recommend using the latest version, +DeleteSipRule, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_rule_id`: The SIP rule ID. @@ -2541,7 +2675,11 @@ end delete_voice_connector(voice_connector_id, params::Dict{String,<:Any}) Deletes the specified Amazon Chime Voice Connector. Any phone numbers associated with the -Amazon Chime Voice Connector must be disassociated from it before it can be deleted. +Amazon Chime Voice Connector must be disassociated from it before it can be deleted. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteVoiceConnector, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2576,7 +2714,11 @@ end delete_voice_connector_emergency_calling_configuration(voice_connector_id, params::Dict{String,<:Any}) Deletes the emergency calling configuration details from the specified Amazon Chime Voice -Connector. +Connector. This API is is no longer supported and will not be updated. We recommend using +the latest version, DeleteVoiceConnectorEmergencyCallingConfiguration, in the Amazon Chime +SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2611,7 +2753,11 @@ end delete_voice_connector_group(voice_connector_group_id, params::Dict{String,<:Any}) Deletes the specified Amazon Chime Voice Connector group. Any VoiceConnectorItems and phone -numbers associated with the group must be removed before it can be deleted. +numbers associated with the group must be removed before it can be deleted. This API is +is no longer supported and will not be updated. We recommend using the latest version, +DeleteVoiceConnectorGroup, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. 
For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_group_id`: The Amazon Chime Voice Connector group ID. @@ -2647,7 +2793,11 @@ end Deletes the origination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted -prior to deleting the origination settings. +prior to deleting the origination settings. This API is is no longer supported and will +not be updated. We recommend using the latest version, DeleteVoiceConnectorOrigination, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2681,7 +2831,11 @@ end delete_voice_connector_proxy(voice_connector_id) delete_voice_connector_proxy(voice_connector_id, params::Dict{String,<:Any}) -Deletes the proxy configuration from the specified Amazon Chime Voice Connector. +Deletes the proxy configuration from the specified Amazon Chime Voice Connector. This API +is is no longer supported and will not be updated. We recommend using the latest version, +DeleteVoiceProxy, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2715,7 +2869,11 @@ end delete_voice_connector_streaming_configuration(voice_connector_id) delete_voice_connector_streaming_configuration(voice_connector_id, params::Dict{String,<:Any}) -Deletes the streaming configuration for the specified Amazon Chime Voice Connector. +Deletes the streaming configuration for the specified Amazon Chime Voice Connector. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteVoiceConnectorStreamingConfiguration, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2751,7 +2909,11 @@ end Deletes the termination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted -prior to deleting the termination settings. +prior to deleting the termination settings. This API is is no longer supported and will +not be updated. We recommend using the latest version, DeleteVoiceConnectorTermination, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2786,7 +2948,11 @@ end delete_voice_connector_termination_credentials(usernames, voice_connector_id, params::Dict{String,<:Any}) Deletes the specified SIP credentials used by your equipment to authenticate during call -termination. +termination. This API is is no longer supported and will not be updated. We recommend +using the latest version, DeleteVoiceConnectorTerminationCredentials, in the Amazon Chime +SDK. 
Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `usernames`: The RFC2617 compliant username associated with the SIP credentials, in @@ -2826,7 +2992,11 @@ end describe_app_instance(app_instance_arn) describe_app_instance(app_instance_arn, params::Dict{String,<:Any}) -Returns the full details of an AppInstance. +Returns the full details of an AppInstance. This API is is no longer supported and will +not be updated. We recommend using the latest version, DescribeAppInstance, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -2860,7 +3030,11 @@ end describe_app_instance_admin(app_instance_admin_arn, app_instance_arn) describe_app_instance_admin(app_instance_admin_arn, app_instance_arn, params::Dict{String,<:Any}) -Returns the full details of an AppInstanceAdmin. +Returns the full details of an AppInstanceAdmin. This API is is no longer supported and +will not be updated. We recommend using the latest version, DescribeAppInstanceAdmin, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `app_instance_admin_arn`: The ARN of the AppInstanceAdmin. @@ -2896,7 +3070,11 @@ end describe_app_instance_user(app_instance_user_arn) describe_app_instance_user(app_instance_user_arn, params::Dict{String,<:Any}) -Returns the full details of an AppInstanceUser. +Returns the full details of an AppInstanceUser. This API is is no longer supported and +will not be updated. We recommend using the latest version, DescribeAppInstanceUser, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `app_instance_user_arn`: The ARN of the AppInstanceUser. @@ -2932,7 +3110,11 @@ end Returns the full details of a channel in an Amazon Chime AppInstance. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, DescribeChannel, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -2969,7 +3151,10 @@ end Returns the full details of a channel ban. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, DescribeChannelBan, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. 
# Arguments - `channel_arn`: The ARN of the channel from which the user is banned. @@ -3010,7 +3195,10 @@ end Returns the full details of a user's channel membership. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, DescribeChannelMembership, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -3051,7 +3239,11 @@ end Returns the details of a channel based on the membership of the specified AppInstanceUser. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user -that makes the API call as the value in the header. +that makes the API call as the value in the header. This API is is no longer supported +and will not be updated. We recommend using the latest version, +DescribeChannelMembershipForAppInstanceUser, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-user-arn`: The ARN of the user in a channel. @@ -3099,7 +3291,11 @@ end Returns the full details of a channel moderated by the specified AppInstanceUser. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, +DescribeChannelModeratedByAppInstanceUser, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-user-arn`: The ARN of the AppInstanceUser in the moderated channel. @@ -3147,7 +3343,10 @@ end Returns the full details of a single ChannelModerator. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, DescribeChannelModerator, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -3223,6 +3422,10 @@ end disassociate_phone_numbers_from_voice_connector(e164_phone_numbers, voice_connector_id, params::Dict{String,<:Any}) Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector. + This API is is no longer supported and will not be updated. We recommend using the latest +version, DisassociatePhoneNumbersFromVoiceConnector, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. 
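As a usage sketch (not part of the generated service code), the deprecated
DisassociatePhoneNumbersFromVoiceConnector operation documented above can be called through
AWS.jl's usual @service pattern; the phone number and Voice Connector ID below are placeholders.

using AWS
@service Chime

# Placeholder inputs; substitute a real E.164 number and Voice Connector ID.
e164_phone_numbers = ["+12065550100"]
voice_connector_id = "abcdef1ghij2klmno3pqr4"

# Required arguments are positional; optional request fields go in a trailing
# params::Dict{String,<:Any}, as the docstrings above describe.
Chime.disassociate_phone_numbers_from_voice_connector(
    e164_phone_numbers, voice_connector_id
)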
@@ -3264,7 +3467,10 @@ end disassociate_phone_numbers_from_voice_connector_group(e164_phone_numbers, voice_connector_group_id, params::Dict{String,<:Any}) Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector -group. +group. This API is is no longer supported and will not be updated. We recommend using the +latest version, DisassociatePhoneNumbersFromVoiceConnectorGroup, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. @@ -3413,7 +3619,11 @@ end get_app_instance_retention_settings(app_instance_arn) get_app_instance_retention_settings(app_instance_arn, params::Dict{String,<:Any}) -Gets the retention settings for an AppInstance. +Gets the retention settings for an AppInstance. This API is is no longer supported and +will not be updated. We recommend using the latest version, GetMessagingRetentionSettings, +in the Amazon Chime SDK. Using the latest version requires migrating to a dedicated +namespace. For more information, refer to Migrating from the Amazon Chime namespace in the +Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -3447,7 +3657,11 @@ end get_app_instance_streaming_configurations(app_instance_arn) get_app_instance_streaming_configurations(app_instance_arn, params::Dict{String,<:Any}) -Gets the streaming settings for an AppInstance. +Gets the streaming settings for an AppInstance. This API is is no longer supported and +will not be updated. We recommend using the latest version, +GetMessagingStreamingConfigurations, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -3483,7 +3697,10 @@ end Gets the Amazon Chime SDK attendee details for a specified meeting ID and attendee ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon -Chime SDK Developer Guide . +Chime SDK Developer Guide. This API is is no longer supported and will not be updated. +We recommend using the latest version, GetAttendee, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `attendee_id`: The Amazon Chime SDK attendee ID. @@ -3556,7 +3773,10 @@ end Gets the full details of a channel message. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, GetChannelMessage, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -3651,7 +3871,11 @@ end get_media_capture_pipeline(media_pipeline_id) get_media_capture_pipeline(media_pipeline_id, params::Dict{String,<:Any}) -Gets an existing media capture pipeline. +Gets an existing media capture pipeline. 
This API is is no longer supported and will not +be updated. We recommend using the latest version, GetMediaCapturePipeline, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `media_pipeline_id`: The ID of the pipeline that you want to get. @@ -3685,9 +3909,12 @@ end get_meeting(meeting_id) get_meeting(meeting_id, params::Dict{String,<:Any}) - Gets the Amazon Chime SDK meeting details for the specified meeting ID. For more -information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime -SDK Developer Guide . + This API is is no longer supported and will not be updated. We recommend using the latest +version, GetMeeting, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. Gets the Amazon Chime SDK meeting +details for the specified meeting ID. For more information about the Amazon Chime SDK, see +Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide . # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. @@ -3719,7 +3946,11 @@ end get_messaging_session_endpoint() get_messaging_session_endpoint(params::Dict{String,<:Any}) -The details of the endpoint for the messaging session. +The details of the endpoint for the messaging session. This API is is no longer supported +and will not be updated. We recommend using the latest version, +GetMessagingSessionEndpoint, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. """ function get_messaging_session_endpoint(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -3843,6 +4074,10 @@ end get_proxy_session(proxy_session_id, voice_connector_id, params::Dict{String,<:Any}) Gets the specified proxy session details for the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, GetProxySession, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `proxy_session_id`: The proxy session ID. @@ -3950,7 +4185,10 @@ end get_sip_media_application(sip_media_application_id, params::Dict{String,<:Any}) Retrieves the information for a SIP media application, including name, AWS Region, and -endpoints. +endpoints. This API is is no longer supported and will not be updated. We recommend using +the latest version, GetSipMediaApplication, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -3984,7 +4222,11 @@ end get_sip_media_application_logging_configuration(sip_media_application_id) get_sip_media_application_logging_configuration(sip_media_application_id, params::Dict{String,<:Any}) -Returns the logging configuration for the specified SIP media application. +Returns the logging configuration for the specified SIP media application. 
This API is is +no longer supported and will not be updated. We recommend using the latest version, +GetSipMediaApplicationLoggingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -4019,7 +4261,10 @@ end get_sip_rule(sip_rule_id, params::Dict{String,<:Any}) Retrieves the details of a SIP rule, such as the rule ID, name, triggers, and target -endpoints. +endpoints. This API is is no longer supported and will not be updated. We recommend using +the latest version, GetSipRule, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_rule_id`: The SIP rule ID. @@ -4124,7 +4369,11 @@ end get_voice_connector(voice_connector_id, params::Dict{String,<:Any}) Retrieves details for the specified Amazon Chime Voice Connector, such as timestamps,name, -outbound host, and encryption requirements. +outbound host, and encryption requirements. This API is is no longer supported and will +not be updated. We recommend using the latest version, GetVoiceConnector, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4159,7 +4408,11 @@ end get_voice_connector_emergency_calling_configuration(voice_connector_id, params::Dict{String,<:Any}) Gets the emergency calling configuration details for the specified Amazon Chime Voice -Connector. +Connector. This API is is no longer supported and will not be updated. We recommend using +the latest version, GetVoiceConnectorEmergencyCallingConfiguration, in the Amazon Chime +SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4194,7 +4447,11 @@ end get_voice_connector_group(voice_connector_group_id, params::Dict{String,<:Any}) Retrieves details for the specified Amazon Chime Voice Connector group, such as -timestamps,name, and associated VoiceConnectorItems. +timestamps,name, and associated VoiceConnectorItems. This API is is no longer supported +and will not be updated. We recommend using the latest version, GetVoiceConnectorGroup, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `voice_connector_group_id`: The Amazon Chime Voice Connector group ID. @@ -4229,7 +4486,11 @@ end get_voice_connector_logging_configuration(voice_connector_id, params::Dict{String,<:Any}) Retrieves the logging configuration details for the specified Amazon Chime Voice Connector. -Shows whether SIP message logs are enabled for sending to Amazon CloudWatch Logs. +Shows whether SIP message logs are enabled for sending to Amazon CloudWatch Logs. This +API is is no longer supported and will not be updated. 
We recommend using the latest +version, GetVoiceConnectorLoggingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4264,6 +4525,10 @@ end get_voice_connector_origination(voice_connector_id, params::Dict{String,<:Any}) Retrieves origination setting details for the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorOrigination, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4297,7 +4562,11 @@ end get_voice_connector_proxy(voice_connector_id) get_voice_connector_proxy(voice_connector_id, params::Dict{String,<:Any}) -Gets the proxy configuration details for the specified Amazon Chime Voice Connector. +Gets the proxy configuration details for the specified Amazon Chime Voice Connector. This +API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorProxy, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime voice connector ID. @@ -4333,7 +4602,11 @@ end Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis. It also -shows the retention period, in hours, for the Amazon Kinesis data. +shows the retention period, in hours, for the Amazon Kinesis data. This API is is no +longer supported and will not be updated. We recommend using the latest version, +GetVoiceConnectorStreamingConfiguration, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4368,6 +4641,10 @@ end get_voice_connector_termination(voice_connector_id, params::Dict{String,<:Any}) Retrieves termination setting details for the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorTermination, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4401,6 +4678,10 @@ end get_voice_connector_termination_health(voice_connector_id) get_voice_connector_termination_health(voice_connector_id, params::Dict{String,<:Any}) + This API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorTerminationHealth, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. 
For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. Retrieves information about the last time a SIP OPTIONS ping was received from your SIP infrastructure for the specified Amazon Chime Voice Connector. @@ -4506,7 +4787,11 @@ end list_app_instance_admins(app_instance_arn) list_app_instance_admins(app_instance_arn, params::Dict{String,<:Any}) -Returns a list of the administrators in the AppInstance. +Returns a list of the administrators in the AppInstance. This API is is no longer +supported and will not be updated. We recommend using the latest version, +ListAppInstanceAdmins, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -4545,7 +4830,11 @@ end list_app_instance_users(app-instance-arn) list_app_instance_users(app-instance-arn, params::Dict{String,<:Any}) -List all AppInstanceUsers created under a single AppInstance. +List all AppInstanceUsers created under a single AppInstance. This API is is no longer +supported and will not be updated. We recommend using the latest version, +ListAppInstanceUsers, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-arn`: The ARN of the AppInstance. @@ -4589,7 +4878,11 @@ end list_app_instances() list_app_instances(params::Dict{String,<:Any}) -Lists all Amazon Chime AppInstances created under a single AWS account. +Lists all Amazon Chime AppInstances created under a single AWS account. This API is is no +longer supported and will not be updated. We recommend using the latest version, +ListAppInstances, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4618,7 +4911,9 @@ end list_attendee_tags(attendee_id, meeting_id) list_attendee_tags(attendee_id, meeting_id, params::Dict{String,<:Any}) -Lists the tags applied to an Amazon Chime SDK attendee resource. +Lists the tags applied to an Amazon Chime SDK attendee resource. ListAttendeeTags is not +supported in the Amazon Chime SDK Meetings Namespace. Update your application to remove +calls to this API. # Arguments - `attendee_id`: The Amazon Chime SDK attendee ID. @@ -4656,7 +4951,10 @@ end Lists the attendees for the specified Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer -Guide. +Guide. This API is is no longer supported and will not be updated. We recommend using +the latest version, ListAttendees, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. @@ -4731,7 +5029,10 @@ end Lists all the users banned from a particular channel. The x-amz-chime-bearer request header is mandatory. 
Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, ListChannelBans, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -4771,7 +5072,10 @@ end Lists all channel memberships in a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, ListChannelMemberships, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The maximum number of channel memberships that you want returned. @@ -4817,7 +5121,11 @@ end Lists all channels that a particular AppInstanceUser is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, +ListChannelMembershipsForAppInstanceUser, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4858,7 +5166,11 @@ default, sorted by creation timestamp in descending order. Redacted messages ap results as empty, since they are only redacted, not deleted. Deleted messages do not appear in the results. This action always returns the latest version of an edited message. Also, the x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user -that makes the API call as the value in the header. +that makes the API call as the value in the header. This API is is no longer supported +and will not be updated. We recommend using the latest version, ListChannelMessages, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -4904,7 +5216,10 @@ end Lists all the moderators for a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, ListChannelModerators, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -4949,7 +5264,10 @@ filters to narrow results. 
Functionality & restrictions Use privacy = PU retrieve all public channels in the account. Only an AppInstanceAdmin can set privacy = PRIVATE to list the private channels in an account. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, ListChannels, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-arn`: The ARN of the AppInstance. @@ -4996,7 +5314,11 @@ end A list of the channels moderated by an AppInstanceUser. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, ListChannelsModeratedByAppInstanceUser, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5032,7 +5354,11 @@ end list_media_capture_pipelines() list_media_capture_pipelines(params::Dict{String,<:Any}) -Returns a list of media capture pipelines. +Returns a list of media capture pipelines. This API is is no longer supported and will +not be updated. We recommend using the latest version, ListMediaCapturePipelines, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5064,7 +5390,11 @@ end list_meeting_tags(meeting_id) list_meeting_tags(meeting_id, params::Dict{String,<:Any}) -Lists the tags applied to an Amazon Chime SDK meeting resource. +Lists the tags applied to an Amazon Chime SDK meeting resource. This API is is no longer +supported and will not be updated. We recommend using the latest version, +ListTagsForResource, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. @@ -5096,8 +5426,10 @@ end list_meetings() list_meetings(params::Dict{String,<:Any}) - Lists up to 100 active Amazon Chime SDK meetings. For more information about the Amazon -Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide. +Lists up to 100 active Amazon Chime SDK meetings. ListMeetings is not supported in the +Amazon Chime SDK Meetings Namespace. Update your application to remove calls to this API. +For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the +Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -5183,7 +5515,11 @@ end list_proxy_sessions(voice_connector_id) list_proxy_sessions(voice_connector_id, params::Dict{String,<:Any}) -Lists the proxy sessions for the specified Amazon Chime Voice Connector. +Lists the proxy sessions for the specified Amazon Chime Voice Connector. This API is is +no longer supported and will not be updated. We recommend using the latest version, +ListProxySessions, in the Amazon Chime SDK. Using the latest version requires migrating to +a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime voice connector ID. @@ -5302,7 +5638,11 @@ end list_sip_media_applications() list_sip_media_applications(params::Dict{String,<:Any}) -Lists the SIP media applications under the administrator's AWS account. +Lists the SIP media applications under the administrator's AWS account. This API is is no +longer supported and will not be updated. We recommend using the latest version, +ListSipMediaApplications, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5334,7 +5674,11 @@ end list_sip_rules() list_sip_rules(params::Dict{String,<:Any}) -Lists the SIP rules under the administrator's AWS account. +Lists the SIP rules under the administrator's AWS account. This API is is no longer +supported and will not be updated. We recommend using the latest version, ListSipRules, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5397,7 +5741,12 @@ end list_tags_for_resource(arn) list_tags_for_resource(arn, params::Dict{String,<:Any}) -Lists the tags applied to an Amazon Chime SDK meeting resource. +Lists the tags applied to an Amazon Chime SDK meeting and messaging resources. This API +is is no longer supported and will not be updated. We recommend using the applicable latest +version in the Amazon Chime SDK. For meetings: ListTagsForResource. For messaging: +ListTagsForResource. Using the latest version requires migrating to a dedicated +namespace. For more information, refer to Migrating from the Amazon Chime namespace in the +Amazon Chime SDK Developer Guide. # Arguments - `arn`: The resource ARN. @@ -5468,7 +5817,11 @@ end list_voice_connector_groups() list_voice_connector_groups(params::Dict{String,<:Any}) -Lists the Amazon Chime Voice Connector groups for the administrator's AWS account. +Lists the Amazon Chime Voice Connector groups for the administrator's AWS account. This +API is is no longer supported and will not be updated. We recommend using the latest +version, ListVoiceConnectorGroups, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -5499,7 +5852,11 @@ end list_voice_connector_termination_credentials(voice_connector_id) list_voice_connector_termination_credentials(voice_connector_id, params::Dict{String,<:Any}) -Lists the SIP credentials for the specified Amazon Chime Voice Connector. +Lists the SIP credentials for the specified Amazon Chime Voice Connector. This API is is +no longer supported and will not be updated. We recommend using the latest version, +ListVoiceConnectorTerminationCredentials, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -5533,7 +5890,11 @@ end list_voice_connectors() list_voice_connectors(params::Dict{String,<:Any}) -Lists the Amazon Chime Voice Connectors for the administrator's AWS account. +Lists the Amazon Chime Voice Connectors for the administrator's AWS account. This API is +is no longer supported and will not be updated. We recommend using the latest version, +ListVoiceConnectors, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5595,7 +5956,11 @@ end put_app_instance_retention_settings(app_instance_retention_settings, app_instance_arn) put_app_instance_retention_settings(app_instance_retention_settings, app_instance_arn, params::Dict{String,<:Any}) -Sets the amount of time in days that a given AppInstance retains data. +Sets the amount of time in days that a given AppInstance retains data. This API is is no +longer supported and will not be updated. We recommend using the latest version, +PutAppInstanceRetentionSettings, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_retention_settings`: The time in days to retain data. Data type: number. @@ -5642,7 +6007,11 @@ end put_app_instance_streaming_configurations(app_instance_streaming_configurations, app_instance_arn) put_app_instance_streaming_configurations(app_instance_streaming_configurations, app_instance_arn, params::Dict{String,<:Any}) -The data streaming configurations of an AppInstance. +The data streaming configurations of an AppInstance. This API is is no longer supported +and will not be updated. We recommend using the latest version, +PutMessagingStreamingConfigurations, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_streaming_configurations`: The streaming configurations set for an @@ -5783,7 +6152,11 @@ end put_sip_media_application_logging_configuration(sip_media_application_id) put_sip_media_application_logging_configuration(sip_media_application_id, params::Dict{String,<:Any}) -Updates the logging configuration for the specified SIP media application. +Updates the logging configuration for the specified SIP media application. This API is is +no longer supported and will not be updated. 
We recommend using the latest version, +PutSipMediaApplicationLoggingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -5823,7 +6196,11 @@ end Puts emergency calling configuration details to the specified Amazon Chime Voice Connector, such as emergency phone numbers and calling countries. Origination and termination settings must be enabled for the Amazon Chime Voice Connector before emergency calling can be -configured. +configured. This API is is no longer supported and will not be updated. We recommend +using the latest version, PutVoiceConnectorEmergencyCallingConfiguration, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `emergency_calling_configuration`: The emergency calling configuration details. @@ -5872,7 +6249,10 @@ end Adds a logging configuration for the specified Amazon Chime Voice Connector. The logging configuration specifies whether SIP message logs are enabled for sending to Amazon -CloudWatch Logs. +CloudWatch Logs. This API is is no longer supported and will not be updated. We recommend +using the latest version, PutVoiceConnectorLoggingConfiguration, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `logging_configuration`: The logging configuration details to add. @@ -5919,7 +6299,11 @@ end Adds origination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to -turning off origination settings. +turning off origination settings. This API is is no longer supported and will not be +updated. We recommend using the latest version, PutVoiceConnectorOrigination, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `origination`: The origination setting details to add. @@ -5959,6 +6343,10 @@ end put_voice_connector_proxy(default_session_expiry_minutes, phone_number_pool_countries, voice_connector_id, params::Dict{String,<:Any}) Puts the specified proxy configuration to the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, PutVoiceConnectorProxy, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `default_session_expiry_minutes`: The default number of minutes allowed for proxy @@ -6020,7 +6408,11 @@ end Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to -Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data. +Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data. 
This +API is is no longer supported and will not be updated. We recommend using the latest +version, PutVoiceConnectorStreamingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `streaming_configuration`: The streaming configuration details to add. @@ -6067,7 +6459,11 @@ end Adds termination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to -turning off termination settings. +turning off termination settings. This API is is no longer supported and will not be +updated. We recommend using the latest version, PutVoiceConnectorTermination, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `termination`: The termination setting details to add. @@ -6106,7 +6502,11 @@ end put_voice_connector_termination_credentials(voice_connector_id) put_voice_connector_termination_credentials(voice_connector_id, params::Dict{String,<:Any}) -Adds termination SIP credentials for the specified Amazon Chime Voice Connector. +Adds termination SIP credentials for the specified Amazon Chime Voice Connector. This API +is is no longer supported and will not be updated. We recommend using the latest version, +PutVoiceConnectorTerminationCredentials, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -6146,7 +6546,10 @@ end Redacts message content, but not metadata. The message exists in the back end, but the action returns null content, and the state shows as redacted. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call -as the value in the header. +as the value in the header. This API is is no longer supported and will not be updated. +We recommend using the latest version, RedactChannelMessage, in the Amazon Chime SDK. Using +the latest version requires migrating to a dedicated namespace. For more information, refer +to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel containing the messages that you want to redact. @@ -6416,7 +6819,10 @@ Sends a message to a particular channel that the member is a part of. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. Also, STANDARD messages can contain 4KB of data and the 1KB of metadata. CONTROL messages can contain 30 bytes of data and no -metadata. +metadata. This API is is no longer supported and will not be updated. We recommend using +the latest version, SendChannelMessage, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The Idempotency token for each client request. 
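To illustrate how the trailing params::Dict{String,<:Any} argument carries optional request
fields for the deprecated Voice Connector operations above, here is a minimal sketch that adds
SIP termination credentials. It assumes the standard AWS.jl @service pattern, and the
"Credentials" field name plus the username/password pair are illustrative assumptions to verify
against the generated docstring.

using AWS
@service Chime

voice_connector_id = "abcdef1ghij2klmno3pqr4"  # placeholder Voice Connector ID

# Optional request fields travel in the params Dict; the "Credentials" key and
# the values below are assumptions about the request shape, not taken from this patch.
params = Dict{String,Any}(
    "Credentials" => [
        Dict{String,Any}("Username" => "sip-user", "Password" => "example-password"),
    ],
)

Chime.put_voice_connector_termination_credentials(voice_connector_id, params)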
@@ -6493,6 +6899,10 @@ parameter and which combinations are valid, refer to the StartStreamTranscriptio the Amazon Transcribe Developer Guide. Amazon Chime SDK live transcription is powered by Amazon Transcribe. Use of Amazon Transcribe is subject to the AWS Service Terms, including the terms specific to the AWS Machine Learning and Artificial Intelligence Services. +This API is is no longer supported and will not be updated. We recommend using the latest +version, StartMeetingTranscription, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `transcription_configuration`: The configuration for the current transcription operation. @@ -6538,7 +6948,11 @@ end stop_meeting_transcription(meeting_id) stop_meeting_transcription(meeting_id, params::Dict{String,<:Any}) -Stops transcription for the specified meetingId. +Stops transcription for the specified meetingId. This API is is no longer supported and +will not be updated. We recommend using the latest version, StopMeetingTranscription, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `meeting_id`: The unique ID of the meeting for which you stop transcription. @@ -6572,7 +6986,9 @@ end tag_attendee(tags, attendee_id, meeting_id) tag_attendee(tags, attendee_id, meeting_id, params::Dict{String,<:Any}) -Applies the specified tags to the specified Amazon Chime SDK attendee. +Applies the specified tags to the specified Amazon Chime attendee. TagAttendee is not +supported in the Amazon Chime SDK Meetings Namespace. Update your application to remove +calls to this API. # Arguments - `tags`: The tag key-value pairs. @@ -6611,7 +7027,11 @@ end tag_meeting(tags, meeting_id) tag_meeting(tags, meeting_id, params::Dict{String,<:Any}) -Applies the specified tags to the specified Amazon Chime SDK meeting. +Applies the specified tags to the specified Amazon Chime SDK meeting. This API is is no +longer supported and will not be updated. We recommend using the latest version, +TagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `tags`: The tag key-value pairs. @@ -6646,7 +7066,11 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Applies the specified tags to the specified Amazon Chime SDK meeting resource. +Applies the specified tags to the specified Amazon Chime SDK meeting resource. This API +is is no longer supported and will not be updated. We recommend using the latest version, +TagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `resource_arn`: The resource ARN. @@ -6687,7 +7111,9 @@ end untag_attendee(tag_keys, attendee_id, meeting_id) untag_attendee(tag_keys, attendee_id, meeting_id, params::Dict{String,<:Any}) -Untags the specified tags from the specified Amazon Chime SDK attendee. +Untags the specified tags from the specified Amazon Chime SDK attendee. 
UntagAttendee is +not supported in the Amazon Chime SDK Meetings Namespace. Update your application to remove +calls to this API. # Arguments - `tag_keys`: The tag keys. @@ -6726,7 +7152,11 @@ end untag_meeting(tag_keys, meeting_id) untag_meeting(tag_keys, meeting_id, params::Dict{String,<:Any}) -Untags the specified tags from the specified Amazon Chime SDK meeting. +Untags the specified tags from the specified Amazon Chime SDK meeting. This API is no +longer supported and will not be updated. We recommend using the latest version, +UntagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `tag_keys`: The tag keys. @@ -6763,7 +7193,12 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Untags the specified tags from the specified Amazon Chime SDK meeting resource. +Untags the specified tags from the specified Amazon Chime SDK meeting resource. This API +is no longer supported and will not be updated. We recommend using the latest version, +UntagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `resource_arn`: The resource ARN. @@ -6887,7 +7322,10 @@ end update_app_instance(name, app_instance_arn) update_app_instance(name, app_instance_arn, params::Dict{String,<:Any}) -Updates AppInstance metadata. +Updates AppInstance metadata. This API is no longer supported and will not be updated. +We recommend using the latest version, UpdateAppInstance, in the Amazon Chime SDK. Using +the latest version requires migrating to a dedicated namespace. For more information, refer +to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name that you want to change. @@ -6927,7 +7365,11 @@ end update_app_instance_user(name, app_instance_user_arn) update_app_instance_user(name, app_instance_user_arn, params::Dict{String,<:Any}) -Updates the details of an AppInstanceUser. You can update names and metadata. +Updates the details of an AppInstanceUser. You can update names and metadata. This API is +no longer supported and will not be updated. We recommend using the latest version, +UpdateAppInstanceUser, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the AppInstanceUser. @@ -7007,7 +7449,11 @@ end Update a channel's attributes. Restriction: You can't change a channel's privacy. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is no longer supported and +will not be updated. We recommend using the latest version, UpdateChannel, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `mode`: The mode of the update request.
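A minimal sketch of the deprecated AppInstance update call documented in the hunks above,
assuming the standard AWS.jl @service pattern and a placeholder ARN:

using AWS
@service Chime

# Placeholder ARN; use the ARN returned when the AppInstance was created.
app_instance_arn = "arn:aws:chime:us-east-1:111122223333:app-instance/EXAMPLE"

# Renames the AppInstance via the deprecated namespace; the replacement
# UpdateAppInstance operation lives in the Chime SDK identity namespace.
Chime.update_app_instance("renamed-app-instance", app_instance_arn)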
@@ -7054,6 +7500,10 @@ end Updates the content of a message. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. +This API is is no longer supported and will not be updated. We recommend using the latest +version, UpdateChannelMessage, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -7096,7 +7546,11 @@ end The details of the time when a user last read messages in a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, UpdateChannelReadMarker, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -7239,7 +7693,11 @@ end update_proxy_session(capabilities, proxy_session_id, voice_connector_id) update_proxy_session(capabilities, proxy_session_id, voice_connector_id, params::Dict{String,<:Any}) -Updates the specified proxy session details, such as voice or SMS capabilities. +Updates the specified proxy session details, such as voice or SMS capabilities. This API +is is no longer supported and will not be updated. We recommend using the latest version, +UpdateProxySession, in the Amazon Chime SDK. Using the latest version requires migrating to +a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `capabilities`: The proxy session capabilities. @@ -7368,7 +7826,11 @@ end update_sip_media_application(sip_media_application_id) update_sip_media_application(sip_media_application_id, params::Dict{String,<:Any}) -Updates the details of the specified SIP media application. +Updates the details of the specified SIP media application. This API is is no longer +supported and will not be updated. We recommend using the latest version, +UpdateSipMediaApplication, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -7407,7 +7869,11 @@ end update_sip_media_application_call(arguments, sip_media_application_id, transaction_id, params::Dict{String,<:Any}) Invokes the AWS Lambda function associated with the SIP media application and transaction -ID in an update request. The Lambda function can then return a new set of actions. +ID in an update request. The Lambda function can then return a new set of actions. This +API is is no longer supported and will not be updated. We recommend using the latest +version, UpdateSipMediaApplicationCall, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. 
# Arguments - `arguments`: Arguments made available to the Lambda function as part of the @@ -7452,7 +7918,11 @@ end update_sip_rule(name, sip_rule_id) update_sip_rule(name, sip_rule_id, params::Dict{String,<:Any}) -Updates the details of the specified SIP rule. +Updates the details of the specified SIP rule. This API is is no longer supported and +will not be updated. We recommend using the latest version, UpdateSipRule, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `name`: The new name for the specified SIP rule. @@ -7573,7 +8043,11 @@ end update_voice_connector(name, require_encryption, voice_connector_id) update_voice_connector(name, require_encryption, voice_connector_id, params::Dict{String,<:Any}) -Updates details for the specified Amazon Chime Voice Connector. +Updates details for the specified Amazon Chime Voice Connector. This API is is no longer +supported and will not be updated. We recommend using the latest version, +UpdateVoiceConnector, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector. @@ -7623,7 +8097,11 @@ end update_voice_connector_group(name, voice_connector_items, voice_connector_group_id, params::Dict{String,<:Any}) Updates details of the specified Amazon Chime Voice Connector group, such as the name and -Amazon Chime Voice Connector priority ranking. +Amazon Chime Voice Connector priority ranking. This API is is no longer supported and +will not be updated. We recommend using the latest version, UpdateVoiceConnectorGroup, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector group. @@ -7676,7 +8154,11 @@ end Validates an address to be used for 911 calls made with Amazon Chime Voice Connectors. You can use validated addresses in a Presence Information Data Format Location Object file that you include in SIP requests. That helps ensure that addresses are routed to the appropriate -Public Safety Answering Point. +Public Safety Answering Point. This API is is no longer supported and will not be +updated. We recommend using the latest version, ValidateE911Address, in the Amazon Chime +SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `aws_account_id`: The AWS account ID. diff --git a/src/services/chime_sdk_identity.jl b/src/services/chime_sdk_identity.jl index 50e09d6fa8..4bf90ed009 100644 --- a/src/services/chime_sdk_identity.jl +++ b/src/services/chime_sdk_identity.jl @@ -1145,6 +1145,9 @@ Updates the name and metadata of an AppInstanceBot. - `name`: The name of the AppInstanceBot. - `app_instance_bot_arn`: The ARN of the AppInstanceBot. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Configuration"`: The configuration for the bot update. 
""" function update_app_instance_bot( Metadata, Name, appInstanceBotArn; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/chime_sdk_media_pipelines.jl b/src/services/chime_sdk_media_pipelines.jl index 57bcfebc07..3c9ec4e109 100644 --- a/src/services/chime_sdk_media_pipelines.jl +++ b/src/services/chime_sdk_media_pipelines.jl @@ -149,7 +149,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MediaInsightsRuntimeMetadata"`: The runtime metadata for the media insights pipeline. Consists of a key-value map of strings. - `"S3RecordingSinkRuntimeConfiguration"`: The runtime configuration for the S3 recording - sink. + sink. If specified, the settings in this structure override any settings in + S3RecordingSinkConfiguration. - `"Tags"`: The tags assigned to the media insights pipeline. """ function create_media_insights_pipeline( @@ -315,6 +316,114 @@ function create_media_live_connector_pipeline( ) end +""" + create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration) + create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration, params::Dict{String,<:Any}) + +Creates an Kinesis video stream pool for the media pipeline. + +# Arguments +- `pool_name`: The name of the video stream pool. +- `stream_configuration`: The configuration settings for the video stream. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: The token assigned to the client making the request. +- `"Tags"`: The tags assigned to the video stream pool. +""" +function create_media_pipeline_kinesis_video_stream_pool( + PoolName, StreamConfiguration; aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "POST", + "/media-pipeline-kinesis-video-stream-pools", + Dict{String,Any}( + "PoolName" => PoolName, + "StreamConfiguration" => StreamConfiguration, + "ClientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_media_pipeline_kinesis_video_stream_pool( + PoolName, + StreamConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chime_sdk_media_pipelines( + "POST", + "/media-pipeline-kinesis-video-stream-pools", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "PoolName" => PoolName, + "StreamConfiguration" => StreamConfiguration, + "ClientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_media_stream_pipeline(sinks, sources) + create_media_stream_pipeline(sinks, sources, params::Dict{String,<:Any}) + +Creates a streaming media pipeline. + +# Arguments +- `sinks`: The data sink for the media pipeline. +- `sources`: The data sources for the media pipeline. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: The token assigned to the client making the request. +- `"Tags"`: The tags assigned to the media pipeline. 
+"""
+function create_media_stream_pipeline(
+    Sinks, Sources; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return chime_sdk_media_pipelines(
+        "POST",
+        "/sdk-media-stream-pipelines",
+        Dict{String,Any}(
+            "Sinks" => Sinks, "Sources" => Sources, "ClientRequestToken" => string(uuid4())
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_media_stream_pipeline(
+    Sinks,
+    Sources,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return chime_sdk_media_pipelines(
+        "POST",
+        "/sdk-media-stream-pipelines",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "Sinks" => Sinks,
+                    "Sources" => Sources,
+                    "ClientRequestToken" => string(uuid4()),
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
"""
    delete_media_capture_pipeline(media_pipeline_id)
    delete_media_capture_pipeline(media_pipeline_id, params::Dict{String,<:Any})
@@ -418,6 +527,40 @@ function delete_media_pipeline(
    )
end

+"""
+    delete_media_pipeline_kinesis_video_stream_pool(identifier)
+    delete_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any})
+
+Deletes a Kinesis video stream pool.
+
+# Arguments
+- `identifier`: The ID of the pool being deleted.
+
+"""
+function delete_media_pipeline_kinesis_video_stream_pool(
+    identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return chime_sdk_media_pipelines(
+        "DELETE",
+        "/media-pipeline-kinesis-video-stream-pools/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_media_pipeline_kinesis_video_stream_pool(
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return chime_sdk_media_pipelines(
+        "DELETE",
+        "/media-pipeline-kinesis-video-stream-pools/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
"""
    get_media_capture_pipeline(media_pipeline_id)
    get_media_capture_pipeline(media_pipeline_id, params::Dict{String,<:Any})
@@ -521,6 +664,114 @@ function get_media_pipeline(
    )
end

+"""
+    get_media_pipeline_kinesis_video_stream_pool(identifier)
+    get_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any})
+
+Gets a Kinesis video stream pool.
+
+# Arguments
+- `identifier`: The ID of the video stream pool.
+
+"""
+function get_media_pipeline_kinesis_video_stream_pool(
+    identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return chime_sdk_media_pipelines(
+        "GET",
+        "/media-pipeline-kinesis-video-stream-pools/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_media_pipeline_kinesis_video_stream_pool(
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return chime_sdk_media_pipelines(
+        "GET",
+        "/media-pipeline-kinesis-video-stream-pools/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_speaker_search_task(identifier, speaker_search_task_id)
+    get_speaker_search_task(identifier, speaker_search_task_id, params::Dict{String,<:Any})
+
+Retrieves the details of the specified speaker search task.
+
+# Arguments
+- `identifier`: The unique identifier of the resource to be updated. Valid values include
+  the ID and ARN of the media insights pipeline.
+- `speaker_search_task_id`: The ID of the speaker search task.
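A minimal usage sketch for the new task getter (illustrative only; both identifiers below are placeholders):

```julia
using AWS
@service Chime_SDK_Media_Pipelines

# `identifier` accepts the ID or ARN of the media insights pipeline; both values here are placeholders.
pipeline_id = "1a2b3c4d-5678-90ab-cdef-EXAMPLE11111"
task_id = "1a2b3c4d-5678-90ab-cdef-EXAMPLE22222"

task = Chime_SDK_Media_Pipelines.get_speaker_search_task(pipeline_id, task_id)
```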
+ +""" +function get_speaker_search_task( + identifier, speakerSearchTaskId; aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "GET", + "/media-insights-pipelines/$(identifier)/speaker-search-tasks/$(speakerSearchTaskId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_speaker_search_task( + identifier, + speakerSearchTaskId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chime_sdk_media_pipelines( + "GET", + "/media-insights-pipelines/$(identifier)/speaker-search-tasks/$(speakerSearchTaskId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_voice_tone_analysis_task(identifier, voice_tone_analysis_task_id) + get_voice_tone_analysis_task(identifier, voice_tone_analysis_task_id, params::Dict{String,<:Any}) + +Retrieves the details of a voice tone analysis task. + +# Arguments +- `identifier`: The unique identifier of the resource to be updated. Valid values include + the ID and ARN of the media insights pipeline. +- `voice_tone_analysis_task_id`: The ID of the voice tone analysis task. + +""" +function get_voice_tone_analysis_task( + identifier, voiceToneAnalysisTaskId; aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "GET", + "/media-insights-pipelines/$(identifier)/voice-tone-analysis-tasks/$(voiceToneAnalysisTaskId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_voice_tone_analysis_task( + identifier, + voiceToneAnalysisTaskId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chime_sdk_media_pipelines( + "GET", + "/media-insights-pipelines/$(identifier)/voice-tone-analysis-tasks/$(voiceToneAnalysisTaskId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_media_capture_pipelines() list_media_capture_pipelines(params::Dict{String,<:Any}) @@ -586,6 +837,39 @@ function list_media_insights_pipeline_configurations( ) end +""" + list_media_pipeline_kinesis_video_stream_pools() + list_media_pipeline_kinesis_video_stream_pools(params::Dict{String,<:Any}) + +Lists the video stream pools in the media pipeline. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of results to return in a single call. +- `"next-token"`: The token used to return the next page of results. +""" +function list_media_pipeline_kinesis_video_stream_pools(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "GET", + "/media-pipeline-kinesis-video-stream-pools"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_media_pipeline_kinesis_video_stream_pools( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "GET", + "/media-pipeline-kinesis-video-stream-pools", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_media_pipelines() list_media_pipelines(params::Dict{String,<:Any}) @@ -650,6 +934,197 @@ function list_tags_for_resource( ) end +""" + start_speaker_search_task(voice_profile_domain_arn, identifier) + start_speaker_search_task(voice_profile_domain_arn, identifier, params::Dict{String,<:Any}) + +Starts a speaker search task. 
Before starting any speaker search tasks, you must provide +all notices and obtain all consents from the speaker as required under applicable privacy +and biometrics laws, and as required under the AWS service terms for the Amazon Chime SDK. + +# Arguments +- `voice_profile_domain_arn`: The ARN of the voice profile domain that will store the voice + profile. +- `identifier`: The unique identifier of the resource to be updated. Valid values include + the ID and ARN of the media insights pipeline. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: The unique identifier for the client request. Use a different + token for different speaker search tasks. +- `"KinesisVideoStreamSourceTaskConfiguration"`: The task configuration for the Kinesis + video stream source of the media insights pipeline. +""" +function start_speaker_search_task( + VoiceProfileDomainArn, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "POST", + "/media-insights-pipelines/$(identifier)/speaker-search-tasks?operation=start", + Dict{String,Any}( + "VoiceProfileDomainArn" => VoiceProfileDomainArn, + "ClientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_speaker_search_task( + VoiceProfileDomainArn, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chime_sdk_media_pipelines( + "POST", + "/media-insights-pipelines/$(identifier)/speaker-search-tasks?operation=start", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "VoiceProfileDomainArn" => VoiceProfileDomainArn, + "ClientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_voice_tone_analysis_task(language_code, identifier) + start_voice_tone_analysis_task(language_code, identifier, params::Dict{String,<:Any}) + +Starts a voice tone analysis task. For more information about voice tone analysis, see +Using Amazon Chime SDK voice analytics in the Amazon Chime SDK Developer Guide. Before +starting any voice tone analysis tasks, you must provide all notices and obtain all +consents from the speaker as required under applicable privacy and biometrics laws, and as +required under the AWS service terms for the Amazon Chime SDK. + +# Arguments +- `language_code`: The language code. +- `identifier`: The unique identifier of the resource to be updated. Valid values include + the ID and ARN of the media insights pipeline. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: The unique identifier for the client request. Use a different + token for different voice tone analysis tasks. +- `"KinesisVideoStreamSourceTaskConfiguration"`: The task configuration for the Kinesis + video stream source of the media insights pipeline. 
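A hedged usage sketch for starting a voice tone analysis task (illustrative only; the pipeline ID and stream ARN are placeholders, and the nested `KinesisVideoStreamSourceTaskConfiguration` field names are assumptions about that structure, not taken from this patch):

```julia
using AWS
@service Chime_SDK_Media_Pipelines

pipeline_id = "1a2b3c4d-5678-90ab-cdef-EXAMPLE11111"  # media insights pipeline ID or ARN (placeholder)
stream_arn = "arn:aws:kinesisvideo:us-east-1:111122223333:stream/demo-stream/1234567890123"  # placeholder

# The nested task-configuration fields below are assumed, not part of this patch.
Chime_SDK_Media_Pipelines.start_voice_tone_analysis_task(
    "en-US",
    pipeline_id,
    Dict(
        "KinesisVideoStreamSourceTaskConfiguration" =>
            Dict("StreamArn" => stream_arn, "ChannelId" => 0),
    ),
)
```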
+""" +function start_voice_tone_analysis_task( + LanguageCode, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "POST", + "/media-insights-pipelines/$(identifier)/voice-tone-analysis-tasks?operation=start", + Dict{String,Any}( + "LanguageCode" => LanguageCode, "ClientRequestToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_voice_tone_analysis_task( + LanguageCode, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chime_sdk_media_pipelines( + "POST", + "/media-insights-pipelines/$(identifier)/voice-tone-analysis-tasks?operation=start", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "LanguageCode" => LanguageCode, "ClientRequestToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_speaker_search_task(identifier, speaker_search_task_id) + stop_speaker_search_task(identifier, speaker_search_task_id, params::Dict{String,<:Any}) + +Stops a speaker search task. + +# Arguments +- `identifier`: The unique identifier of the resource to be updated. Valid values include + the ID and ARN of the media insights pipeline. +- `speaker_search_task_id`: The speaker search task ID. + +""" +function stop_speaker_search_task( + identifier, speakerSearchTaskId; aws_config::AbstractAWSConfig=global_aws_config() +) + return chime_sdk_media_pipelines( + "POST", + "/media-insights-pipelines/$(identifier)/speaker-search-tasks/$(speakerSearchTaskId)?operation=stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_speaker_search_task( + identifier, + speakerSearchTaskId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return chime_sdk_media_pipelines( + "POST", + "/media-insights-pipelines/$(identifier)/speaker-search-tasks/$(speakerSearchTaskId)?operation=stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_voice_tone_analysis_task(identifier, voice_tone_analysis_task_id) + stop_voice_tone_analysis_task(identifier, voice_tone_analysis_task_id, params::Dict{String,<:Any}) + +Stops a voice tone analysis task. + +# Arguments +- `identifier`: The unique identifier of the resource to be updated. Valid values include + the ID and ARN of the media insights pipeline. +- `voice_tone_analysis_task_id`: The ID of the voice tone analysis task. 
+
+"""
+function stop_voice_tone_analysis_task(
+    identifier, voiceToneAnalysisTaskId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return chime_sdk_media_pipelines(
+        "POST",
+        "/media-insights-pipelines/$(identifier)/voice-tone-analysis-tasks/$(voiceToneAnalysisTaskId)?operation=stop";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function stop_voice_tone_analysis_task(
+    identifier,
+    voiceToneAnalysisTaskId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return chime_sdk_media_pipelines(
+        "POST",
+        "/media-insights-pipelines/$(identifier)/voice-tone-analysis-tasks/$(voiceToneAnalysisTaskId)?operation=stop",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
"""
    tag_resource(resource_arn, tags)
    tag_resource(resource_arn, tags, params::Dict{String,<:Any})
@@ -834,3 +1309,40 @@ function update_media_insights_pipeline_status(
        feature_set=SERVICE_FEATURE_SET,
    )
end
+
+"""
+    update_media_pipeline_kinesis_video_stream_pool(identifier)
+    update_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any})
+
+Updates a Kinesis video stream pool in a media pipeline.
+
+# Arguments
+- `identifier`: The ID of the video stream pool.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"StreamConfiguration"`: The configuration settings for the video stream.
+"""
+function update_media_pipeline_kinesis_video_stream_pool(
+    identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return chime_sdk_media_pipelines(
+        "PUT",
+        "/media-pipeline-kinesis-video-stream-pools/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_media_pipeline_kinesis_video_stream_pool(
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return chime_sdk_media_pipelines(
+        "PUT",
+        "/media-pipeline-kinesis-video-stream-pools/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
diff --git a/src/services/chime_sdk_meetings.jl b/src/services/chime_sdk_meetings.jl
index 416d309f5c..071e1553b1 100644
--- a/src/services/chime_sdk_meetings.jl
+++ b/src/services/chime_sdk_meetings.jl
@@ -52,16 +52,22 @@ end

Updates AttendeeCapabilities except the capabilities listed in an ExcludedAttendeeIds
table. You use the capabilities with a set of values that control what the capabilities can
do, such as SendReceive data. For more information about those values, see . When
-using capabilities, be aware of these corner cases: You can't set content capabilities to
-SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If
-you don't set the video capability to receive, the response will contain an HTTP 400 Bad
-Request status code. However, you can set your video capability to receive and you set your
-content capability to not receive. When you change an audio capability from None or
-Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio
-will flow from the attendee to the other meeting participants. When you change a video or
-content capability from None or Receive to Send or SendReceive , and if the attendee turned
-on their video or content streams, remote attendees can receive those streams, but only
-after media renegotiation between the client and the Amazon Chime back-end server.
+using capabilities, be aware of these corner cases: If you specify +MeetingFeatures:Video:MaxResolution:None when you create a meeting, all API requests that +include SendReceive, Send, or Receive for AttendeeCapabilities:Video will be rejected with +ValidationError 400. If you specify MeetingFeatures:Content:MaxResolution:None when you +create a meeting, all API requests that include SendReceive, Send, or Receive for +AttendeeCapabilities:Content will be rejected with ValidationError 400. You can't set +content capabilities to SendReceive or Receive unless you also set video capabilities to +SendReceive or Receive. If you don't set the video capability to receive, the response will +contain an HTTP 400 Bad Request status code. However, you can set your video capability to +receive and you set your content capability to not receive. When you change an audio +capability from None or Receive to Send or SendReceive , and if the attendee left their +microphone unmuted, audio will flow from the attendee to the other meeting participants. +When you change a video or content capability from None or Receive to Send or SendReceive , +and if the attendee turned on their video or content streams, remote attendees can receive +those streams, but only after media renegotiation between the client and the Amazon Chime +back-end server. # Arguments - `capabilities`: The capabilities (audio, video, or content) that you want to update. @@ -131,17 +137,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys attendee. If you don't specify capabilities, all users have send and receive capabilities on all media channels by default. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about - those values, see . When using capabilities, be aware of these corner cases: You can't - set content capabilities to SendReceive or Receive unless you also set video capabilities - to SendReceive or Receive. If you don't set the video capability to receive, the response - will contain an HTTP 400 Bad Request status code. However, you can set your video - capability to receive and you set your content capability to not receive. When you change - an audio capability from None or Receive to Send or SendReceive , and if the attendee left - their microphone unmuted, audio will flow from the attendee to the other meeting - participants. When you change a video or content capability from None or Receive to Send - or SendReceive , and if the attendee turned on their video or content streams, remote - attendees can receive those streams, but only after media renegotiation between the client - and the Amazon Chime back-end server. + those values, see . When using capabilities, be aware of these corner cases: If you + specify MeetingFeatures:Video:MaxResolution:None when you create a meeting, all API + requests that include SendReceive, Send, or Receive for AttendeeCapabilities:Video will be + rejected with ValidationError 400. If you specify + MeetingFeatures:Content:MaxResolution:None when you create a meeting, all API requests that + include SendReceive, Send, or Receive for AttendeeCapabilities:Content will be rejected + with ValidationError 400. You can't set content capabilities to SendReceive or Receive + unless you also set video capabilities to SendReceive or Receive. If you don't set the + video capability to receive, the response will contain an HTTP 400 Bad Request status code. 
+ However, you can set your video capability to receive and you set your content capability + to not receive. When you change an audio capability from None or Receive to Send or + SendReceive , and if the attendee left their microphone unmuted, audio will flow from the + attendee to the other meeting participants. When you change a video or content capability + from None or Receive to Send or SendReceive , and if the attendee turned on their video or + content streams, remote attendees can receive those streams, but only after media + renegotiation between the client and the Amazon Chime back-end server. """ function create_attendee( ExternalUserId, MeetingId; aws_config::AbstractAWSConfig=global_aws_config() @@ -189,8 +200,8 @@ SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide. - `media_region`: The Region in which to create the meeting. Available values: af-south-1, ap-northeast-1, ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, - us-east-1, us-east-2, us-west-1, us-west-2. Available values in AWS GovCloud (US) Regions: - us-gov-east-1, us-gov-west-1. + us-east-1, us-east-2, us-west-1, us-west-2. Available values in Amazon Web Services + GovCloud (US) Regions: us-gov-east-1, us-gov-west-1. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -208,20 +219,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys using its own API operations. For more information, refer to the documentation for that service. Each resource can have up to 50 tags. For other limits, see Tag Naming and Usage Conventions in the AWS General Reference. You can only tag resources that are located in - the specified AWS Region for the AWS account. To add tags to a resource, you need the - necessary permissions for the service that the resource belongs to as well as permissions - for adding tags. For more information, see the documentation for each service. Do not - store personally identifiable information (PII) or other confidential or sensitive - information in tags. We use tags to provide you with billing and administration services. - Tags are not intended to be used for private or sensitive data. Minimum permissions In - addition to the tag:TagResources permission required by this operation, you must also have - the tagging permission defined by the service that created the resource. For example, to - tag a ChimeSDKMeetings instance using the TagResources operation, you must have both of the - following permissions: tag:TagResources ChimeSDKMeetings:CreateTags Some services - might have specific requirements for tagging some resources. For example, to tag an Amazon - S3 bucket, you must also have the s3:GetBucketTagging permission. If the expected minimum - permissions don't work, check the documentation for that service's tagging APIs for more - information. + the specified Amazon Web Services Region for the Amazon Web Services account. To add tags + to a resource, you need the necessary permissions for the service that the resource belongs + to as well as permissions for adding tags. For more information, see the documentation for + each service. Do not store personally identifiable information (PII) or other + confidential or sensitive information in tags. We use tags to provide you with billing and + administration services. Tags are not intended to be used for private or sensitive data. 
+ Minimum permissions In addition to the tag:TagResources permission required by this + operation, you must also have the tagging permission defined by the service that created + the resource. For example, to tag a ChimeSDKMeetings instance using the TagResources + operation, you must have both of the following permissions: tag:TagResources + ChimeSDKMeetings:CreateTags Some services might have specific requirements for tagging + some resources. For example, to tag an Amazon S3 bucket, you must also have the + s3:GetBucketTagging permission. If the expected minimum permissions don't work, check the + documentation for that service's tagging APIs for more information. - `"TenantIds"`: A consistent and opaque identifier, created and maintained by the builder to represent a segment of their users. """ @@ -288,8 +299,8 @@ the Amazon Chime SDK in the Amazon Chime Developer Guide. - `media_region`: The Region in which to create the meeting. Available values: af-south-1, ap-northeast-1, ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, - us-east-1, us-east-2, us-west-1, us-west-2. Available values in AWS GovCloud (US) Regions: - us-gov-east-1, us-gov-west-1. + us-east-1, us-east-2, us-west-1, us-west-2. Available values in Amazon Web Services + GovCloud (US) Regions: us-gov-east-1, us-gov-west-1. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -575,9 +586,16 @@ Amazon Chime SDK live transcription in the Amazon Chime SDK Developer Guide. If specify an invalid configuration, a TranscriptFailed event will be sent with the contents of the BadRequestException generated by Amazon Transcribe. For more information on each parameter and which combinations are valid, refer to the StartStreamTranscription API in -the Amazon Transcribe Developer Guide. Amazon Chime SDK live transcription is powered by -Amazon Transcribe. Use of Amazon Transcribe is subject to the AWS Service Terms, including -the terms specific to the AWS Machine Learning and Artificial Intelligence Services. +the Amazon Transcribe Developer Guide. By default, Amazon Transcribe may use and store +audio content processed by the service to develop and improve Amazon Web Services AI/ML +services as further described in section 50 of the Amazon Web Services Service Terms. Using +Amazon Transcribe may be subject to federal and state laws or regulations regarding the +recording or interception of electronic communications. It is your and your end users’ +responsibility to comply with all applicable laws regarding the recording, including +properly notifying all participants in a recorded session or communication that the session +or communication is being recorded, and obtaining all necessary consents. You can opt out +from Amazon Web Services using audio content to develop and improve AWS AI/ML services by +configuring an AI services opt out policy using Amazon Web Services Organizations. # Arguments - `meeting_id`: The unique ID of the meeting being transcribed. @@ -624,10 +642,17 @@ end stop_meeting_transcription(meeting_id, params::Dict{String,<:Any}) Stops transcription for the specified meetingId. For more information, refer to Using -Amazon Chime SDK live transcription in the Amazon Chime SDK Developer Guide. Amazon Chime -SDK live transcription is powered by Amazon Transcribe. 
Use of Amazon Transcribe is subject -to the AWS Service Terms, including the terms specific to the AWS Machine Learning and -Artificial Intelligence Services. +Amazon Chime SDK live transcription in the Amazon Chime SDK Developer Guide. By default, +Amazon Transcribe may use and store audio content processed by the service to develop and +improve Amazon Web Services AI/ML services as further described in section 50 of the Amazon +Web Services Service Terms. Using Amazon Transcribe may be subject to federal and state +laws or regulations regarding the recording or interception of electronic communications. +It is your and your end users’ responsibility to comply with all applicable laws +regarding the recording, including properly notifying all participants in a recorded +session or communication that the session or communication is being recorded, and obtaining +all necessary consents. You can opt out from Amazon Web Services using audio content to +develop and improve Amazon Web Services AI/ML services by configuring an AI services opt +out policy using Amazon Web Services Organizations. # Arguments - `meeting_id`: The unique ID of the meeting for which you stop transcription. @@ -708,12 +733,12 @@ attempt to remove tags from a resource that were already removed. Note the follo remove tags from a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for removing tags. For more information, see the documentation for the service whose resource you want to untag. You can only tag -resources that are located in the specified AWS Region for the calling AWS account. -Minimum permissions In addition to the tag:UntagResources permission required by this -operation, you must also have the remove tags permission defined by the service that -created the resource. For example, to remove the tags from an Amazon EC2 instance using the -UntagResources operation, you must have both of the following permissions: -tag:UntagResource ChimeSDKMeetings:DeleteTags +resources that are located in the specified Amazon Web Services Region for the calling +Amazon Web Services account. Minimum permissions In addition to the tag:UntagResources +permission required by this operation, you must also have the remove tags permission +defined by the service that created the resource. For example, to remove the tags from an +Amazon EC2 instance using the UntagResources operation, you must have both of the following +permissions: tag:UntagResource ChimeSDKMeetings:DeleteTags # Arguments - `resource_arn`: The ARN of the resource that you're removing tags from. @@ -758,17 +783,22 @@ end The capabilities that you want to update. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information -about those values, see . When using capabilities, be aware of these corner cases: You -can't set content capabilities to SendReceive or Receive unless you also set video -capabilities to SendReceive or Receive. If you don't set the video capability to receive, -the response will contain an HTTP 400 Bad Request status code. However, you can set your -video capability to receive and you set your content capability to not receive. When you -change an audio capability from None or Receive to Send or SendReceive , and if the -attendee left their microphone unmuted, audio will flow from the attendee to the other -meeting participants. 
When you change a video or content capability from None or Receive -to Send or SendReceive , and if the attendee turned on their video or content streams, -remote attendees can receive those streams, but only after media renegotiation between the -client and the Amazon Chime back-end server. +about those values, see . When using capabilities, be aware of these corner cases: If +you specify MeetingFeatures:Video:MaxResolution:None when you create a meeting, all API +requests that include SendReceive, Send, or Receive for AttendeeCapabilities:Video will be +rejected with ValidationError 400. If you specify +MeetingFeatures:Content:MaxResolution:None when you create a meeting, all API requests that +include SendReceive, Send, or Receive for AttendeeCapabilities:Content will be rejected +with ValidationError 400. You can't set content capabilities to SendReceive or Receive +unless you also set video capabilities to SendReceive or Receive. If you don't set the +video capability to receive, the response will contain an HTTP 400 Bad Request status code. +However, you can set your video capability to receive and you set your content capability +to not receive. When you change an audio capability from None or Receive to Send or +SendReceive , and if the attendee left their microphone unmuted, audio will flow from the +attendee to the other meeting participants. When you change a video or content capability +from None or Receive to Send or SendReceive , and if the attendee turned on their video or +content streams, remote attendees can receive those streams, but only after media +renegotiation between the client and the Amazon Chime back-end server. # Arguments - `attendee_id`: The ID of the attendee associated with the update request. diff --git a/src/services/chime_sdk_messaging.jl b/src/services/chime_sdk_messaging.jl index d8daa84d7f..296c8e4820 100644 --- a/src/services/chime_sdk_messaging.jl +++ b/src/services/chime_sdk_messaging.jl @@ -132,10 +132,10 @@ end channel_flow_callback(callback_id, channel_message, channel_arn) channel_flow_callback(callback_id, channel_message, channel_arn, params::Dict{String,<:Any}) -Calls back Chime SDK Messaging with a processing response message. This should be invoked -from the processor Lambda. This is a developer API. You can return one of the following -processing responses: Update message content or metadata Deny a message Make no -changes to the message +Calls back Amazon Chime SDK messaging with a processing response message. This should be +invoked from the processor Lambda. This is a developer API. You can return one of the +following processing responses: Update message content or metadata Deny a message +Make no changes to the message # Arguments - `callback_id`: The identifier passed to the processor by the service when invoked. Use @@ -341,7 +341,7 @@ channel flows with channels, and the processors in the channel flow then take ac messages sent to that channel. This is a developer API. Channel flows process the following items: New and updated messages Persistent and non-persistent messages The Standard message type Channel flows don't process Control or System messages. For more -information about the message types provided by Chime SDK Messaging, refer to Message types +information about the message types provided by Chime SDK messaging, refer to Message types in the Amazon Chime developer guide. 
# Arguments @@ -1329,8 +1329,8 @@ end get_channel_membership_preferences(channel_arn, member_arn, x-amz-chime-bearer, params::Dict{String,<:Any}) Gets the membership preferences of an AppInstanceUser or AppInstanceBot for the specified -channel. A user or a bot must be a member of the channel and own the membership to be able -to retrieve membership preferences. Users or bots in the AppInstanceAdmin and channel +channel. A user or a bot must be a member of the channel and own the membership in order to +retrieve membership preferences. Users or bots in the AppInstanceAdmin and channel moderator roles can't retrieve preferences for other users or bots. Banned users or bots can't retrieve membership preferences for the channel from which they are banned. The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or @@ -1453,11 +1453,10 @@ status of messages going through channel flow processing. The API provides an al to retrieving message status if the event was not received because a client wasn't connected to a websocket. Messages can have any one of these statuses. SENT Message processed successfully PENDING Ongoing processing FAILED Processing failed DENIED -Messasge denied by the processor This API does not return statuses for denied -messages, because we don't store them once the processor denies them. Only the message -sender can invoke this API. The x-amz-chime-bearer request header is mandatory. Use the -ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the -header. +Message denied by the processor This API does not return statuses for denied messages, +because we don't store them once the processor denies them. Only the message sender can +invoke this API. The x-amz-chime-bearer request header is mandatory. Use the ARN of the +AppInstanceUser or AppInstanceBot that makes the API call as the value in the header. # Arguments - `channel_arn`: The ARN of the channel @@ -1739,7 +1738,7 @@ end list_channel_memberships_for_app_instance_user(x-amz-chime-bearer) list_channel_memberships_for_app_instance_user(x-amz-chime-bearer, params::Dict{String,<:Any}) - Lists all channels that anr AppInstanceUser or AppInstanceBot is a part of. Only an + Lists all channels that an AppInstanceUser or AppInstanceBot is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own. The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header. @@ -2210,13 +2209,13 @@ end put_channel_membership_preferences(preferences, channel_arn, member_arn, x-amz-chime-bearer) put_channel_membership_preferences(preferences, channel_arn, member_arn, x-amz-chime-bearer, params::Dict{String,<:Any}) -Sets the membership preferences of an AppInstanceUser or AppIntanceBot for the specified +Sets the membership preferences of an AppInstanceUser or AppInstanceBot for the specified channel. The user or bot must be a member of the channel. Only the user or bot who owns the membership can set preferences. Users or bots in the AppInstanceAdmin and channel moderator -roles can't set preferences for other users or users. Banned users or bots can't set -membership preferences for the channel from which they are banned. The x-amz-chime-bearer -request header is mandatory. Use the ARN of an AppInstanceUser or AppInstanceBot that makes -the API call as the value in the header. +roles can't set preferences for other users. 
Banned users or bots can't set membership +preferences for the channel from which they are banned. The x-amz-chime-bearer request +header is mandatory. Use the ARN of an AppInstanceUser or AppInstanceBot that makes the API +call as the value in the header. # Arguments - `preferences`: The channel membership preferences of an AppInstanceUser . @@ -2424,15 +2423,19 @@ end Sends a message to a particular channel that the member is a part of. The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header. Also, STANDARD messages -can contain 4KB of data and the 1KB of metadata. CONTROL messages can contain 30 bytes of -data and no metadata. +can be up to 4KB in size and contain metadata. Metadata is arbitrary, and you can use it in +a variety of ways, such as containing a link to an attachment. CONTROL messages are +limited to 30 bytes and do not contain metadata. # Arguments - `client_request_token`: The Idempotency token for each client request. -- `content`: The content of the message. +- `content`: The content of the channel message. - `persistence`: Boolean that controls whether the message is persisted on the back end. Required. -- `type`: The type of message, STANDARD or CONTROL. +- `type`: The type of message, STANDARD or CONTROL. STANDARD messages can be up to 4KB in + size and contain metadata. Metadata is arbitrary, and you can use it in a variety of ways, + such as containing a link to an attachment. CONTROL messages are limited to 30 bytes and + do not contain metadata. - `channel_arn`: The ARN of the channel. - `x-amz-chime-bearer`: The ARN of the AppInstanceUser or AppInstanceBot that makes the API call. @@ -2445,6 +2448,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Metadata"`: The optional metadata for each message. - `"PushNotification"`: The push notification configuration of the message. - `"SubChannelId"`: The ID of the SubChannel in the request. +- `"Target"`: The target of a message. Must be a member of the channel, such as another + user, a bot, or the sender. Only the target and the sender can view targeted messages. Only + users who can see targeted messages can take actions on them. However, administrators can + delete targeted messages that they can’t see. """ function send_channel_message( ClientRequestToken, @@ -2693,7 +2700,7 @@ the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the the header. # Arguments -- `content`: The content of the message being updated. +- `content`: The content of the channel message. - `channel_arn`: The ARN of the channel. - `message_id`: The ID string of the message being updated. - `x-amz-chime-bearer`: The ARN of the AppInstanceUser or AppInstanceBot that makes the API diff --git a/src/services/chime_sdk_voice.jl b/src/services/chime_sdk_voice.jl index cf5fe24f2b..0aa84f02cc 100644 --- a/src/services/chime_sdk_voice.jl +++ b/src/services/chime_sdk_voice.jl @@ -141,7 +141,10 @@ end batch_update_phone_number(update_phone_number_request_items) batch_update_phone_number(update_phone_number_request_items, params::Dict{String,<:Any}) -Updates one or more phone numbers. +Updates phone number product types, calling names, or phone number names. You can update +one attribute at a time for each UpdatePhoneNumberRequestItem. For example, you can update +the product type, the calling name, or phone name. You cannot have a duplicate +phoneNumberId in a request. 
# Arguments - `update_phone_number_request_items`: Lists the phone numbers in the update request. @@ -191,6 +194,9 @@ must use the Amazon Chime SDK SIP media application dial-in product type. - `e164_phone_numbers`: List of phone numbers, in E.164 format. - `product_type`: The phone number product type. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Name"`: Specifies the name assigned to one or more phone numbers. """ function create_phone_number_order( E164PhoneNumbers, ProductType; aws_config::AbstractAWSConfig=global_aws_config() @@ -1440,7 +1446,9 @@ end get_sip_media_application_alexa_skill_configuration(sip_media_application_id) get_sip_media_application_alexa_skill_configuration(sip_media_application_id, params::Dict{String,<:Any}) -Gets the Alexa Skill configuration for the SIP media application. +Gets the Alexa Skill configuration for the SIP media application. Due to changes made by +the Amazon Alexa service, this API is no longer available for use. For more information, +refer to the Alexa Smart Properties page. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -2430,7 +2438,9 @@ end put_sip_media_application_alexa_skill_configuration(sip_media_application_id) put_sip_media_application_alexa_skill_configuration(sip_media_application_id, params::Dict{String,<:Any}) -Updates the Alexa Skill configuration for the SIP media application. +Updates the Alexa Skill configuration for the SIP media application. Due to changes made +by the Amazon Alexa service, this API is no longer available for use. For more information, +refer to the Alexa Smart Properties page. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -3192,12 +3202,13 @@ end update_phone_number(phone_number_id) update_phone_number(phone_number_id, params::Dict{String,<:Any}) -Updates phone number details, such as product type or calling name, for the specified phone -number ID. You can update one phone number detail at a time. For example, you can update -either the product type or the calling name in one action. For numbers outside the U.S., -you must use the Amazon Chime SDK SIP Media Application Dial-In product type. Updates to -outbound calling names can take 72 hours to complete. Pending updates to outbound calling -names must be complete before you can request another update. +Updates phone number details, such as product type, calling name, or phone number name for +the specified phone number ID. You can update one phone number detail at a time. For +example, you can update either the product type, calling name, or phone number name in one +action. For numbers outside the U.S., you must use the Amazon Chime SDK SIP Media +Application Dial-In product type. Updates to outbound calling names can take 72 hours to +complete. Pending updates to outbound calling names must be complete before you can request +another update. # Arguments - `phone_number_id`: The phone number ID. @@ -3205,6 +3216,7 @@ names must be complete before you can request another update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CallingName"`: The outbound calling name associated with the phone number. +- `"Name"`: Specifies the updated name assigned to one or more phone numbers. - `"ProductType"`: The product type. 
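A minimal sketch for the new `"Name"` key on phone number updates (illustrative only; the phone number ID is a placeholder, and only one attribute should be updated per call, per the docstring above):

```julia
using AWS
@service Chime_SDK_Voice

# Placeholder phone number ID; here only the new "Name" attribute is updated.
Chime_SDK_Voice.update_phone_number("+15550199999", Dict("Name" => "Support line"))
```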
""" function update_phone_number( diff --git a/src/services/cleanrooms.jl b/src/services/cleanrooms.jl index e1544eef42..0d2c9cc8f5 100644 --- a/src/services/cleanrooms.jl +++ b/src/services/cleanrooms.jl @@ -4,6 +4,54 @@ using AWS.AWSServices: cleanrooms using AWS.Compat using AWS.UUIDs +""" + batch_get_collaboration_analysis_template(analysis_template_arns, collaboration_identifier) + batch_get_collaboration_analysis_template(analysis_template_arns, collaboration_identifier, params::Dict{String,<:Any}) + +Retrieves multiple analysis templates within a collaboration by their Amazon Resource Names +(ARNs). + +# Arguments +- `analysis_template_arns`: The Amazon Resource Name (ARN) associated with the analysis + template within a collaboration. +- `collaboration_identifier`: A unique identifier for the collaboration that the analysis + templates belong to. Currently accepts collaboration ID. + +""" +function batch_get_collaboration_analysis_template( + analysisTemplateArns, + collaborationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/collaborations/$(collaborationIdentifier)/batch-analysistemplates", + Dict{String,Any}("analysisTemplateArns" => analysisTemplateArns); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_collaboration_analysis_template( + analysisTemplateArns, + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/collaborations/$(collaborationIdentifier)/batch-analysistemplates", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("analysisTemplateArns" => analysisTemplateArns), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_get_schema(collaboration_identifier, names) batch_get_schema(collaboration_identifier, names, params::Dict{String,<:Any}) @@ -13,7 +61,7 @@ Retrieves multiple schemas by their identifiers. # Arguments - `collaboration_identifier`: A unique identifier for the collaboration that the schemas belong to. Currently accepts collaboration ID. -- `names`: The names for the schema objects to retrieve.> +- `names`: The names for the schema objects to retrieve. """ function batch_get_schema( @@ -42,6 +90,115 @@ function batch_get_schema( ) end +""" + batch_get_schema_analysis_rule(collaboration_identifier, schema_analysis_rule_requests) + batch_get_schema_analysis_rule(collaboration_identifier, schema_analysis_rule_requests, params::Dict{String,<:Any}) + +Retrieves multiple analysis rule schemas. + +# Arguments +- `collaboration_identifier`: The unique identifier of the collaboration that contains the + schema analysis rule. +- `schema_analysis_rule_requests`: The information that's necessary to retrieve a schema + analysis rule. 
+ +""" +function batch_get_schema_analysis_rule( + collaborationIdentifier, + schemaAnalysisRuleRequests; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/collaborations/$(collaborationIdentifier)/batch-schema-analysis-rule", + Dict{String,Any}("schemaAnalysisRuleRequests" => schemaAnalysisRuleRequests); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_schema_analysis_rule( + collaborationIdentifier, + schemaAnalysisRuleRequests, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/collaborations/$(collaborationIdentifier)/batch-schema-analysis-rule", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "schemaAnalysisRuleRequests" => schemaAnalysisRuleRequests + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_analysis_template(format, membership_identifier, name, source) + create_analysis_template(format, membership_identifier, name, source, params::Dict{String,<:Any}) + +Creates a new analysis template. + +# Arguments +- `format`: The format of the analysis template. +- `membership_identifier`: The identifier for a membership resource. +- `name`: The name of the analysis template. +- `source`: The information in the analysis template. Currently supports text, the query + text for the analysis template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"analysisParameters"`: The parameters of the analysis template. +- `"description"`: The description of the analysis template. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. +""" +function create_analysis_template( + format, + membershipIdentifier, + name, + source; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/analysistemplates", + Dict{String,Any}("format" => format, "name" => name, "source" => source); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_analysis_template( + format, + membershipIdentifier, + name, + source, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/analysistemplates", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("format" => format, "name" => name, "source" => source), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_collaboration(creator_display_name, creator_member_abilities, description, members, name, query_log_status) create_collaboration(creator_display_name, creator_member_abilities, description, members, name, query_log_status, params::Dict{String,<:Any}) @@ -59,6 +216,9 @@ Creates a new collaboration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"creatorPaymentConfiguration"`: The collaboration creator's payment responsibilities set + by the collaboration creator. If the collaboration creator hasn't specified anyone as the + member paying for query compute costs, then the member who can query is the default payer. 
- `"dataEncryptionMetadata"`: The settings for client-side encryption with Cryptographic Computing for Clean Rooms. - `"tags"`: An optional label that you can assign to a resource when you create it. Each @@ -122,6 +282,86 @@ function create_collaboration( ) end +""" + create_configured_audience_model_association(configured_audience_model_arn, configured_audience_model_association_name, manage_resource_policies, membership_identifier) + create_configured_audience_model_association(configured_audience_model_arn, configured_audience_model_association_name, manage_resource_policies, membership_identifier, params::Dict{String,<:Any}) + +Provides the details necessary to create a configured audience model association. + +# Arguments +- `configured_audience_model_arn`: A unique identifier for the configured audience model + that you want to associate. +- `configured_audience_model_association_name`: The name of the configured audience model + association. +- `manage_resource_policies`: When TRUE, indicates that the resource policy for the + configured audience model resource being associated is configured for Clean Rooms to manage + permissions related to the given collaboration. When FALSE, indicates that the configured + audience model resource owner will manage permissions related to the given collaboration. + Setting this to TRUE requires you to have permissions to create, update, and delete the + resource policy for the cleanrooms-ml resource when you call the + DeleteConfiguredAudienceModelAssociation resource. In addition, if you are the + collaboration creator and specify TRUE, you must have the same permissions when you call + the DeleteMember and DeleteCollaboration APIs. +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. The configured audience model is associated to the collaboration that this + membership belongs to. Accepts a membership ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the configured audience model association. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. 
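A hedged usage sketch for the new configured audience model association call (illustrative only; the ARN and membership ID are placeholders, and passing `true` for `manage_resource_policies` assumes you want Clean Rooms to manage the resource policy, as described above):

```julia
using AWS
@service CleanRooms

membership_id = "12345678-aaaa-bbbb-cccc-000011112222"  # placeholder membership ID
cam_arn = "arn:aws:cleanrooms-ml:us-east-1:111122223333:configured-audience-model/demo"  # placeholder ARN

CleanRooms.create_configured_audience_model_association(
    cam_arn,
    "demo-association",
    true,            # manage_resource_policies: let Clean Rooms manage the resource policy
    membership_id,
    Dict("description" => "Audience model shared with the collaboration"),
)
```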
+""" +function create_configured_audience_model_association( + configuredAudienceModelArn, + configuredAudienceModelAssociationName, + manageResourcePolicies, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations", + Dict{String,Any}( + "configuredAudienceModelArn" => configuredAudienceModelArn, + "configuredAudienceModelAssociationName" => + configuredAudienceModelAssociationName, + "manageResourcePolicies" => manageResourcePolicies, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_configured_audience_model_association( + configuredAudienceModelArn, + configuredAudienceModelAssociationName, + manageResourcePolicies, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuredAudienceModelArn" => configuredAudienceModelArn, + "configuredAudienceModelAssociationName" => + configuredAudienceModelAssociationName, + "manageResourcePolicies" => manageResourcePolicies, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_configured_table(allowed_columns, analysis_method, name, table_reference) create_configured_table(allowed_columns, analysis_method, name, table_reference, params::Dict{String,<:Any}) @@ -134,7 +374,7 @@ Creates a new configured table resource. - `analysis_method`: The analysis method for the configured tables. The only valid value is currently `DIRECT_QUERY`. - `name`: The name of the configured table. -- `table_reference`: A reference to the AWS Glue table being configured. +- `table_reference`: A reference to the Glue table being configured. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -201,7 +441,7 @@ be created for a given configured table. # Arguments - `analysis_rule_policy`: The entire created configured table analysis rule object. -- `analysis_rule_type`: The type of analysis rule. Valid values are AGGREGATION and LIST. +- `analysis_rule_type`: The type of analysis rule. - `configured_table_identifier`: The identifier for the configured table to create the analysis rule for. Currently accepts the configured table ID. @@ -329,10 +569,16 @@ Creates a membership for a specific collaboration identifier and joins the colla # Arguments - `collaboration_identifier`: The unique ID for the associated collaboration. - `query_log_status`: An indicator as to whether query logging has been enabled or disabled - for the collaboration. + for the membership. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"defaultResultConfiguration"`: The default protected query result configuration as + specified by the member who can receive results. +- `"paymentConfiguration"`: The payment responsibilities accepted by the collaboration + member. Not required if the collaboration member has the member ability to run queries. + Required if the collaboration member doesn't have the member ability to run queries but is + configured as a payer by the collaboration creator. - `"tags"`: An optional label that you can assign to a resource when you create it. 
Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to @@ -378,6 +624,118 @@ function create_membership( ) end +""" + create_privacy_budget_template(auto_refresh, membership_identifier, parameters, privacy_budget_type) + create_privacy_budget_template(auto_refresh, membership_identifier, parameters, privacy_budget_type, params::Dict{String,<:Any}) + +Creates a privacy budget template for a specified membership. Each membership can have only +one privacy budget template, but it can be deleted and recreated. If you need to change the +privacy budget template for a membership, use the UpdatePrivacyBudgetTemplate operation. + +# Arguments +- `auto_refresh`: How often the privacy budget refreshes. If you plan to regularly bring + new data into the collaboration, you can use CALENDAR_MONTH to automatically get a new + privacy budget for the collaboration every calendar month. Choosing this option allows + arbitrary amounts of information to be revealed about rows of the data when repeatedly + queries across refreshes. Avoid choosing this if the same rows will be repeatedly queried + between privacy budget refreshes. +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. The privacy budget template is created in the collaboration that this + membership belongs to. Accepts a membership ID. +- `parameters`: Specifies your parameters for the privacy budget template. +- `privacy_budget_type`: Specifies the type of the privacy budget template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. +""" +function create_privacy_budget_template( + autoRefresh, + membershipIdentifier, + parameters, + privacyBudgetType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/privacybudgettemplates", + Dict{String,Any}( + "autoRefresh" => autoRefresh, + "parameters" => parameters, + "privacyBudgetType" => privacyBudgetType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_privacy_budget_template( + autoRefresh, + membershipIdentifier, + parameters, + privacyBudgetType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/privacybudgettemplates", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "autoRefresh" => autoRefresh, + "parameters" => parameters, + "privacyBudgetType" => privacyBudgetType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_analysis_template(analysis_template_identifier, membership_identifier) + delete_analysis_template(analysis_template_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an analysis template. + +# Arguments +- `analysis_template_identifier`: The identifier for the analysis template resource. +- `membership_identifier`: The identifier for a membership resource. 
+ +""" +function delete_analysis_template( + analysisTemplateIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/analysistemplates/$(analysisTemplateIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_analysis_template( + analysisTemplateIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/analysistemplates/$(analysisTemplateIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_collaboration(collaboration_identifier) delete_collaboration(collaboration_identifier, params::Dict{String,<:Any}) @@ -412,6 +770,46 @@ function delete_collaboration( ) end +""" + delete_configured_audience_model_association(configured_audience_model_association_identifier, membership_identifier) + delete_configured_audience_model_association(configured_audience_model_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the information necessary to delete a configured audience model association. + +# Arguments +- `configured_audience_model_association_identifier`: A unique identifier of the configured + audience model association that you want to delete. +- `membership_identifier`: A unique identifier of the membership that contains the audience + model association that you want to delete. + +""" +function delete_configured_audience_model_association( + configuredAudienceModelAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configured_audience_model_association( + configuredAudienceModelAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_configured_table(configured_table_identifier) delete_configured_table(configured_table_identifier, params::Dict{String,<:Any}) @@ -599,18 +997,97 @@ function delete_membership( end """ - get_collaboration(collaboration_identifier) - get_collaboration(collaboration_identifier, params::Dict{String,<:Any}) + delete_privacy_budget_template(membership_identifier, privacy_budget_template_identifier) + delete_privacy_budget_template(membership_identifier, privacy_budget_template_identifier, params::Dict{String,<:Any}) -Returns metadata about a collaboration. +Deletes a privacy budget template for a specified membership. # Arguments -- `collaboration_identifier`: The identifier for the collaboration. +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. The privacy budget template is deleted from the collaboration that this + membership belongs to. Accepts a membership ID. +- `privacy_budget_template_identifier`: A unique identifier for your privacy budget + template. 
""" -function get_collaboration( - collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() -) +function delete_privacy_budget_template( + membershipIdentifier, + privacyBudgetTemplateIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_privacy_budget_template( + membershipIdentifier, + privacyBudgetTemplateIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_analysis_template(analysis_template_identifier, membership_identifier) + get_analysis_template(analysis_template_identifier, membership_identifier, params::Dict{String,<:Any}) + +Retrieves an analysis template. + +# Arguments +- `analysis_template_identifier`: The identifier for the analysis template resource. +- `membership_identifier`: The identifier for a membership resource. + +""" +function get_analysis_template( + analysisTemplateIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/analysistemplates/$(analysisTemplateIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_analysis_template( + analysisTemplateIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/analysistemplates/$(analysisTemplateIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_collaboration(collaboration_identifier) + get_collaboration(collaboration_identifier, params::Dict{String,<:Any}) + +Returns metadata about a collaboration. + +# Arguments +- `collaboration_identifier`: The identifier for the collaboration. + +""" +function get_collaboration( + collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) return cleanrooms( "GET", "/collaborations/$(collaborationIdentifier)"; @@ -632,6 +1109,165 @@ function get_collaboration( ) end +""" + get_collaboration_analysis_template(analysis_template_arn, collaboration_identifier) + get_collaboration_analysis_template(analysis_template_arn, collaboration_identifier, params::Dict{String,<:Any}) + +Retrieves an analysis template within a collaboration. + +# Arguments +- `analysis_template_arn`: The Amazon Resource Name (ARN) associated with the analysis + template within a collaboration. +- `collaboration_identifier`: A unique identifier for the collaboration that the analysis + templates belong to. Currently accepts collaboration ID. 
+ +""" +function get_collaboration_analysis_template( + analysisTemplateArn, + collaborationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/analysistemplates/$(analysisTemplateArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_collaboration_analysis_template( + analysisTemplateArn, + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/analysistemplates/$(analysisTemplateArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_collaboration_configured_audience_model_association(collaboration_identifier, configured_audience_model_association_identifier) + get_collaboration_configured_audience_model_association(collaboration_identifier, configured_audience_model_association_identifier, params::Dict{String,<:Any}) + +Retrieves a configured audience model association within a collaboration. + +# Arguments +- `collaboration_identifier`: A unique identifier for the collaboration that the configured + audience model association belongs to. Accepts a collaboration ID. +- `configured_audience_model_association_identifier`: A unique identifier for the + configured audience model association that you want to retrieve. + +""" +function get_collaboration_configured_audience_model_association( + collaborationIdentifier, + configuredAudienceModelAssociationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_collaboration_configured_audience_model_association( + collaborationIdentifier, + configuredAudienceModelAssociationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier) + get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier, params::Dict{String,<:Any}) + +Returns details about a specified privacy budget template. + +# Arguments +- `collaboration_identifier`: A unique identifier for one of your collaborations. +- `privacy_budget_template_identifier`: A unique identifier for one of your privacy budget + templates. 
+ +""" +function get_collaboration_privacy_budget_template( + collaborationIdentifier, + privacyBudgetTemplateIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_collaboration_privacy_budget_template( + collaborationIdentifier, + privacyBudgetTemplateIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_audience_model_association(configured_audience_model_association_identifier, membership_identifier) + get_configured_audience_model_association(configured_audience_model_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Returns information about a configured audience model association. + +# Arguments +- `configured_audience_model_association_identifier`: A unique identifier for the + configured audience model association that you want to retrieve. +- `membership_identifier`: A unique identifier for the membership that contains the + configured audience model association that you want to retrieve. + +""" +function get_configured_audience_model_association( + configuredAudienceModelAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_audience_model_association( + configuredAudienceModelAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_configured_table(configured_table_identifier) get_configured_table(configured_table_identifier, params::Dict{String,<:Any}) @@ -780,6 +1416,47 @@ function get_membership( ) end +""" + get_privacy_budget_template(membership_identifier, privacy_budget_template_identifier) + get_privacy_budget_template(membership_identifier, privacy_budget_template_identifier, params::Dict{String,<:Any}) + +Returns details for a specified privacy budget template. + +# Arguments +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. The privacy budget template is retrieved from the collaboration that this + membership belongs to. Accepts a membership ID. +- `privacy_budget_template_identifier`: A unique identifier for your privacy budget + template. 
+ +""" +function get_privacy_budget_template( + membershipIdentifier, + privacyBudgetTemplateIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_privacy_budget_template( + membershipIdentifier, + privacyBudgetTemplateIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_protected_query(membership_identifier, protected_query_identifier) get_protected_query(membership_identifier, protected_query_identifier, params::Dict{String,<:Any}) @@ -869,27 +1546,239 @@ Retrieves a schema analysis rule. uniquely identified by a combination of the collaboration, the schema name, and their type. """ -function get_schema_analysis_rule( - collaborationIdentifier, name, type; aws_config::AbstractAWSConfig=global_aws_config() +function get_schema_analysis_rule( + collaborationIdentifier, name, type; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/schemas/$(name)/analysisRule/$(type)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_schema_analysis_rule( + collaborationIdentifier, + name, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/schemas/$(name)/analysisRule/$(type)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_analysis_templates(membership_identifier) + list_analysis_templates(membership_identifier, params::Dict{String,<:Any}) + +Lists analysis templates that the caller owns. + +# Arguments +- `membership_identifier`: The identifier for a membership resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_analysis_templates( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/analysistemplates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_analysis_templates( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/analysistemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_collaboration_analysis_templates(collaboration_identifier) + list_collaboration_analysis_templates(collaboration_identifier, params::Dict{String,<:Any}) + +Lists analysis templates within a collaboration. + +# Arguments +- `collaboration_identifier`: A unique identifier for the collaboration that the analysis + templates belong to. Currently accepts collaboration ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_collaboration_analysis_templates( + collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/analysistemplates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_collaboration_analysis_templates( + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/analysistemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_collaboration_configured_audience_model_associations(collaboration_identifier) + list_collaboration_configured_audience_model_associations(collaboration_identifier, params::Dict{String,<:Any}) + +Lists configured audience model associations within a collaboration. + +# Arguments +- `collaboration_identifier`: A unique identifier for the collaboration that the configured + audience model association belongs to. Accepts a collaboration ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_collaboration_configured_audience_model_associations( + collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/configuredaudiencemodelassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_collaboration_configured_audience_model_associations( + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/configuredaudiencemodelassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_collaboration_privacy_budget_templates(collaboration_identifier) + list_collaboration_privacy_budget_templates(collaboration_identifier, params::Dict{String,<:Any}) + +Returns an array that summarizes each privacy budget template in a specified collaboration. + +# Arguments +- `collaboration_identifier`: A unique identifier for one of your collaborations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. 
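+
+# Example
+A minimal pagination sketch; the collaboration ID and token handling are illustrative only:
+
+    # first page, capped at 10 results
+    page = list_collaboration_privacy_budget_templates(
+        "example-collaboration-id", Dict{String,Any}("maxResults" => 10)
+    )
+    # pass the returned "nextToken" back in the params dict to fetch the next page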
+""" +function list_collaboration_privacy_budget_templates( + collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/privacybudgettemplates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_collaboration_privacy_budget_templates( + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/privacybudgettemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_collaboration_privacy_budgets(collaboration_identifier, privacy_budget_type) + list_collaboration_privacy_budgets(collaboration_identifier, privacy_budget_type, params::Dict{String,<:Any}) + +Returns an array that summarizes each privacy budget in a specified collaboration. The +summary includes the collaboration ARN, creation time, creating account, and privacy budget +details. + +# Arguments +- `collaboration_identifier`: A unique identifier for one of your collaborations. +- `privacy_budget_type`: Specifies the type of the privacy budget. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_collaboration_privacy_budgets( + collaborationIdentifier, + privacyBudgetType; + aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/collaborations/$(collaborationIdentifier)/schemas/$(name)/analysisRule/$(type)"; + "/collaborations/$(collaborationIdentifier)/privacybudgets", + Dict{String,Any}("privacyBudgetType" => privacyBudgetType); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_schema_analysis_rule( +function list_collaboration_privacy_budgets( collaborationIdentifier, - name, - type, + privacyBudgetType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/collaborations/$(collaborationIdentifier)/schemas/$(name)/analysisRule/$(type)", - params; + "/collaborations/$(collaborationIdentifier)/privacybudgets", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("privacyBudgetType" => privacyBudgetType), params + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -927,6 +1816,48 @@ function list_collaborations( ) end +""" + list_configured_audience_model_associations(membership_identifier) + list_configured_audience_model_associations(membership_identifier, params::Dict{String,<:Any}) + +Lists information about requested configured audience model associations. + +# Arguments +- `membership_identifier`: A unique identifier for a membership that contains the + configured audience model associations that you want to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. 
+- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_configured_audience_model_associations( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_configured_audience_model_associations( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_configured_table_associations(membership_identifier) list_configured_table_associations(membership_identifier, params::Dict{String,<:Any}) @@ -1066,6 +1997,101 @@ function list_memberships( ) end +""" + list_privacy_budget_templates(membership_identifier) + list_privacy_budget_templates(membership_identifier, params::Dict{String,<:Any}) + +Returns detailed information about the privacy budget templates in a specified membership. + +# Arguments +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. The privacy budget templates are retrieved from the collaboration that this + membership belongs to. Accepts a membership ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_privacy_budget_templates( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/privacybudgettemplates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_privacy_budget_templates( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/privacybudgettemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_privacy_budgets(membership_identifier, privacy_budget_type) + list_privacy_budgets(membership_identifier, privacy_budget_type, params::Dict{String,<:Any}) + +Returns detailed information about the privacy budgets in a specified membership. + +# Arguments +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. The privacy budget is retrieved from the collaboration that this membership + belongs to. Accepts a membership ID. +- `privacy_budget_type`: The privacy budget type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. 
+""" +function list_privacy_budgets( + membershipIdentifier, + privacyBudgetType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/privacybudgets", + Dict{String,Any}("privacyBudgetType" => privacyBudgetType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_privacy_budgets( + membershipIdentifier, + privacyBudgetType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/privacybudgets", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("privacyBudgetType" => privacyBudgetType), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_protected_queries(membership_identifier) list_protected_queries(membership_identifier, params::Dict{String,<:Any}) @@ -1186,22 +2212,64 @@ function list_tags_for_resource( end """ - start_protected_query(membership_identifier, result_configuration, sql_parameters, type) - start_protected_query(membership_identifier, result_configuration, sql_parameters, type, params::Dict{String,<:Any}) + preview_privacy_impact(membership_identifier, parameters) + preview_privacy_impact(membership_identifier, parameters, params::Dict{String,<:Any}) + +An estimate of the number of aggregation functions that the member who can query can run +given epsilon and noise parameters. + +# Arguments +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. Accepts a membership ID. +- `parameters`: Specifies the desired epsilon and noise parameters to preview. + +""" +function preview_privacy_impact( + membershipIdentifier, parameters; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/previewprivacyimpact", + Dict{String,Any}("parameters" => parameters); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function preview_privacy_impact( + membershipIdentifier, + parameters, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/previewprivacyimpact", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("parameters" => parameters), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_protected_query(membership_identifier, sql_parameters, type) + start_protected_query(membership_identifier, sql_parameters, type, params::Dict{String,<:Any}) -Creates a protected query that is started by AWS Clean Rooms. +Creates a protected query that is started by Clean Rooms. # Arguments - `membership_identifier`: A unique identifier for the membership to run this query against. Currently accepts a membership ID. -- `result_configuration`: The details needed to write the query results. - `sql_parameters`: The protected SQL query parameters. - `type`: The type of the protected query to be started. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"resultConfiguration"`: The details needed to write the query results. 
""" function start_protected_query( membershipIdentifier, - resultConfiguration, sqlParameters, type; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1209,18 +2277,13 @@ function start_protected_query( return cleanrooms( "POST", "/memberships/$(membershipIdentifier)/protectedQueries", - Dict{String,Any}( - "resultConfiguration" => resultConfiguration, - "sqlParameters" => sqlParameters, - "type" => type, - ); + Dict{String,Any}("sqlParameters" => sqlParameters, "type" => type); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function start_protected_query( membershipIdentifier, - resultConfiguration, sqlParameters, type, params::AbstractDict{String}; @@ -1232,11 +2295,7 @@ function start_protected_query( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "resultConfiguration" => resultConfiguration, - "sqlParameters" => sqlParameters, - "type" => type, - ), + Dict{String,Any}("sqlParameters" => sqlParameters, "type" => type), params, ), ); @@ -1319,6 +2378,47 @@ function untag_resource( ) end +""" + update_analysis_template(analysis_template_identifier, membership_identifier) + update_analysis_template(analysis_template_identifier, membership_identifier, params::Dict{String,<:Any}) + +Updates the analysis template metadata. + +# Arguments +- `analysis_template_identifier`: The identifier for the analysis template resource. +- `membership_identifier`: The identifier for a membership resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the analysis template. +""" +function update_analysis_template( + analysisTemplateIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/analysistemplates/$(analysisTemplateIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_analysis_template( + analysisTemplateIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/analysistemplates/$(analysisTemplateIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_collaboration(collaboration_identifier) update_collaboration(collaboration_identifier, params::Dict{String,<:Any}) @@ -1358,6 +2458,50 @@ function update_collaboration( ) end +""" + update_configured_audience_model_association(configured_audience_model_association_identifier, membership_identifier) + update_configured_audience_model_association(configured_audience_model_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the details necessary to update a configured audience model association. + +# Arguments +- `configured_audience_model_association_identifier`: A unique identifier for the + configured audience model association that you want to update. +- `membership_identifier`: A unique identifier of the membership that contains the + configured audience model association that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the configured audience model association. +- `"name"`: A new name for the configured audience model association. 
+""" +function update_configured_audience_model_association( + configuredAudienceModelAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configured_audience_model_association( + configuredAudienceModelAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredaudiencemodelassociations/$(configuredAudienceModelAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_configured_table(configured_table_identifier) update_configured_table(configured_table_identifier, params::Dict{String,<:Any}) @@ -1502,8 +2646,10 @@ Updates a membership. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"defaultResultConfiguration"`: The default protected query result configuration as + specified by the member who can receive results. - `"queryLogStatus"`: An indicator as to whether query logging has been enabled or disabled - for the collaboration. + for the membership. """ function update_membership( membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -1529,6 +2675,59 @@ function update_membership( ) end +""" + update_privacy_budget_template(membership_identifier, privacy_budget_template_identifier, privacy_budget_type) + update_privacy_budget_template(membership_identifier, privacy_budget_template_identifier, privacy_budget_type, params::Dict{String,<:Any}) + +Updates the privacy budget template for the specified membership. + +# Arguments +- `membership_identifier`: A unique identifier for one of your memberships for a + collaboration. The privacy budget template is updated in the collaboration that this + membership belongs to. Accepts a membership ID. +- `privacy_budget_template_identifier`: A unique identifier for your privacy budget + template that you want to update. +- `privacy_budget_type`: Specifies the type of the privacy budget template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"parameters"`: Specifies the epsilon and noise parameters for the privacy budget + template. 
+""" +function update_privacy_budget_template( + membershipIdentifier, + privacyBudgetTemplateIdentifier, + privacyBudgetType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)", + Dict{String,Any}("privacyBudgetType" => privacyBudgetType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_privacy_budget_template( + membershipIdentifier, + privacyBudgetTemplateIdentifier, + privacyBudgetType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/privacybudgettemplates/$(privacyBudgetTemplateIdentifier)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("privacyBudgetType" => privacyBudgetType), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_protected_query(membership_identifier, protected_query_identifier, target_status) update_protected_query(membership_identifier, protected_query_identifier, target_status, params::Dict{String,<:Any}) diff --git a/src/services/cleanroomsml.jl b/src/services/cleanroomsml.jl new file mode 100644 index 0000000000..fa1bacaad4 --- /dev/null +++ b/src/services/cleanroomsml.jl @@ -0,0 +1,1126 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: cleanroomsml +using AWS.Compat +using AWS.UUIDs + +""" + create_audience_model(name, training_dataset_arn) + create_audience_model(name, training_dataset_arn, params::Dict{String,<:Any}) + +Defines the information necessary to create an audience model. An audience model is a +machine learning model that Clean Rooms ML trains to measure similarity between users. +Clean Rooms ML manages training and storing the audience model. The audience model can be +used in multiple calls to the StartAudienceGenerationJob API. + +# Arguments +- `name`: The name of the audience model resource. +- `training_dataset_arn`: The Amazon Resource Name (ARN) of the training dataset for this + audience model. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the audience model. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt + and decrypt customer-owned data in the trained ML model and the associated data. +- `"tags"`: The optional metadata that you apply to the resource to help you categorize and + organize them. Each tag consists of a key and an optional value, both of which you define. + The following basic restrictions apply to tags: Maximum number of tags per resource - 50. + For each resource, each tag key must be unique, and each tag key can have only one value. + Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 + Unicode characters in UTF-8. If your tagging schema is used across multiple services and + resources, remember that other services may have restrictions on allowed characters. + Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and + the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do + not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as + it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can + have this prefix. 
If a tag value has aws as its prefix but the key does not, then Clean + Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags + with only the key prefix of aws do not count against your tags per resource limit. +- `"trainingDataEndTime"`: The end date and time of the training window. +- `"trainingDataStartTime"`: The start date and time of the training window. +""" +function create_audience_model( + name, trainingDatasetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "POST", + "/audience-model", + Dict{String,Any}("name" => name, "trainingDatasetArn" => trainingDatasetArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_audience_model( + name, + trainingDatasetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/audience-model", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "trainingDatasetArn" => trainingDatasetArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_configured_audience_model(audience_model_arn, name, output_config, shared_audience_metrics) + create_configured_audience_model(audience_model_arn, name, output_config, shared_audience_metrics, params::Dict{String,<:Any}) + +Defines the information necessary to create a configured audience model. + +# Arguments +- `audience_model_arn`: The Amazon Resource Name (ARN) of the audience model to use for the + configured audience model. +- `name`: The name of the configured audience model. +- `output_config`: Configure the Amazon S3 location and IAM Role for audiences created + using this configured audience model. Each audience will have a unique location. The IAM + Role must have s3:PutObject permission on the destination Amazon S3 location. If the + destination is protected with Amazon S3 KMS-SSE, then the Role must also have the required + KMS permissions. +- `shared_audience_metrics`: Whether audience metrics are shared. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"audienceSizeConfig"`: Configure the list of output sizes of audiences that can be + created using this configured audience model. A request to StartAudienceGenerationJob that + uses this configured audience model must have an audienceSize selected from this list. You + can use the ABSOLUTE AudienceSize to configure out audience sizes using the count of + identifiers in the output. You can use the Percentage AudienceSize to configure sizes in + the range 1-100 percent. +- `"childResourceTagOnCreatePolicy"`: Configure how the service tags audience generation + jobs created using this configured audience model. If you specify NONE, the tags from the + StartAudienceGenerationJob request determine the tags of the audience generation job. If + you specify FROM_PARENT_RESOURCE, the audience generation job inherits the tags from the + configured audience model, by default. Tags in the StartAudienceGenerationJob will override + the default. When the client is in a different account than the configured audience model, + the tags from the client are never applied to a resource in the caller's account. +- `"description"`: The description of the configured audience model. +- `"minMatchingSeedSize"`: The minimum number of users from the seed audience that must + match with users in the training data of the audience model. 
The default value is 500. +- `"tags"`: The optional metadata that you apply to the resource to help you categorize and + organize them. Each tag consists of a key and an optional value, both of which you define. + The following basic restrictions apply to tags: Maximum number of tags per resource - 50. + For each resource, each tag key must be unique, and each tag key can have only one value. + Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 + Unicode characters in UTF-8. If your tagging schema is used across multiple services and + resources, remember that other services may have restrictions on allowed characters. + Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and + the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do + not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as + it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can + have this prefix. If a tag value has aws as its prefix but the key does not, then Clean + Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags + with only the key prefix of aws do not count against your tags per resource limit. +""" +function create_configured_audience_model( + audienceModelArn, + name, + outputConfig, + sharedAudienceMetrics; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/configured-audience-model", + Dict{String,Any}( + "audienceModelArn" => audienceModelArn, + "name" => name, + "outputConfig" => outputConfig, + "sharedAudienceMetrics" => sharedAudienceMetrics, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_configured_audience_model( + audienceModelArn, + name, + outputConfig, + sharedAudienceMetrics, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/configured-audience-model", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "audienceModelArn" => audienceModelArn, + "name" => name, + "outputConfig" => outputConfig, + "sharedAudienceMetrics" => sharedAudienceMetrics, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_training_dataset(name, role_arn, training_data) + create_training_dataset(name, role_arn, training_data, params::Dict{String,<:Any}) + +Defines the information necessary to create a training dataset. In Clean Rooms ML, the +TrainingDataset is metadata that points to a Glue table, which is read only during +AudienceModel creation. + +# Arguments +- `name`: The name of the training dataset. This name must be unique in your account and + region. +- `role_arn`: The ARN of the IAM role that Clean Rooms ML can assume to read the data + referred to in the dataSource field of each dataset. Passing a role across AWS accounts is + not allowed. If you pass a role that isn't in your account, you get an + AccessDeniedException error. +- `training_data`: An array of information that lists the Dataset objects, which specifies + the dataset type and details on its location and schema. You must provide a role that has + read access to these tables. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the training dataset. 
+- `"tags"`: The optional metadata that you apply to the resource to help you categorize and + organize them. Each tag consists of a key and an optional value, both of which you define. + The following basic restrictions apply to tags: Maximum number of tags per resource - 50. + For each resource, each tag key must be unique, and each tag key can have only one value. + Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 + Unicode characters in UTF-8. If your tagging schema is used across multiple services and + resources, remember that other services may have restrictions on allowed characters. + Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and + the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do + not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as + it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can + have this prefix. If a tag value has aws as its prefix but the key does not, then Clean + Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags + with only the key prefix of aws do not count against your tags per resource limit. +""" +function create_training_dataset( + name, roleArn, trainingData; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "POST", + "/training-dataset", + Dict{String,Any}( + "name" => name, "roleArn" => roleArn, "trainingData" => trainingData + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_training_dataset( + name, + roleArn, + trainingData, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/training-dataset", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "roleArn" => roleArn, "trainingData" => trainingData + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_audience_generation_job(audience_generation_job_arn) + delete_audience_generation_job(audience_generation_job_arn, params::Dict{String,<:Any}) + +Deletes the specified audience generation job, and removes all data associated with the job. + +# Arguments +- `audience_generation_job_arn`: The Amazon Resource Name (ARN) of the audience generation + job that you want to delete. + +""" +function delete_audience_generation_job( + audienceGenerationJobArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "DELETE", + "/audience-generation-job/$(audienceGenerationJobArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_audience_generation_job( + audienceGenerationJobArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "DELETE", + "/audience-generation-job/$(audienceGenerationJobArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_audience_model(audience_model_arn) + delete_audience_model(audience_model_arn, params::Dict{String,<:Any}) + +Specifies an audience model that you want to delete. You can't delete an audience model if +there are any configured audience models that depend on the audience model. + +# Arguments +- `audience_model_arn`: The Amazon Resource Name (ARN) of the audience model that you want + to delete. 
+ +""" +function delete_audience_model( + audienceModelArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "DELETE", + "/audience-model/$(audienceModelArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_audience_model( + audienceModelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "DELETE", + "/audience-model/$(audienceModelArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_configured_audience_model(configured_audience_model_arn) + delete_configured_audience_model(configured_audience_model_arn, params::Dict{String,<:Any}) + +Deletes the specified configured audience model. You can't delete a configured audience +model if there are any lookalike models that use the configured audience model. If you +delete a configured audience model, it will be removed from any collaborations that it is +associated to. + +# Arguments +- `configured_audience_model_arn`: The Amazon Resource Name (ARN) of the configured + audience model that you want to delete. + +""" +function delete_configured_audience_model( + configuredAudienceModelArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "DELETE", + "/configured-audience-model/$(configuredAudienceModelArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configured_audience_model( + configuredAudienceModelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "DELETE", + "/configured-audience-model/$(configuredAudienceModelArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_configured_audience_model_policy(configured_audience_model_arn) + delete_configured_audience_model_policy(configured_audience_model_arn, params::Dict{String,<:Any}) + +Deletes the specified configured audience model policy. + +# Arguments +- `configured_audience_model_arn`: The Amazon Resource Name (ARN) of the configured + audience model policy that you want to delete. + +""" +function delete_configured_audience_model_policy( + configuredAudienceModelArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "DELETE", + "/configured-audience-model/$(configuredAudienceModelArn)/policy"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configured_audience_model_policy( + configuredAudienceModelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "DELETE", + "/configured-audience-model/$(configuredAudienceModelArn)/policy", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_training_dataset(training_dataset_arn) + delete_training_dataset(training_dataset_arn, params::Dict{String,<:Any}) + +Specifies a training dataset that you want to delete. You can't delete a training dataset +if there are any audience models that depend on the training dataset. In Clean Rooms ML, +the TrainingDataset is metadata that points to a Glue table, which is read only during +AudienceModel creation. This action deletes the metadata. + +# Arguments +- `training_dataset_arn`: The Amazon Resource Name (ARN) of the training dataset that you + want to delete. 
+ +""" +function delete_training_dataset( + trainingDatasetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "DELETE", + "/training-dataset/$(trainingDatasetArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_training_dataset( + trainingDatasetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "DELETE", + "/training-dataset/$(trainingDatasetArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_audience_generation_job(audience_generation_job_arn) + get_audience_generation_job(audience_generation_job_arn, params::Dict{String,<:Any}) + +Returns information about an audience generation job. + +# Arguments +- `audience_generation_job_arn`: The Amazon Resource Name (ARN) of the audience generation + job that you are interested in. + +""" +function get_audience_generation_job( + audienceGenerationJobArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/audience-generation-job/$(audienceGenerationJobArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_audience_generation_job( + audienceGenerationJobArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "GET", + "/audience-generation-job/$(audienceGenerationJobArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_audience_model(audience_model_arn) + get_audience_model(audience_model_arn, params::Dict{String,<:Any}) + +Returns information about an audience model + +# Arguments +- `audience_model_arn`: The Amazon Resource Name (ARN) of the audience model that you are + interested in. + +""" +function get_audience_model( + audienceModelArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/audience-model/$(audienceModelArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_audience_model( + audienceModelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "GET", + "/audience-model/$(audienceModelArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_audience_model(configured_audience_model_arn) + get_configured_audience_model(configured_audience_model_arn, params::Dict{String,<:Any}) + +Returns information about a specified configured audience model. + +# Arguments +- `configured_audience_model_arn`: The Amazon Resource Name (ARN) of the configured + audience model that you are interested in. 
+ +""" +function get_configured_audience_model( + configuredAudienceModelArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/configured-audience-model/$(configuredAudienceModelArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_audience_model( + configuredAudienceModelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "GET", + "/configured-audience-model/$(configuredAudienceModelArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_audience_model_policy(configured_audience_model_arn) + get_configured_audience_model_policy(configured_audience_model_arn, params::Dict{String,<:Any}) + +Returns information about a configured audience model policy. + +# Arguments +- `configured_audience_model_arn`: The Amazon Resource Name (ARN) of the configured + audience model that you are interested in. + +""" +function get_configured_audience_model_policy( + configuredAudienceModelArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/configured-audience-model/$(configuredAudienceModelArn)/policy"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_audience_model_policy( + configuredAudienceModelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "GET", + "/configured-audience-model/$(configuredAudienceModelArn)/policy", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_training_dataset(training_dataset_arn) + get_training_dataset(training_dataset_arn, params::Dict{String,<:Any}) + +Returns information about a training dataset. + +# Arguments +- `training_dataset_arn`: The Amazon Resource Name (ARN) of the training dataset that you + are interested in. + +""" +function get_training_dataset( + trainingDatasetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/training-dataset/$(trainingDatasetArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_training_dataset( + trainingDatasetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "GET", + "/training-dataset/$(trainingDatasetArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_audience_export_jobs() + list_audience_export_jobs(params::Dict{String,<:Any}) + +Returns a list of the audience export jobs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"audienceGenerationJobArn"`: The Amazon Resource Name (ARN) of the audience generation + job that you are interested in. +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. 
+""" +function list_audience_export_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return cleanroomsml( + "GET", + "/audience-export-job"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_audience_export_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/audience-export-job", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_audience_generation_jobs() + list_audience_generation_jobs(params::Dict{String,<:Any}) + +Returns a list of audience generation jobs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"collaborationId"`: The identifier of the collaboration that contains the audience + generation jobs that you are interested in. +- `"configuredAudienceModelArn"`: The Amazon Resource Name (ARN) of the configured audience + model that was used for the audience generation jobs that you are interested in. +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_audience_generation_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return cleanroomsml( + "GET", + "/audience-generation-job"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_audience_generation_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/audience-generation-job", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_audience_models() + list_audience_models(params::Dict{String,<:Any}) + +Returns a list of audience models. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_audience_models(; aws_config::AbstractAWSConfig=global_aws_config()) + return cleanroomsml( + "GET", "/audience-model"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_audience_models( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/audience-model", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_configured_audience_models() + list_configured_audience_models(params::Dict{String,<:Any}) + +Returns a list of the configured audience models. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. 
+""" +function list_configured_audience_models(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/configured-audience-model"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_configured_audience_models( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/configured-audience-model", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns a list of tags for a provided resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you are interested in. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_training_datasets() + list_training_datasets(params::Dict{String,<:Any}) + +Returns a list of training datasets. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. +- `"nextToken"`: The token value retrieved from a previous call to access the next page of + results. +""" +function list_training_datasets(; aws_config::AbstractAWSConfig=global_aws_config()) + return cleanroomsml( + "GET", "/training-dataset"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_training_datasets( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "GET", + "/training-dataset", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_configured_audience_model_policy(configured_audience_model_arn, configured_audience_model_policy) + put_configured_audience_model_policy(configured_audience_model_arn, configured_audience_model_policy, params::Dict{String,<:Any}) + +Create or update the resource policy for a configured audience model. + +# Arguments +- `configured_audience_model_arn`: The Amazon Resource Name (ARN) of the configured + audience model that the resource policy will govern. +- `configured_audience_model_policy`: The IAM resource policy. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"policyExistenceCondition"`: Use this to prevent unexpected concurrent modification of + the policy. +- `"previousPolicyHash"`: A cryptographic hash of the contents of the policy used to + prevent unexpected concurrent modification of the policy. 
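+
+# Example
+A sketch of setting a policy; both values are placeholders, and the optional
+"previousPolicyHash" key described above would be passed through the params dict of the
+second method:
+
+```julia
+arn = "arn:aws:..."  # placeholder configured audience model ARN
+policy = "{...}"     # placeholder IAM resource policy document (JSON)
+put_configured_audience_model_policy(arn, policy)
+```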
+""" +function put_configured_audience_model_policy( + configuredAudienceModelArn, + configuredAudienceModelPolicy; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "PUT", + "/configured-audience-model/$(configuredAudienceModelArn)/policy", + Dict{String,Any}("configuredAudienceModelPolicy" => configuredAudienceModelPolicy); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_configured_audience_model_policy( + configuredAudienceModelArn, + configuredAudienceModelPolicy, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "PUT", + "/configured-audience-model/$(configuredAudienceModelArn)/policy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuredAudienceModelPolicy" => configuredAudienceModelPolicy + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_audience_export_job(audience_generation_job_arn, audience_size, name) + start_audience_export_job(audience_generation_job_arn, audience_size, name, params::Dict{String,<:Any}) + +Export an audience of a specified size after you have generated an audience. + +# Arguments +- `audience_generation_job_arn`: The Amazon Resource Name (ARN) of the audience generation + job that you want to export. +- `audience_size`: +- `name`: The name of the audience export job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the audience export job. +""" +function start_audience_export_job( + audienceGenerationJobArn, + audienceSize, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/audience-export-job", + Dict{String,Any}( + "audienceGenerationJobArn" => audienceGenerationJobArn, + "audienceSize" => audienceSize, + "name" => name, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_audience_export_job( + audienceGenerationJobArn, + audienceSize, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/audience-export-job", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "audienceGenerationJobArn" => audienceGenerationJobArn, + "audienceSize" => audienceSize, + "name" => name, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_audience_generation_job(configured_audience_model_arn, name, seed_audience) + start_audience_generation_job(configured_audience_model_arn, name, seed_audience, params::Dict{String,<:Any}) + +Information necessary to start the audience generation job. + +# Arguments +- `configured_audience_model_arn`: The Amazon Resource Name (ARN) of the configured + audience model that is used for this audience generation job. +- `name`: The name of the audience generation job. +- `seed_audience`: The seed audience that is used to generate the audience. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"collaborationId"`: The identifier of the collaboration that contains the audience + generation job. +- `"description"`: The description of the audience generation job. +- `"includeSeedInOutput"`: Whether the seed audience is included in the audience generation + output. 
+- `"tags"`: The optional metadata that you apply to the resource to help you categorize and + organize them. Each tag consists of a key and an optional value, both of which you define. + The following basic restrictions apply to tags: Maximum number of tags per resource - 50. + For each resource, each tag key must be unique, and each tag key can have only one value. + Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 + Unicode characters in UTF-8. If your tagging schema is used across multiple services and + resources, remember that other services may have restrictions on allowed characters. + Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and + the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do + not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as + it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can + have this prefix. If a tag value has aws as its prefix but the key does not, then Clean + Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags + with only the key prefix of aws do not count against your tags per resource limit. +""" +function start_audience_generation_job( + configuredAudienceModelArn, + name, + seedAudience; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/audience-generation-job", + Dict{String,Any}( + "configuredAudienceModelArn" => configuredAudienceModelArn, + "name" => name, + "seedAudience" => seedAudience, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_audience_generation_job( + configuredAudienceModelArn, + name, + seedAudience, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/audience-generation-job", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuredAudienceModelArn" => configuredAudienceModelArn, + "name" => name, + "seedAudience" => seedAudience, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds metadata tags to a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to assign + tags. +- `tags`: The optional metadata that you apply to the resource to help you categorize and + organize them. Each tag consists of a key and an optional value, both of which you define. + The following basic restrictions apply to tags: Maximum number of tags per resource - 50. + For each resource, each tag key must be unique, and each tag key can have only one value. + Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 + Unicode characters in UTF-8. If your tagging schema is used across multiple services and + resources, remember that other services may have restrictions on allowed characters. + Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and + the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do + not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as + it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can + have this prefix. 
If a tag value has aws as its prefix but the key does not, then Clean + Rooms considers it to be a user tag and will count against the limit of 50 tags. Tags with + only the key prefix of aws do not count against your tags per resource limit. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return cleanroomsml( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes metadata tags from a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to remove + tags from. +- `tag_keys`: The key values of tags that you want to remove. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_configured_audience_model(configured_audience_model_arn) + update_configured_audience_model(configured_audience_model_arn, params::Dict{String,<:Any}) + +Provides the information necessary to update a configured audience model. Updates that +impact audience generation jobs take effect when a new job starts, but do not impact +currently running jobs. + +# Arguments +- `configured_audience_model_arn`: The Amazon Resource Name (ARN) of the configured + audience model that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"audienceModelArn"`: The Amazon Resource Name (ARN) of the new audience model that you + want to use. +- `"audienceSizeConfig"`: The new audience size configuration. +- `"description"`: The new description of the configured audience model. +- `"minMatchingSeedSize"`: The minimum number of users from the seed audience that must + match with users in the training data of the audience model. +- `"outputConfig"`: The new output configuration. +- `"sharedAudienceMetrics"`: The new value for whether to share audience metrics. 
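+
+# Example
+A sketch of updating a single field through the params dict; the ARN and description are
+placeholders, and any of the optional keys above can be combined in the same dict:
+
+```julia
+arn = "arn:aws:..."  # placeholder configured audience model ARN
+update_configured_audience_model(arn, Dict{String,Any}("description" => "updated description"))
+```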
+""" +function update_configured_audience_model( + configuredAudienceModelArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanroomsml( + "PATCH", + "/configured-audience-model/$(configuredAudienceModelArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configured_audience_model( + configuredAudienceModelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanroomsml( + "PATCH", + "/configured-audience-model/$(configuredAudienceModelArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/cloud9.jl b/src/services/cloud9.jl index a5496d2ec5..a28e56e065 100644 --- a/src/services/cloud9.jl +++ b/src/services/cloud9.jl @@ -5,13 +5,29 @@ using AWS.Compat using AWS.UUIDs """ - create_environment_ec2(instance_type, name) - create_environment_ec2(instance_type, name, params::Dict{String,<:Any}) + create_environment_ec2(image_id, instance_type, name) + create_environment_ec2(image_id, instance_type, name, params::Dict{String,<:Any}) Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment. # Arguments +- `image_id`: The identifier for the Amazon Machine Image (AMI) that's used to create the + EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a + valid Amazon EC2 Systems Manager (SSM) path. From December 04, 2023, you will be required + to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be + reflected across all direct methods of communicating with the API, such as Amazon Web + Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change + will only affect direct API consumers, and not Cloud9 console users. We recommend using + Amazon Linux 2023 as the AMI to create your environment as it is fully supported. Since + Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu + 22.04. AMI aliases Amazon Linux 2: amazonlinux-2-x86_64 Amazon Linux 2023 + (recommended): amazonlinux-2023-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu + 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux 2: + resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Amazon Linux 2023 + (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64 Ubuntu + 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: + resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64 - `instance_type`: The type of instance to connect to the environment (for example, t2.micro). - `name`: The name of the environment to create. This name is visible to other IAM users in @@ -32,18 +48,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"imageId"`: The identifier for the Amazon Machine Image (AMI) that's used to create the - EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a - valid Amazon EC2 Systems Manager (SSM) path. The default Amazon Linux AMI is currently used - if the parameter isn't explicitly assigned a value in the request. 
In the future the - parameter for Amazon Linux will no longer be available when you specify an AMI for your - instance. Amazon Linux 2 will then become the default AMI, which is used to launch your - instance if no parameter is explicitly defined. AMI aliases Amazon Linux (default): - amazonlinux-1-x86_64 Amazon Linux 2: amazonlinux-2-x86_64 Ubuntu 18.04: - ubuntu-18.04-x86_64 SSM paths Amazon Linux (default): - resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64 Amazon Linux 2: - resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Ubuntu 18.04: - resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 - `"ownerArn"`: The Amazon Resource Name (ARN) of the environment owner. This ARN can be the ARN of any IAM principal. If this value is not specified, the ARN defaults to this environment's creator. @@ -53,16 +57,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys development environment. """ function create_environment_ec2( - instanceType, name; aws_config::AbstractAWSConfig=global_aws_config() + imageId, instanceType, name; aws_config::AbstractAWSConfig=global_aws_config() ) return cloud9( "CreateEnvironmentEC2", - Dict{String,Any}("instanceType" => instanceType, "name" => name); + Dict{String,Any}( + "imageId" => imageId, "instanceType" => instanceType, "name" => name + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_environment_ec2( + imageId, instanceType, name, params::AbstractDict{String}; @@ -73,7 +80,9 @@ function create_environment_ec2( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("instanceType" => instanceType, "name" => name), + Dict{String,Any}( + "imageId" => imageId, "instanceType" => instanceType, "name" => name + ), params, ), ); diff --git a/src/services/cloudcontrol.jl b/src/services/cloudcontrol.jl index dc2e1bff51..35d608ec04 100644 --- a/src/services/cloudcontrol.jl +++ b/src/services/cloudcontrol.jl @@ -54,18 +54,12 @@ using the RequestToken of the ProgressEvent type returned by CreateResource. # Arguments - `desired_state`: Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values. Cloud Control API - currently supports JSON as a structured data format. <p>Specify the desired state - as one of the following:</p> <ul> <li> <p>A JSON blob</p> - </li> <li> <p>A local path containing the desired state in JSON data - format</p> </li> </ul> <p>For more information, see <a - href="https://docs.aws.amazon.com/cloudcontrolapi/latest/userguide/resource-operations - -create.html#resource-operations-create-desiredstate">Composing the desired state - of the resource</a> in the <i>Amazon Web Services Cloud Control API User - Guide</i>.</p> <p>For more information about the properties of a specific - resource, refer to the related topic for the resource in the <a - href="https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resou - rce-type-ref.html">Resource and property types reference</a> in the - <i>CloudFormation Users Guide</i>.</p> + currently supports JSON as a structured data format. Specify the desired state as one of + the following: A JSON blob A local path containing the desired state in JSON data + format For more information, see Composing the desired state of the resource in the + Amazon Web Services Cloud Control API User Guide. 
For more information about the properties + of a specific resource, refer to the related topic for the resource in the Resource and + property types reference in the CloudFormation Users Guide. - `type_name`: The name of the resource type. # Optional Parameters diff --git a/src/services/cloudformation.jl b/src/services/cloudformation.jl index 169183f478..55f85d7e36 100644 --- a/src/services/cloudformation.jl +++ b/src/services/cloudformation.jl @@ -323,7 +323,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability. For more information about macros, see Using CloudFormation macros to perform custom processing on - templates. + templates. Only one of the Capabilities and ResourceType parameters can be specified. - `"ChangeSetType"`: The type of change set operation. To create a change set for a new stack, specify CREATE. To create a change set for an existing stack, specify UPDATE. To create a change set for an import operation, specify IMPORT. If you create a change set for @@ -336,12 +336,30 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys create another change set with the same name. You might retry CreateChangeSet requests to ensure that CloudFormation successfully received them. - `"Description"`: A description to help you identify this change set. +- `"ImportExistingResources"`: Indicates if the change set imports resources that already + exist. This parameter can only import resources that have custom names in templates. For + more information, see name type in the CloudFormation User Guide. To import resources that + do not accept custom names, such as EC2 instances, use the resource import feature instead. + For more information, see Bringing existing resources into CloudFormation management in the + CloudFormation User Guide. - `"IncludeNestedStacks"`: Creates a change set for the all nested stacks specified in the template. The default behavior of this action is set to False. To include nested sets in a change set, specify True. - `"NotificationARNs"`: The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list. +- `"OnStackFailure"`: Determines what action will be taken if stack creation fails. If this + parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation + must not be specified. This must be one of these values: DELETE - Deletes the change set + if the stack creation fails. This is only valid when the ChangeSetType parameter is set to + CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED. + DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true + for the DisableRollback parameter to the ExecuteChangeSet API operation. ROLLBACK - if + the stack creation fails, roll back the stack. This is equivalent to specifying false for + the DisableRollback parameter to the ExecuteChangeSet API operation. For nested stacks, + when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, + any failure in a child stack will cause the parent stack creation to fail and all stacks to + be deleted. 
- `"Parameters"`: A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type. - `"ResourceTypes"`: The template resource types that you have permissions to work with if @@ -350,7 +368,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Controlling - access with Identity and Access Management in the CloudFormation User Guide. + access with Identity and Access Management in the CloudFormation User Guide. Only one of + the Capabilities and ResourceType parameters can be specified. - `"ResourcesToImport"`: The resources to import into your stack. - `"RoleARN"`: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes when executing the change set. CloudFormation uses the @@ -371,8 +390,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"TemplateURL"`: The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. CloudFormation generates the change set by comparing this - template with the stack that you specified. Conditional: You must specify only TemplateBody - or TemplateURL. + template with the stack that you specified. The location for an Amazon S3 bucket must start + with https://. Conditional: You must specify only TemplateBody or TemplateURL. - `"UsePreviousTemplate"`: Whether to reuse the template that's associated with the stack to create the change set. """ @@ -408,6 +427,56 @@ function create_change_set( ) end +""" + create_generated_template(generated_template_name) + create_generated_template(generated_template_name, params::Dict{String,<:Any}) + +Creates a template from existing resources that are not already managed with +CloudFormation. You can check the status of the template generation using the +DescribeGeneratedTemplate API action. + +# Arguments +- `generated_template_name`: The name assigned to the generated template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Resources"`: An optional list of resources to be included in the generated template. + If no resources are specified,the template will be created without any resources. Resources + can be added to the template using the UpdateGeneratedTemplate API action. +- `"StackName"`: An optional name or ARN of a stack to use as the base stack for the + generated template. +- `"TemplateConfiguration"`: The configuration details of the generated template, including + the DeletionPolicy and UpdateReplacePolicy. 
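+
+# Example
+A sketch of starting template generation from an existing stack; both names are
+placeholders:
+
+```julia
+create_generated_template(
+    "example-generated-template",
+    Dict{String,Any}("StackName" => "example-stack"),
+)
+```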
+""" +function create_generated_template( + GeneratedTemplateName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "CreateGeneratedTemplate", + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_generated_template( + GeneratedTemplateName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "CreateGeneratedTemplate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_stack(stack_name) create_stack(stack_name, params::Dict{String,<:Any}) @@ -435,25 +504,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated - with them and edit their permissions if necessary. AWS::IAM::AccessKey - AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role - AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see - Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some - template contain macros. Macros perform custom processing on templates; this can include - simple actions like find-and-replace operations, all the way to extensive transformations - of entire templates. Because of this, users typically create a change set from the - processed template, so that they can review the changes resulting from the macros before - actually creating the stack. If your stack template contains one or more macros, and you - choose to create a stack directly from the processed template, without first reviewing the - resulting changes in a change set, you must acknowledge this capability. This includes the - AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If - you want to create a stack from a stack template that contains macros and nested stacks, - you must create the stack directly from the template using this capability. You should - only create stacks directly from a stack template that contains macros if you know what - processing the macro performs. Each macro relies on an underlying Lambda service function - for processing stack templates. Be aware that the Lambda function owner can update the - function operation without CloudFormation being notified. For more information, see Using - CloudFormation macros to perform custom processing on templates. + with them and edit their permissions if necessary. AWS::IAM::AccessKey + AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role + AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging + IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain + macros. Macros perform custom processing on templates; this can include simple actions like + find-and-replace operations, all the way to extensive transformations of entire templates. + Because of this, users typically create a change set from the processed template, so that + they can review the changes resulting from the macros before actually creating the stack. 
+ If your stack template contains one or more macros, and you choose to create a stack + directly from the processed template, without first reviewing the resulting changes in a + change set, you must acknowledge this capability. This includes the AWS::Include and + AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to + create a stack from a stack template that contains macros and nested stacks, you must + create the stack directly from the template using this capability. You should only create + stacks directly from a stack template that contains macros if you know what processing the + macro performs. Each macro relies on an underlying Lambda service function for processing + stack templates. Be aware that the Lambda function owner can update the function operation + without CloudFormation being notified. For more information, see Using CloudFormation + macros to perform custom processing on templates. Only one of the Capabilities and + ResourceType parameters can be specified. - `"ClientRequestToken"`: A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that @@ -494,7 +564,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys creation fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity - and Access Management. + and Access Management. Only one of the Capabilities and ResourceType parameters can be + specified. +- `"RetainExceptOnCreate"`: When set to true, newly created resources are deleted when the + operation rolls back. This includes newly created resources marked with a deletion policy + of Retain. Default: false - `"RoleARN"`: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all @@ -509,8 +583,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to Prevent Updates to Stack Resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. - `"StackPolicyURL"`: Location of a file containing the stack policy. The URL must point to - a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. You - can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. + a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. The + location for an Amazon S3 bucket must start with https://. You can specify either the + StackPolicyBody or the StackPolicyURL parameter, but not both. - `"Tags"`: Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified. @@ -521,8 +596,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"TemplateURL"`: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. 
For more information, go to the Template anatomy in the CloudFormation - User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL - parameter, but not both. + User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You + must specify either the TemplateBody or the TemplateURL parameter, but not both. - `"TimeoutInMinutes"`: The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back. @@ -687,8 +762,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their - permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group - AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User + permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group + AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must create the stack set @@ -876,6 +951,45 @@ function delete_change_set( ) end +""" + delete_generated_template(generated_template_name) + delete_generated_template(generated_template_name, params::Dict{String,<:Any}) + +Deleted a generated template. + +# Arguments +- `generated_template_name`: The name or Amazon Resource Name (ARN) of a generated template. + +""" +function delete_generated_template( + GeneratedTemplateName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "DeleteGeneratedTemplate", + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_generated_template( + GeneratedTemplateName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "DeleteGeneratedTemplate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_stack(stack_name) delete_stack(stack_name, params::Dict{String,<:Any}) @@ -902,6 +1016,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002. +- `"DeletionMode"`: Specifies the deletion mode for the stack. Possible values are: + STANDARD - Use the standard behavior. Specifying this value is the same as not specifying + this parameter. FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED + state due to resource deletion failure. - `"RetainResources"`: For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, CloudFormation deletes the stack but doesn't delete the retained resources. Retaining @@ -1144,6 +1262,8 @@ in the CloudFormation User Guide. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncludePropertyValues"`: If true, the returned changes include detailed changes in the + property values. - `"NextToken"`: A string (provided by the DescribeChangeSet response output) that identifies the next page of information that you want to retrieve. - `"StackName"`: If you specified the name of a change set, specify the stack name or ID @@ -1219,6 +1339,47 @@ function describe_change_set_hooks( ) end +""" + describe_generated_template(generated_template_name) + describe_generated_template(generated_template_name, params::Dict{String,<:Any}) + +Describes a generated template. The output includes details about the progress of the +creation of a generated template started by a CreateGeneratedTemplate API action or the +update of a generated template started with an UpdateGeneratedTemplate API action. + +# Arguments +- `generated_template_name`: The name or Amazon Resource Name (ARN) of a generated template. + +""" +function describe_generated_template( + GeneratedTemplateName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "DescribeGeneratedTemplate", + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_generated_template( + GeneratedTemplateName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "DescribeGeneratedTemplate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_organizations_access() describe_organizations_access(params::Dict{String,<:Any}) @@ -1285,6 +1446,41 @@ function describe_publisher( ) end +""" + describe_resource_scan(resource_scan_id) + describe_resource_scan(resource_scan_id, params::Dict{String,<:Any}) + +Describes details of a resource scan. + +# Arguments +- `resource_scan_id`: The Amazon Resource Name (ARN) of the resource scan. + +""" +function describe_resource_scan( + ResourceScanId; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "DescribeResourceScan", + Dict{String,Any}("ResourceScanId" => ResourceScanId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_resource_scan( + ResourceScanId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "DescribeResourceScan", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceScanId" => ResourceScanId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_stack_drift_detection_status(stack_drift_detection_id) describe_stack_drift_detection_status(stack_drift_detection_id, params::Dict{String,<:Any}) @@ -1341,9 +1537,9 @@ end describe_stack_events(params::Dict{String,<:Any}) Returns all stack related events for a specified stack in reverse chronological order. For -more information about a stack's event history, go to Stacks in the CloudFormation User -Guide. You can list events for stacks that have failed to create or have been deleted by -specifying the unique stack identifier (stack ID). +more information about a stack's event history, see CloudFormation stack creation events in +the CloudFormation User Guide. 
You can list events for stacks that have failed to create +or have been deleted by specifying the unique stack identifier (stack ID). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1693,18 +1889,20 @@ end describe_stacks(params::Dict{String,<:Any}) Returns the description for the specified stack; if no stack name was specified, then it -returns the description for all the stacks created. If the stack doesn't exist, an -ValidationError is returned. +returns the description for all the stacks created. For more information about a stack's +event history, see CloudFormation stack creation events in the CloudFormation User Guide. +If the stack doesn't exist, a ValidationError is returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"NextToken"`: A string that identifies the next page of stacks that you want to retrieve. - `"StackName"`: If you don't pass a parameter to StackName, the API returns a response - that describes all resources in the account. This requires ListStacks and DescribeStacks - permissions. The IAM policy below can be added to IAM policies when you want to limit - resource-level permissions and avoid returning a response when no parameter is sent in the - request: { \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\": - \"cloudformation:DescribeStacks\", \"NotResource\": + that describes all resources in the account, which can impact performance. This requires + ListStacks and DescribeStacks permissions. Consider using the ListStacks API if you're not + passing a parameter to StackName. The IAM policy below can be added to IAM policies when + you want to limit resource-level permissions and avoid returning a response when no + parameter is sent in the request: { \"Version\": \"2012-10-17\", \"Statement\": [{ + \"Effect\": \"Deny\", \"Action\": \"cloudformation:DescribeStacks\", \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }] } The name or the unique stack ID that's associated with the stack, which aren't always interchangeable: Running stacks: You can specify either the stack's name or its unique stack ID. Deleted stacks: You must specify @@ -2001,8 +2199,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are passed, only TemplateBody is used. - `"TemplateURL"`: Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more - information, go to Template Anatomy in the CloudFormation User Guide. Conditional: You must - pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. + information, go to Template Anatomy in the CloudFormation User Guide. The location for an + Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or + TemplateBody. If both are passed, only TemplateBody is used. """ function estimate_template_cost(; aws_config::AbstractAWSConfig=global_aws_config()) return cloudformation( @@ -2044,7 +2243,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that CloudFormation successfully received them. - `"DisableRollback"`: Preserves the state of previously provisioned resources when an - operation fails. Default: True + operation fails. 
This parameter can't be specified when the OnStackFailure parameter to the + CreateChangeSet API operation was specified. True - if the stack creation fails, do + nothing. This is equivalent to specifying DO_NOTHING for the OnStackFailure parameter to + the CreateChangeSet API operation. False - if the stack creation fails, roll back the + stack. This is equivalent to specifying ROLLBACK for the OnStackFailure parameter to the + CreateChangeSet API operation. Default: True +- `"RetainExceptOnCreate"`: When set to true, newly created resources are deleted when the + operation rolls back. This includes newly created resources marked with a deletion policy + of Retain. Default: false - `"StackName"`: If you specified the name of a change set, specify the stack name or Amazon Resource Name (ARN) that's associated with the change set you want to execute. """ @@ -2073,6 +2280,56 @@ function execute_change_set( ) end +""" + get_generated_template(generated_template_name) + get_generated_template(generated_template_name, params::Dict{String,<:Any}) + +Retrieves a generated template. If the template is in an InProgress or Pending status then +the template returned will be the template when the template was last in a Complete status. +If the template has not yet been in a Complete status then an empty template will be +returned. + +# Arguments +- `generated_template_name`: The name or Amazon Resource Name (ARN) of the generated + template. The format is + arn:{Partition}:cloudformation:{Region}:{Account}:generatedtemplate/{Id}. For example, + arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d + 692fe6dc . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Format"`: The language to use to retrieve for the generated template. Supported values + are: JSON YAML +""" +function get_generated_template( + GeneratedTemplateName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "GetGeneratedTemplate", + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_generated_template( + GeneratedTemplateName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "GetGeneratedTemplate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_stack_policy(stack_name) get_stack_policy(stack_name, params::Dict{String,<:Any}) @@ -2179,11 +2436,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys and a maximum length of 51,200 bytes. For more information about templates, see Template anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. +- `"TemplateSummaryConfig"`: Specifies options for the GetTemplateSummary API action. - `"TemplateURL"`: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template anatomy in the - CloudFormation User Guide. Conditional: You must specify only one of the following - parameters: StackName, StackSetName, TemplateBody, or TemplateURL. 
+ CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. + Conditional: You must specify only one of the following parameters: StackName, + StackSetName, TemplateBody, or TemplateURL. """ function get_template_summary(; aws_config::AbstractAWSConfig=global_aws_config()) return cloudformation( @@ -2324,6 +2583,36 @@ function list_exports( ) end +""" + list_generated_templates() + list_generated_templates(params::Dict{String,<:Any}) + +Lists your generated templates in this Region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: If the number of available results exceeds this maximum, the response + includes a NextToken value that you can use for the NextToken parameter to get the next set + of results. By default the ListGeneratedTemplates API action will return at most 50 results + in each response. The maximum value is 100. +- `"NextToken"`: A string that identifies the next page of resource scan results. +""" +function list_generated_templates(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudformation( + "ListGeneratedTemplates"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_generated_templates( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "ListGeneratedTemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_imports(export_name) list_imports(export_name, params::Dict{String,<:Any}) @@ -2365,6 +2654,224 @@ function list_imports( ) end +""" + list_resource_scan_related_resources(resource_scan_id, resources) + list_resource_scan_related_resources(resource_scan_id, resources, params::Dict{String,<:Any}) + +Lists the related resources for a list of resources from a resource scan. The response +indicates whether each returned resource is already managed by CloudFormation. + +# Arguments +- `resource_scan_id`: The Amazon Resource Name (ARN) of the resource scan. +- `resources`: The list of resources for which you want to get the related resources. Up to + 100 resources can be provided. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: If the number of available results exceeds this maximum, the response + includes a NextToken value that you can use for the NextToken parameter to get the next set + of results. By default the ListResourceScanRelatedResources API action will return up to + 100 results in each response. The maximum value is 100. +- `"NextToken"`: A string that identifies the next page of resource scan results. 
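+
+# Example
+A sketch of a call; the scan ARN is a placeholder and `resources` must be populated with
+resource identifier entries returned by a prior resource scan listing (their exact shape
+is defined by the CloudFormation API, not by this wrapper):
+
+```julia
+scan_arn = "arn:aws:..."  # placeholder resource scan ARN
+resources = []            # fill with resource identifier entries from a previous scan
+list_resource_scan_related_resources(scan_arn, resources)
+```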
+""" +function list_resource_scan_related_resources( + ResourceScanId, Resources; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "ListResourceScanRelatedResources", + Dict{String,Any}("ResourceScanId" => ResourceScanId, "Resources" => Resources); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_resource_scan_related_resources( + ResourceScanId, + Resources, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "ListResourceScanRelatedResources", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceScanId" => ResourceScanId, "Resources" => Resources + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_resource_scan_resources(resource_scan_id) + list_resource_scan_resources(resource_scan_id, params::Dict{String,<:Any}) + +Lists the resources from a resource scan. The results can be filtered by resource +identifier, resource type prefix, tag key, and tag value. Only resources that match all +specified filters are returned. The response indicates whether each returned resource is +already managed by CloudFormation. + +# Arguments +- `resource_scan_id`: The Amazon Resource Name (ARN) of the resource scan. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: If the number of available results exceeds this maximum, the response + includes a NextToken value that you can use for the NextToken parameter to get the next set + of results. By default the ListResourceScanResources API action will return at most 100 + results in each response. The maximum value is 100. +- `"NextToken"`: A string that identifies the next page of resource scan results. +- `"ResourceIdentifier"`: If specified, the returned resources will have the specified + resource identifier (or one of them in the case where the resource has multiple + identifiers). +- `"ResourceTypePrefix"`: If specified, the returned resources will be of any of the + resource types with the specified prefix. +- `"TagKey"`: If specified, the returned resources will have a matching tag key. +- `"TagValue"`: If specified, the returned resources will have a matching tag value. +""" +function list_resource_scan_resources( + ResourceScanId; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "ListResourceScanResources", + Dict{String,Any}("ResourceScanId" => ResourceScanId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_resource_scan_resources( + ResourceScanId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "ListResourceScanResources", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceScanId" => ResourceScanId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_resource_scans() + list_resource_scans(params::Dict{String,<:Any}) + +List the resource scans from newest to oldest. By default it will return up to 10 resource +scans. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: If the number of available results exceeds this maximum, the response + includes a NextToken value that you can use for the NextToken parameter to get the next set + of results. The default value is 10. The maximum value is 100. 
+- `"NextToken"`: A string that identifies the next page of resource scan results. +""" +function list_resource_scans(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudformation( + "ListResourceScans"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_resource_scans( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "ListResourceScans", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_stack_instance_resource_drifts(operation_id, stack_instance_account, stack_instance_region, stack_set_name) + list_stack_instance_resource_drifts(operation_id, stack_instance_account, stack_instance_region, stack_set_name, params::Dict{String,<:Any}) + +Returns drift information for resources in a stack instance. +ListStackInstanceResourceDrifts returns drift information for the most recent drift +detection operation. If an operation is in progress, it may only return partial results. + +# Arguments +- `operation_id`: The unique ID of the drift operation. +- `stack_instance_account`: The name of the Amazon Web Services account that you want to + list resource drifts for. +- `stack_instance_region`: The name of the Region where you want to list resource drifts. +- `stack_set_name`: The name or unique ID of the stack set that you want to list drifted + resources for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CallAs"`: [Service-managed permissions] Specifies whether you are acting as an account + administrator in the organization's management account or as a delegated administrator in a + member account. By default, SELF is specified. Use SELF for stack sets with self-managed + permissions. If you are signed in to the management account, specify SELF. If you are + signed in to a delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web + Services account must be registered as a delegated administrator in the management account. + For more information, see Register a delegated administrator in the CloudFormation User + Guide. +- `"MaxResults"`: The maximum number of results to be returned with a single call. If the + number of available results exceeds this maximum, the response includes a NextToken value + that you can assign to the NextToken request parameter to get the next set of results. +- `"NextToken"`: If the previous paginated request didn't return all of the remaining + results, the response object's NextToken parameter value is set to a token. To retrieve the + next set of results, call this action again and assign that token to the request object's + NextToken parameter. If there are no remaining results, the previous response object's + NextToken parameter is set to null. +- `"StackInstanceResourceDriftStatuses"`: The resource drift status of the stack instance. + DELETED: The resource differs from its expected template configuration in that the + resource has been deleted. MODIFIED: One or more resource properties differ from their + expected template values. IN_SYNC: The resource's actual configuration matches its + expected template configuration. NOT_CHECKED: CloudFormation doesn't currently return + this value. 
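+
+# Example
+A sketch of the call with all four required arguments; every value is a placeholder, and
+the optional "StackInstanceResourceDriftStatuses" filter above would go in the params dict
+of the second method:
+
+```julia
+list_stack_instance_resource_drifts(
+    "example-operation-id",  # drift operation ID (placeholder)
+    "111122223333",          # stack instance account (placeholder)
+    "us-east-1",             # stack instance Region
+    "example-stack-set",     # stack set name (placeholder)
+)
+```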
+""" +function list_stack_instance_resource_drifts( + OperationId, + StackInstanceAccount, + StackInstanceRegion, + StackSetName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "ListStackInstanceResourceDrifts", + Dict{String,Any}( + "OperationId" => OperationId, + "StackInstanceAccount" => StackInstanceAccount, + "StackInstanceRegion" => StackInstanceRegion, + "StackSetName" => StackSetName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_stack_instance_resource_drifts( + OperationId, + StackInstanceAccount, + StackInstanceRegion, + StackSetName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "ListStackInstanceResourceDrifts", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "OperationId" => OperationId, + "StackInstanceAccount" => StackInstanceAccount, + "StackInstanceRegion" => StackInstanceRegion, + "StackSetName" => StackSetName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_stack_instances(stack_set_name) list_stack_instances(stack_set_name, params::Dict{String,<:Any}) @@ -2467,6 +2974,56 @@ function list_stack_resources( ) end +""" + list_stack_set_auto_deployment_targets(stack_set_name) + list_stack_set_auto_deployment_targets(stack_set_name, params::Dict{String,<:Any}) + +Returns summary information about deployment targets for a stack set. + +# Arguments +- `stack_set_name`: The name or unique ID of the stack set that you want to get automatic + deployment targets for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CallAs"`: Specifies whether you are acting as an account administrator in the + organization's management account or as a delegated administrator in a member account. By + default, SELF is specified. Use SELF for StackSets with self-managed permissions. If you + are signed in to the management account, specify SELF. If you are signed in to a + delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web Services account + must be registered as a delegated administrator in the management account. For more + information, see Register a delegated administrator in the CloudFormation User Guide. +- `"MaxResults"`: The maximum number of results to be returned with a single call. If the + number of available results exceeds this maximum, the response includes a NextToken value + that you can assign to the NextToken request parameter to get the next set of results. +- `"NextToken"`: A string that identifies the next page of stack set deployment targets + that you want to retrieve. 
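+
+# Example
+A minimal sketch, assuming the usual AWS.jl `@service` workflow, valid credentials, and a
+placeholder stack set name; `MaxResults` is one of the optional keys listed above.
+
+```julia
+using AWS
+@service CloudFormation
+
+# List up to ten automatic deployment targets for a (placeholder) stack set.
+CloudFormation.list_stack_set_auto_deployment_targets(
+    "my-stack-set", Dict("MaxResults" => 10)
+)
+```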
+""" +function list_stack_set_auto_deployment_targets( + StackSetName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "ListStackSetAutoDeploymentTargets", + Dict{String,Any}("StackSetName" => StackSetName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_stack_set_auto_deployment_targets( + StackSetName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "ListStackSetAutoDeploymentTargets", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("StackSetName" => StackSetName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_stack_set_operation_results(operation_id, stack_set_name) list_stack_set_operation_results(operation_id, stack_set_name, params::Dict{String,<:Any}) @@ -2965,7 +3522,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys if the request is submitted multiple times. - `"ExecutionRoleArn"`: The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension. For CloudFormation to assume the specified execution - role, the role must contain a trust relationship with the CloudFormation service principle + role, the role must contain a trust relationship with the CloudFormation service principal (resources.cloudformation.amazonaws.com). For more information about adding trust relationships, see Modifying a role trust policy in the Identity and Access Management User Guide. If your extension calls Amazon Web Services APIs in any of its handlers, you must @@ -3029,6 +3586,9 @@ UPDATE_ROLLBACK_COMPLETE IMPORT_COMPLETE IMPORT_ROLLBACK_COMPLETE # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for this RollbackStack request. +- `"RetainExceptOnCreate"`: When set to true, newly created resources are deleted when the + operation rolls back. This includes newly created resources marked with a deletion policy + of Retain. Default: false - `"RoleARN"`: The Amazon Resource Name (ARN) of an Identity and Access Management role that CloudFormation assumes to rollback the stack. """ @@ -3071,8 +3631,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys either the StackPolicyBody or the StackPolicyURL parameter, but not both. - `"StackPolicyURL"`: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an Amazon S3 bucket in the same Amazon Web - Services Region as the stack. You can specify either the StackPolicyBody or the - StackPolicyURL parameter, but not both. + Services Region as the stack. The location for an Amazon S3 bucket must start with + https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but + not both. """ function set_stack_policy(StackName; aws_config::AbstractAWSConfig=global_aws_config()) return cloudformation( @@ -3124,11 +3685,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Type"`: The type of extension. Conditional: You must specify ConfigurationArn, or Type and TypeName. - `"TypeArn"`: The Amazon Resource Name (ARN) for the extension, in this account and - Region. For public extensions, this will be the ARN assigned when you activate the type in - this account and Region. 
For private extensions, this will be the ARN assigned when you
-  register the type in this account and Region. Do not include the extension versions suffix
-  at the end of the ARN. You can set the configuration for an extension, but not for a
-  specific extension version.
+  Region. For public extensions, this will be the ARN assigned when you call the ActivateType
+  API operation in this account and Region. For private extensions, this will be the ARN
+  assigned when you call the RegisterType API operation in this account and Region. Do not
+  include the extension versions suffix at the end of the ARN. You can set the configuration
+  for an extension, but not for a specific extension version.
 - `"TypeName"`: The name of the extension. Conditional: You must specify ConfigurationArn,
   or Type and TypeName.
 """
@@ -3262,6 +3823,32 @@ function signal_resource(
     )
 end
 
+"""
+    start_resource_scan()
+    start_resource_scan(params::Dict{String,<:Any})
+
+Starts a scan of the resources in this account in this Region. You can check the status of
+a scan using the ListResourceScans API action.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"ClientRequestToken"`: A unique identifier for this StartResourceScan request. Specify
+  this token if you plan to retry requests so that CloudFormation knows that you're not
+  attempting to start a new resource scan.
+"""
+function start_resource_scan(; aws_config::AbstractAWSConfig=global_aws_config())
+    return cloudformation(
+        "StartResourceScan"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function start_resource_scan(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return cloudformation(
+        "StartResourceScan", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
 """
     stop_stack_set_operation(operation_id, stack_set_name)
     stop_stack_set_operation(operation_id, stack_set_name, params::Dict{String,<:Any})
@@ -3368,6 +3955,60 @@ function test_type(
     )
 end
 
+"""
+    update_generated_template(generated_template_name)
+    update_generated_template(generated_template_name, params::Dict{String,<:Any})
+
+Updates a generated template. This can be used to change the name, add and remove
+resources, refresh resources, and change the DeletionPolicy and UpdateReplacePolicy
+settings. You can check the status of the update to the generated template using the
+DescribeGeneratedTemplate API action.
+
+# Arguments
+- `generated_template_name`: The name or Amazon Resource Name (ARN) of a generated template.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"AddResources"`: An optional list of resources to be added to the generated template.
+- `"NewGeneratedTemplateName"`: An optional new name to assign to the generated template.
+- `"RefreshAllResources"`: If true, update the resource properties in the generated
+  template with their current live state. This feature is useful when the resource properties
+  in your generated template do not reflect the live state of the resource properties.
+  This happens when a user updates the resource properties after generating a template.
+- `"RemoveResources"`: A list of logical ids for resources to remove from the generated
+  template.
+- `"TemplateConfiguration"`: The configuration details of the generated template, including
+  the DeletionPolicy and UpdateReplacePolicy.
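+
+# Example
+A brief sketch of how this might be called through the usual AWS.jl `@service` workflow;
+the template names are placeholders and the optional keys come from the list above.
+
+```julia
+using AWS
+@service CloudFormation
+
+# Rename a generated template and refresh its resource properties (placeholder names).
+CloudFormation.update_generated_template(
+    "MyGeneratedTemplate",
+    Dict("NewGeneratedTemplateName" => "MyRenamedTemplate", "RefreshAllResources" => true),
+)
+```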
+""" +function update_generated_template( + GeneratedTemplateName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudformation( + "UpdateGeneratedTemplate", + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_generated_template( + GeneratedTemplateName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "UpdateGeneratedTemplate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("GeneratedTemplateName" => GeneratedTemplateName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_stack(stack_name) update_stack(stack_name, params::Dict{String,<:Any}) @@ -3395,24 +4036,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey - AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role - AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see - Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some - template contain macros. Macros perform custom processing on templates; this can include - simple actions like find-and-replace operations, all the way to extensive transformations - of entire templates. Because of this, users typically create a change set from the - processed template, so that they can review the changes resulting from the macros before - actually updating the stack. If your stack template contains one or more macros, and you - choose to update a stack directly from the processed template, without first reviewing the - resulting changes in a change set, you must acknowledge this capability. This includes the - AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If - you want to update a stack from a stack template that contains macros and nested stacks, - you must update the stack directly from the template using this capability. You should - only update stacks directly from a stack template that contains macros if you know what - processing the macro performs. Each macro relies on an underlying Lambda service function - for processing stack templates. Be aware that the Lambda function owner can update the - function operation without CloudFormation being notified. For more information, see Using - CloudFormation Macros to Perform Custom Processing on Templates. + AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role + AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging + IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain + macros. Macros perform custom processing on templates; this can include simple actions like + find-and-replace operations, all the way to extensive transformations of entire templates. + Because of this, users typically create a change set from the processed template, so that + they can review the changes resulting from the macros before actually updating the stack. 
+ If your stack template contains one or more macros, and you choose to update a stack + directly from the processed template, without first reviewing the resulting changes in a + change set, you must acknowledge this capability. This includes the AWS::Include and + AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to + update a stack from a stack template that contains macros and nested stacks, you must + update the stack directly from the template using this capability. You should only update + stacks directly from a stack template that contains macros if you know what processing the + macro performs. Each macro relies on an underlying Lambda service function for processing + stack templates. Be aware that the Lambda function owner can update the function operation + without CloudFormation being notified. For more information, see Using CloudFormation + Macros to Perform Custom Processing on Templates. Only one of the Capabilities and + ResourceType parameters can be specified. - `"ClientRequestToken"`: A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that @@ -3439,7 +4081,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see - Controlling Access with Identity and Access Management. + Controlling Access with Identity and Access Management. Only one of the Capabilities and + ResourceType parameters can be specified. +- `"RetainExceptOnCreate"`: When set to true, newly created resources are deleted when the + operation rolls back. This includes newly created resources marked with a deletion policy + of Retain. Default: false - `"RoleARN"`: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all @@ -3462,16 +4108,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specify a stack policy, the current policy that is associated with the stack will be used. - `"StackPolicyDuringUpdateURL"`: Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in - the same Region as the stack. You can specify either the StackPolicyDuringUpdateBody or the + the same Region as the stack. The location for an Amazon S3 bucket must start with + https://. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both. If you want to update protected resources, specify a temporary overriding stack policy during this update. If you don't specify a stack policy, the current policy that is associated with the stack will be used. - `"StackPolicyURL"`: Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. - You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. 
- You might update the stack policy, for example, in order to protect a new resource that you - created during a stack update. If you don't specify a stack policy, the current policy that - is associated with the stack is unchanged. + The location for an Amazon S3 bucket must start with https://. You can specify either the + StackPolicyBody or the StackPolicyURL parameter, but not both. You might update the stack + policy, for example, in order to protect a new resource that you created during a stack + update. If you don't specify a stack policy, the current policy that is associated with the + stack is unchanged. - `"Tags"`: Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags. If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. @@ -3482,9 +4130,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. - `"TemplateURL"`: Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more - information, go to Template Anatomy in the CloudFormation User Guide. Conditional: You must - specify only one of the following parameters: TemplateBody, TemplateURL, or set the - UsePreviousTemplate to true. + information, go to Template Anatomy in the CloudFormation User Guide. The location for an + Amazon S3 bucket must start with https://. Conditional: You must specify only one of the + following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. - `"UsePreviousTemplate"`: Reuse the existing template that is associated with the stack that you are updating. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. @@ -3677,9 +4325,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their - permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group - AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User - AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in + permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group + AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User + AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a @@ -3868,8 +4516,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"TemplateURL"`: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User - Guide. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only - TemplateBody is used. + Guide. 
The location for an Amazon S3 bucket must start with https://. Conditional: You must + pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. """ function validate_template(; aws_config::AbstractAWSConfig=global_aws_config()) return cloudformation( diff --git a/src/services/cloudfront.jl b/src/services/cloudfront.jl index 9592ea1e62..21bf5b85ff 100644 --- a/src/services/cloudfront.jl +++ b/src/services/cloudfront.jl @@ -59,7 +59,8 @@ distribution. A staging distribution is a copy of an existing distribution (call primary distribution) that you can use in a continuous deployment workflow. After you create a staging distribution, you can use UpdateDistribution to modify the staging distribution's configuration. Then you can use CreateContinuousDeploymentPolicy to -incrementally move traffic to the staging distribution. +incrementally move traffic to the staging distribution. This API operation requires the +following IAM permissions: GetDistribution CreateDistribution CopyDistribution # Arguments - `caller_reference`: A value that uniquely identifies a request to create a resource. This @@ -70,6 +71,10 @@ incrementally move traffic to the staging distribution. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Enabled"`: A Boolean flag to specify the state of the staging distribution when it's + created. When you set this value to True, the staging distribution is enabled. When you set + this value to False, the staging distribution is disabled. If you omit this field, the + default value is True. - `"If-Match"`: The version identifier of the primary distribution whose configuration you are copying. This is the ETag value returned in the response to GetDistribution and GetDistributionConfig. @@ -305,7 +310,8 @@ end create_distribution_with_tags2020_05_31(distribution_config_with_tags) create_distribution_with_tags2020_05_31(distribution_config_with_tags, params::Dict{String,<:Any}) -Create a new distribution with tags. +Create a new distribution with tags. This API operation requires the following IAM +permissions: CreateDistribution TagResource # Arguments - `distribution_config_with_tags`: The distribution's configuration information. @@ -501,7 +507,8 @@ end create_invalidation2020_05_31(distribution_id, invalidation_batch) create_invalidation2020_05_31(distribution_id, invalidation_batch, params::Dict{String,<:Any}) -Create a new invalidation. +Create a new invalidation. For more information, see Invalidating files in the Amazon +CloudFront Developer Guide. # Arguments - `distribution_id`: The distribution's id. @@ -583,6 +590,47 @@ function create_key_group2020_05_31( ) end +""" + create_key_value_store2020_05_31(name) + create_key_value_store2020_05_31(name, params::Dict{String,<:Any}) + +Specifies the key value store resource to add to your account. In your account, the key +value store names must be unique. You can also import key value store data in JSON format +from an S3 bucket by providing a valid ImportSource that you own. + +# Arguments +- `name`: The name of the key value store. The minimum length is 1 character and the + maximum length is 64 characters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Comment"`: The comment of the key value store. +- `"ImportSource"`: The S3 bucket that provides the source for the import. The source must + be in a valid JSON format. 
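+
+# Example
+A minimal sketch assuming the usual AWS.jl `@service` workflow and valid credentials; the
+store name and comment are placeholders, and `ImportSource` is omitted so the store starts
+empty.
+
+```julia
+using AWS
+@service CloudFront
+
+# "my-kvs" is a placeholder name; Comment is one of the optional keys listed above.
+CloudFront.create_key_value_store2020_05_31(
+    "my-kvs", Dict("Comment" => "example key value store")
+)
+```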
+""" +function create_key_value_store2020_05_31( + Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "POST", + "/2020-05-31/key-value-store/", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_key_value_store2020_05_31( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "POST", + "/2020-05-31/key-value-store/", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_monitoring_subscription2020_05_31(distribution_id, monitoring_subscription) create_monitoring_subscription2020_05_31(distribution_id, monitoring_subscription, params::Dict{String,<:Any}) @@ -787,9 +835,9 @@ Real-time logs in the Amazon CloudFront Developer Guide. about fields, see Real-time log configuration fields in the Amazon CloudFront Developer Guide. - `name`: A unique name to identify this real-time log configuration. -- `sampling_rate`: The sampling rate for this real-time log configuration. The sampling - rate determines the percentage of viewer requests that are represented in the real-time log - data. You must provide an integer between 1 and 100, inclusive. +- `sampling_rate`: The sampling rate for this real-time log configuration. You can specify + a whole number between 1 and 100 (inclusive) to determine the percentage of viewer requests + that are represented in the real-time log data. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1298,6 +1346,49 @@ function delete_key_group2020_05_31( ) end +""" + delete_key_value_store2020_05_31(if-_match, name) + delete_key_value_store2020_05_31(if-_match, name, params::Dict{String,<:Any}) + +Specifies the key value store to delete. + +# Arguments +- `if-_match`: The key value store to delete, if a match occurs. +- `name`: The name of the key value store. + +""" +function delete_key_value_store2020_05_31( + If_Match, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "DELETE", + "/2020-05-31/key-value-store/$(Name)", + Dict{String,Any}("headers" => Dict{String,Any}("If-Match" => If_Match)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_key_value_store2020_05_31( + If_Match, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudfront( + "DELETE", + "/2020-05-31/key-value-store/$(Name)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("headers" => Dict{String,Any}("If-Match" => If_Match)), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_monitoring_subscription2020_05_31(distribution_id) delete_monitoring_subscription2020_05_31(distribution_id, params::Dict{String,<:Any}) @@ -1618,6 +1709,38 @@ function describe_function2020_05_31( ) end +""" + describe_key_value_store2020_05_31(name) + describe_key_value_store2020_05_31(name, params::Dict{String,<:Any}) + +Specifies the key value store and its configuration. + +# Arguments +- `name`: The name of the key value store. 
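+
+# Example
+A minimal sketch assuming the usual AWS.jl `@service` workflow, valid credentials, and a
+placeholder store name.
+
+```julia
+using AWS
+@service CloudFront
+
+# Returns the configuration and status of the (placeholder) key value store "my-kvs".
+CloudFront.describe_key_value_store2020_05_31("my-kvs")
+```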
+ +""" +function describe_key_value_store2020_05_31( + Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "GET", + "/2020-05-31/key-value-store/$(Name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_key_value_store2020_05_31( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "GET", + "/2020-05-31/key-value-store/$(Name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_cache_policy2020_05_31(id) get_cache_policy2020_05_31(id, params::Dict{String,<:Any}) @@ -3050,7 +3173,11 @@ List the distributions that are associated with a specified WAF web ACL. # Arguments - `web_aclid`: The ID of the WAF web ACL that you want to list the associated distributions. If you specify \"null\" for the ID, the request returns a list of the - distributions that aren't associated with a web ACL. + distributions that aren't associated with a web ACL. For WAFV2, this is the ARN of the web + ACL, such as + arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/a1b2c3d4-5678-90ab-cdef-EXA + MPLE11111. For WAF Classic, this is the ID of the web ACL, such as + a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3286,6 +3413,40 @@ function list_key_groups2020_05_31( ) end +""" + list_key_value_stores2020_05_31() + list_key_value_stores2020_05_31(params::Dict{String,<:Any}) + +Specifies the key value stores to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: The marker associated with the key value stores list. +- `"MaxItems"`: The maximum number of items in the key value stores list. +- `"Status"`: The status of the request for the key value stores list. +""" +function list_key_value_stores2020_05_31(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "GET", + "/2020-05-31/key-value-store"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_key_value_stores2020_05_31( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "GET", + "/2020-05-31/key-value-store", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_origin_access_controls2020_05_31() list_origin_access_controls2020_05_31(params::Dict{String,<:Any}) @@ -3998,7 +4159,8 @@ distribution. After using a continuous deployment policy to move a portion of yo name's traffic to the staging distribution and verifying that it works as intended, you can use this operation to copy the staging distribution's configuration to the primary distribution. This action will disable the continuous deployment policy and move your -domain's traffic back to the primary distribution. +domain's traffic back to the primary distribution. This API operation requires the +following IAM permissions: GetDistribution UpdateDistribution # Arguments - `id`: The identifier of the primary distribution to which you are copying a staging @@ -4248,6 +4410,56 @@ function update_key_group2020_05_31( ) end +""" + update_key_value_store2020_05_31(comment, if-_match, name) + update_key_value_store2020_05_31(comment, if-_match, name, params::Dict{String,<:Any}) + +Specifies the key value store to update. + +# Arguments +- `comment`: The comment of the key value store to update. 
+- `if-_match`: The key value store to update, if a match occurs. +- `name`: The name of the key value store to update. + +""" +function update_key_value_store2020_05_31( + Comment, If_Match, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudfront( + "PUT", + "/2020-05-31/key-value-store/$(Name)", + Dict{String,Any}( + "Comment" => Comment, "headers" => Dict{String,Any}("If-Match" => If_Match) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_key_value_store2020_05_31( + Comment, + If_Match, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudfront( + "PUT", + "/2020-05-31/key-value-store/$(Name)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Comment" => Comment, + "headers" => Dict{String,Any}("If-Match" => If_Match), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_origin_access_control2020_05_31(id, origin_access_control_config) update_origin_access_control2020_05_31(id, origin_access_control_config, params::Dict{String,<:Any}) diff --git a/src/services/cloudhsm_v2.jl b/src/services/cloudhsm_v2.jl index aaf6ff99a2..55326e9439 100644 --- a/src/services/cloudhsm_v2.jl +++ b/src/services/cloudhsm_v2.jl @@ -60,8 +60,8 @@ end Creates a new AWS CloudHSM cluster. # Arguments -- `hsm_type`: The type of HSM to use in the cluster. Currently the only allowed value is - hsm1.medium. +- `hsm_type`: The type of HSM to use in the cluster. The allowed values are hsm1.medium and + hsm2m.medium. - `subnet_ids`: The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria: All subnets must be in the same virtual private cloud (VPC). You @@ -70,6 +70,7 @@ Creates a new AWS CloudHSM cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BackupRetentionPolicy"`: A policy that defines how the service retains backups. +- `"Mode"`: The mode to use in the cluster. The allowed values are FIPS and NON_FIPS. - `"SourceBackupId"`: The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups. diff --git a/src/services/cloudtrail.jl b/src/services/cloudtrail.jl index c8ced3b70b..232de03e75 100644 --- a/src/services/cloudtrail.jl +++ b/src/services/cloudtrail.jl @@ -167,8 +167,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide. For more information about how to use advanced event - selectors to include non-Amazon Web Services events in your event data store, see Create an - integration to log events from outside Amazon Web Services in the CloudTrail User Guide. + selectors to include events outside of Amazon Web Services events in your event data store, + see Create an integration to log events from outside Amazon Web Services in the CloudTrail + User Guide. +- `"BillingMode"`: The billing mode for the event data store determines the cost for + ingesting events and the default and maximum retention period for the event data store. 
The + following are the possible values: EXTENDABLE_RETENTION_PRICING - This billing mode is + generally recommended if you want a flexible retention period of up to 3653 days (about 10 + years). The default retention period for this billing mode is 366 days. + FIXED_RETENTION_PRICING - This billing mode is recommended if you expect to ingest more + than 25 TB of event data per month and need a retention period of up to 2557 days (about 7 + years). The default retention period for this billing mode is 2557 days. The default + value is EXTENDABLE_RETENTION_PRICING. For more information about CloudTrail pricing, see + CloudTrail Pricing and Managing CloudTrail Lake costs. - `"KmsKeyId"`: Specifies the KMS key ID to use to encrypt the events delivered by CloudTrail. The value can be an alias name prefixed by alias/, a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. Disabling or @@ -187,8 +198,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Regions, or only from the Region in which the event data store is created. - `"OrganizationEnabled"`: Specifies whether an event data store collects events logged for an organization in Organizations. -- `"RetentionPeriod"`: The retention period of the event data store, in days. You can set a - retention period of up to 2557 days, the equivalent of seven years. +- `"RetentionPeriod"`: The retention period of the event data store, in days. If + BillingMode is set to EXTENDABLE_RETENTION_PRICING, you can set a retention period of up to + 3653 days, the equivalent of 10 years. If BillingMode is set to FIXED_RETENTION_PRICING, + you can set a retention period of up to 2557 days, the equivalent of seven years. + CloudTrail Lake determines whether to retain an event by checking if the eventTime of the + event is within the specified retention period. For example, if you set a retention period + of 90 days, CloudTrail will remove events when the eventTime is older than 90 days. If you + plan to copy trail events to this event data store, we recommend that you consider both the + age of the events that you want to copy as well as how long you want to keep the copied + events in your event data store. For example, if you copy trail events that are 5 years old + and specify a retention period of 7 years, the event data store will retain those events + for two years. - `"StartIngestion"`: Specifies whether the event data store should start ingesting live events. The default is true. - `"TagsList"`: @@ -230,7 +251,8 @@ bucket. my-_namespace and my--namespace are not valid. Not be in IP address format (for example, 192.168.5.4) - `s3_bucket_name`: Specifies the name of the Amazon S3 bucket designated for publishing - log files. See Amazon S3 Bucket Naming Requirements. + log files. For information about bucket naming rules, see Bucket naming rules in the Amazon + Simple Storage Service User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -337,11 +359,12 @@ end Disables the event data store specified by EventDataStore, which accepts an event data store ARN. After you run DeleteEventDataStore, the event data store enters a PENDING_DELETION state, and is automatically deleted after a wait period of seven days. -TerminationProtectionEnabled must be set to False on the event data store; this operation -cannot work if TerminationProtectionEnabled is True. 
After you run DeleteEventDataStore on -an event data store, you cannot run ListQueries, DescribeQuery, or GetQueryResults on -queries that are using an event data store in a PENDING_DELETION state. An event data store -in the PENDING_DELETION state does not incur costs. +TerminationProtectionEnabled must be set to False on the event data store and the +FederationStatus must be DISABLED. You cannot delete an event data store if +TerminationProtectionEnabled is True or the FederationStatus is ENABLED. After you run +DeleteEventDataStore on an event data store, you cannot run ListQueries, DescribeQuery, or +GetQueryResults on queries that are using an event data store in a PENDING_DELETION state. +An event data store in the PENDING_DELETION state does not incur costs. # Arguments - `event_data_store`: The ARN (or the ID suffix of the ARN) of the event data store to @@ -553,6 +576,106 @@ function describe_trails( ) end +""" + disable_federation(event_data_store) + disable_federation(event_data_store, params::Dict{String,<:Any}) + + Disables Lake query federation on the specified event data store. When you disable +federation, CloudTrail disables the integration with Glue, Lake Formation, and Amazon +Athena. After disabling Lake query federation, you can no longer query your event data in +Amazon Athena. No CloudTrail Lake data is deleted when you disable federation and you can +continue to run queries in CloudTrail Lake. + +# Arguments +- `event_data_store`: The ARN (or ID suffix of the ARN) of the event data store for which + you want to disable Lake query federation. + +""" +function disable_federation( + EventDataStore; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudtrail( + "DisableFederation", + Dict{String,Any}("EventDataStore" => EventDataStore); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disable_federation( + EventDataStore, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudtrail( + "DisableFederation", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("EventDataStore" => EventDataStore), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + enable_federation(event_data_store, federation_role_arn) + enable_federation(event_data_store, federation_role_arn, params::Dict{String,<:Any}) + + Enables Lake query federation on the specified event data store. Federating an event data +store lets you view the metadata associated with the event data store in the Glue Data +Catalog and run SQL queries against your event data using Amazon Athena. The table metadata +stored in the Glue Data Catalog lets the Athena query engine know how to find, read, and +process the data that you want to query. When you enable Lake query federation, CloudTrail +creates a managed database named aws:cloudtrail (if the database doesn't already exist) and +a managed federated table in the Glue Data Catalog. The event data store ID is used for the +table name. CloudTrail registers the role ARN and event data store in Lake Formation, the +service responsible for allowing fine-grained access control of the federated resources in +the Glue Data Catalog. For more information about Lake query federation, see Federate an +event data store. + +# Arguments +- `event_data_store`: The ARN (or ID suffix of the ARN) of the event data store for which + you want to enable Lake query federation. +- `federation_role_arn`: The ARN of the federation role to use for the event data store. 
+ Amazon Web Services services like Lake Formation use this federation role to access data + for the federated event data store. The federation role must exist in your account and + provide the required minimum permissions. + +""" +function enable_federation( + EventDataStore, FederationRoleArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudtrail( + "EnableFederation", + Dict{String,Any}( + "EventDataStore" => EventDataStore, "FederationRoleArn" => FederationRoleArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function enable_federation( + EventDataStore, + FederationRoleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudtrail( + "EnableFederation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EventDataStore" => EventDataStore, + "FederationRoleArn" => FederationRoleArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_channel(channel) get_channel(channel, params::Dict{String,<:Any}) @@ -698,44 +821,44 @@ function get_import( end """ - get_insight_selectors(trail_name) - get_insight_selectors(trail_name, params::Dict{String,<:Any}) + get_insight_selectors() + get_insight_selectors(params::Dict{String,<:Any}) -Describes the settings for the Insights event selectors that you configured for your trail. -GetInsightSelectors shows if CloudTrail Insights event logging is enabled on the trail, and -if it is, which insight types are enabled. If you run GetInsightSelectors on a trail that -does not have Insights events enabled, the operation throws the exception -InsightNotEnabledException For more information, see Logging CloudTrail Insights Events -for Trails in the CloudTrail User Guide. +Describes the settings for the Insights event selectors that you configured for your trail +or event data store. GetInsightSelectors shows if CloudTrail Insights event logging is +enabled on the trail or event data store, and if it is, which Insights types are enabled. +If you run GetInsightSelectors on a trail or event data store that does not have Insights +events enabled, the operation throws the exception InsightNotEnabledException Specify +either the EventDataStore parameter to get Insights event selectors for an event data +store, or the TrailName parameter to the get Insights event selectors for a trail. You +cannot specify these parameters together. For more information, see Logging CloudTrail +Insights events in the CloudTrail User Guide. -# Arguments -- `trail_name`: Specifies the name of the trail or trail ARN. If you specify a trail name, +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EventDataStore"`: Specifies the ARN (or ID suffix of the ARN) of the event data store + for which you want to get Insights selectors. You cannot use this parameter with the + TrailName parameter. +- `"TrailName"`: Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid. 
Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it - must be in the format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail - + must be in the format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail You cannot + use this parameter with the EventDataStore parameter. """ -function get_insight_selectors(TrailName; aws_config::AbstractAWSConfig=global_aws_config()) +function get_insight_selectors(; aws_config::AbstractAWSConfig=global_aws_config()) return cloudtrail( - "GetInsightSelectors", - Dict{String,Any}("TrailName" => TrailName); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "GetInsightSelectors"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function get_insight_selectors( - TrailName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return cloudtrail( "GetInsightSelectors", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("TrailName" => TrailName), params) - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -994,6 +1117,90 @@ function list_imports( ) end +""" + list_insights_metric_data(event_name, event_source, insight_type) + list_insights_metric_data(event_name, event_source, insight_type, params::Dict{String,<:Any}) + +Returns Insights metrics data for trails that have enabled Insights. The request must +include the EventSource, EventName, and InsightType parameters. If the InsightType is set +to ApiErrorRateInsight, the request must also include the ErrorCode parameter. The +following are the available time periods for ListInsightsMetricData. Each cutoff is +inclusive. Data points with a period of 60 seconds (1-minute) are available for 15 days. + Data points with a period of 300 seconds (5-minute) are available for 63 days. Data +points with a period of 3600 seconds (1 hour) are available for 90 days. Access to the +ListInsightsMetricData API operation is linked to the cloudtrail:LookupEvents action. To +use this operation, you must have permissions to perform the cloudtrail:LookupEvents action. + +# Arguments +- `event_name`: The name of the event, typically the Amazon Web Services API on which + unusual levels of activity were recorded. +- `event_source`: The Amazon Web Services service to which the request was made, such as + iam.amazonaws.com or s3.amazonaws.com. +- `insight_type`: The type of CloudTrail Insights event, which is either ApiCallRateInsight + or ApiErrorRateInsight. The ApiCallRateInsight Insights type analyzes write-only management + API calls that are aggregated per minute against a baseline API call volume. The + ApiErrorRateInsight Insights type analyzes management API calls that result in error codes. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DataType"`: Type of datapoints to return. Valid values are NonZeroData and + FillWithZeros. The default is NonZeroData. +- `"EndTime"`: Specifies, in UTC, the end time for time-series data. The value specified is + exclusive; results include data points up to the specified time stamp. The default is the + time of request. +- `"ErrorCode"`: Conditionally required if the InsightType parameter is set to + ApiErrorRateInsight. If returning metrics for the ApiErrorRateInsight Insights type, this + is the error to retrieve data for. For example, AccessDenied. +- `"MaxResults"`: The maximum number of datapoints to return. 
Valid values are integers + from 1 to 21600. The default value is 21600. +- `"NextToken"`: Returned if all datapoints can't be returned in a single call. For + example, due to reaching MaxResults. Add this parameter to the request to continue + retrieving results starting from the last evaluated point. +- `"Period"`: Granularity of data to retrieve, in seconds. Valid values are 60, 300, and + 3600. If you specify any other value, you will get an error. The default is 3600 seconds. +- `"StartTime"`: Specifies, in UTC, the start time for time-series data. The value + specified is inclusive; results include data points with the specified time stamp. The + default is 90 days before the time of request. +""" +function list_insights_metric_data( + EventName, EventSource, InsightType; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudtrail( + "ListInsightsMetricData", + Dict{String,Any}( + "EventName" => EventName, + "EventSource" => EventSource, + "InsightType" => InsightType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_insights_metric_data( + EventName, + EventSource, + InsightType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudtrail( + "ListInsightsMetricData", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EventName" => EventName, + "EventSource" => EventSource, + "InsightType" => InsightType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_public_keys() list_public_keys(params::Dict{String,<:Any}) @@ -1146,14 +1353,17 @@ end lookup_events(params::Dict{String,<:Any}) Looks up management events or CloudTrail Insights events that are captured by CloudTrail. -You can look up events that occurred in a Region within the last 90 days. Lookup supports -the following attributes for management events: Amazon Web Services access key Event ID - Event name Event source Read only Resource name Resource type User name -Lookup supports the following attributes for Insights events: Event ID Event name -Event source All attributes are optional. The default number of results returned is 50, -with a maximum of 50 possible. The response includes a token that you can use to get the -next page of results. The rate of lookup requests is limited to two per second, per -account, per Region. If this limit is exceeded, a throttling error occurs. +You can look up events that occurred in a Region within the last 90 days. LookupEvents +returns recent Insights events for trails that enable Insights. To view Insights events for +an event data store, you can run queries on your Insights event data store, and you can +also view the Lake dashboard for Insights. Lookup supports the following attributes for +management events: Amazon Web Services access key Event ID Event name Event source + Read only Resource name Resource type User name Lookup supports the following +attributes for Insights events: Event ID Event name Event source All attributes are +optional. The default number of results returned is 50, with a maximum of 50 possible. The +response includes a token that you can use to get the next page of results. The rate of +lookup requests is limited to two per second, per account, per Region. If this limit is +exceeded, a throttling error occurs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1195,27 +1405,27 @@ Configures an event selector or advanced event selectors for your trail. Use eve selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more -information about logging Insights events, see Logging Insights events for trails in the -CloudTrail User Guide. By default, trails created without specific event selectors are -configured to log all read and write management events, and no data events. When an event -occurs in your account, CloudTrail evaluates the event selectors or advanced event -selectors in all trails. For each trail, if the event matches any event selector, the trail -processes and logs the event. If the event doesn't match any event selector, the trail -doesn't log the event. Example You create an event selector for a trail and specify that -you want write-only events. The EC2 GetConsoleOutput and RunInstances API operations -occur in your account. CloudTrail evaluates whether the events match your event -selectors. The RunInstances is a write-only event and it matches your event selector. The -trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your -event selector. The trail doesn't log the event. The PutEventSelectors operation must be -called from the Region in which the trail was created; otherwise, an -InvalidHomeRegionException exception is thrown. You can configure up to five event -selectors for each trail. For more information, see Logging management events, Logging data -events, and Quotas in CloudTrail in the CloudTrail User Guide. You can add advanced event -selectors, and conditions for your advanced event selectors, up to a maximum of 500 values -for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or -EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing -EventSelectors are overwritten. For more information about advanced event selectors, see -Logging data events in the CloudTrail User Guide. +information about logging Insights events, see Logging Insights events in the CloudTrail +User Guide. By default, trails created without specific event selectors are configured to +log all read and write management events, and no data events. When an event occurs in your +account, CloudTrail evaluates the event selectors or advanced event selectors in all +trails. For each trail, if the event matches any event selector, the trail processes and +logs the event. If the event doesn't match any event selector, the trail doesn't log the +event. Example You create an event selector for a trail and specify that you want +write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your +account. CloudTrail evaluates whether the events match your event selectors. The +RunInstances is a write-only event and it matches your event selector. The trail logs the +event. The GetConsoleOutput is a read-only event that doesn't match your event selector. +The trail doesn't log the event. The PutEventSelectors operation must be called from the +Region in which the trail was created; otherwise, an InvalidHomeRegionException exception +is thrown. You can configure up to five event selectors for each trail. 
For more +information, see Logging management events, Logging data events, and Quotas in CloudTrail +in the CloudTrail User Guide. You can add advanced event selectors, and conditions for your +advanced event selectors, up to a maximum of 500 values for all conditions and selectors on +a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you +apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For +more information about advanced event selectors, see Logging data events in the CloudTrail +User Guide. # Arguments - `trail_name`: Specifies the name of the trail or trail ARN. If you specify a trail name, @@ -1263,41 +1473,62 @@ function put_event_selectors( end """ - put_insight_selectors(insight_selectors, trail_name) - put_insight_selectors(insight_selectors, trail_name, params::Dict{String,<:Any}) + put_insight_selectors(insight_selectors) + put_insight_selectors(insight_selectors, params::Dict{String,<:Any}) Lets you enable Insights event logging by specifying the Insights selectors that you want -to enable on an existing trail. You also use PutInsightSelectors to turn off Insights event -logging, by passing an empty list of insight types. The valid Insights event types in this -release are ApiErrorRateInsight and ApiCallRateInsight. To log CloudTrail Insights events -on API call volume, the trail must log write management events. To log CloudTrail Insights -events on API error rate, the trail must log read or write management events. You can call -GetEventSelectors on a trail to check whether the trail logs management events. +to enable on an existing trail or event data store. You also use PutInsightSelectors to +turn off Insights event logging, by passing an empty list of Insights types. The valid +Insights event types are ApiErrorRateInsight and ApiCallRateInsight. To enable Insights on +an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source +event data store (EventDataStore) and the destination event data store +(InsightsDestination). The source event data store logs management events and enables +Insights. The destination event data store logs Insights events based upon the management +event activity of the source event data store. The source and destination event data stores +must belong to the same Amazon Web Services account. To log Insights events for a trail, +you must specify the name (TrailName) of the CloudTrail trail for which you want to change +or add Insights selectors. To log CloudTrail Insights events on API call volume, the trail +or event data store must log write management events. To log CloudTrail Insights events on +API error rate, the trail or event data store must log read or write management events. You +can call GetEventSelectors on a trail to check whether the trail logs management events. +You can call GetEventDataStore on an event data store to check whether the event data store +logs management events. For more information, see Logging CloudTrail Insights events in the +CloudTrail User Guide. # Arguments -- `insight_selectors`: A JSON string that contains the insight types you want to log on a - trail. ApiCallRateInsight and ApiErrorRateInsight are valid Insight types. The - ApiCallRateInsight Insights type analyzes write-only management API calls that are - aggregated per minute against a baseline API call volume. The ApiErrorRateInsight Insights - type analyzes management API calls that result in error codes. 
The error is shown if the - API call is unsuccessful. -- `trail_name`: The name of the CloudTrail trail for which you want to change or add - Insights selectors. +- `insight_selectors`: A JSON string that contains the Insights types you want to log on a + trail or event data store. ApiCallRateInsight and ApiErrorRateInsight are valid Insight + types. The ApiCallRateInsight Insights type analyzes write-only management API calls that + are aggregated per minute against a baseline API call volume. The ApiErrorRateInsight + Insights type analyzes management API calls that result in error codes. The error is shown + if the API call is unsuccessful. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EventDataStore"`: The ARN (or ID suffix of the ARN) of the source event data store for + which you want to change or add Insights selectors. To enable Insights on an event data + store, you must provide both the EventDataStore and InsightsDestination parameters. You + cannot use this parameter with the TrailName parameter. +- `"InsightsDestination"`: The ARN (or ID suffix of the ARN) of the destination event data + store that logs Insights events. To enable Insights on an event data store, you must + provide both the EventDataStore and InsightsDestination parameters. You cannot use this + parameter with the TrailName parameter. +- `"TrailName"`: The name of the CloudTrail trail for which you want to change or add + Insights selectors. You cannot use this parameter with the EventDataStore and + InsightsDestination parameters. """ function put_insight_selectors( - InsightSelectors, TrailName; aws_config::AbstractAWSConfig=global_aws_config() + InsightSelectors; aws_config::AbstractAWSConfig=global_aws_config() ) return cloudtrail( "PutInsightSelectors", - Dict{String,Any}("InsightSelectors" => InsightSelectors, "TrailName" => TrailName); + Dict{String,Any}("InsightSelectors" => InsightSelectors); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function put_insight_selectors( InsightSelectors, - TrailName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -1305,11 +1536,7 @@ function put_insight_selectors( "PutInsightSelectors", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "InsightSelectors" => InsightSelectors, "TrailName" => TrailName - ), - params, + _merge, Dict{String,Any}("InsightSelectors" => InsightSelectors), params ), ); aws_config=aws_config, @@ -1539,14 +1766,14 @@ data store. By default, CloudTrail only imports events contained in the S3 bucke CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri. For more -considerations about importing trail events, see Considerations. When you start a new -import, the Destinations and ImportSource parameters are required. Before starting a new -import, disable any access control lists (ACLs) attached to the source S3 bucket. For more -information about disabling ACLs, see Controlling ownership of objects and disabling ACLs -for your bucket. When you retry an import, the ImportID parameter is required. If the -destination event data store is for an organization, you must use the management account to -import trail events. You cannot use the delegated administrator account for the -organization. 
+considerations about importing trail events, see Considerations for copying trail events in +the CloudTrail User Guide. When you start a new import, the Destinations and ImportSource +parameters are required. Before starting a new import, disable any access control lists +(ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see +Controlling ownership of objects and disabling ACLs for your bucket. When you retry an +import, the ImportID parameter is required. If the destination event data store is for +an organization, you must use the management account to import trail events. You cannot use +the delegated administrator account for the organization. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1790,12 +2017,14 @@ end Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are -integers between 90 and 2557. By default, TerminationProtection is enabled. For event data -stores for CloudTrail events, AdvancedEventSelectors includes or excludes management and -data events in your event data store. For more information about AdvancedEventSelectors, -see AdvancedEventSelectors. For event data stores for Config configuration items, Audit -Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events -of that type in your event data store. +integers between 7 and 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or +between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, +TerminationProtection is enabled. For event data stores for CloudTrail events, +AdvancedEventSelectors includes or excludes management or data events in your event data +store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors. For +event data stores for CloudTrail Insights events, Config configuration items, Audit Manager +evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that +type in your event data store. # Arguments - `event_data_store`: The ARN (or the ID suffix of the ARN) of the event data store that @@ -1806,6 +2035,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AdvancedEventSelectors"`: The advanced event selectors used to select events for the event data store. You can configure up to five advanced event selectors for each event data store. +- `"BillingMode"`: You can't change the billing mode from EXTENDABLE_RETENTION_PRICING to + FIXED_RETENTION_PRICING. If BillingMode is set to EXTENDABLE_RETENTION_PRICING and you want + to use FIXED_RETENTION_PRICING instead, you'll need to stop ingestion on the event data + store and create a new event data store that uses FIXED_RETENTION_PRICING. The billing + mode for the event data store determines the cost for ingesting events and the default and + maximum retention period for the event data store. The following are the possible values: + EXTENDABLE_RETENTION_PRICING - This billing mode is generally recommended if you want a + flexible retention period of up to 3653 days (about 10 years). The default retention period + for this billing mode is 366 days. 
FIXED_RETENTION_PRICING - This billing mode is + recommended if you expect to ingest more than 25 TB of event data per month and need a + retention period of up to 2557 days (about 7 years). The default retention period for this + billing mode is 2557 days. For more information about CloudTrail pricing, see CloudTrail + Pricing and Managing CloudTrail Lake costs. - `"KmsKeyId"`: Specifies the KMS key ID to use to encrypt the events delivered by CloudTrail. The value can be an alias name prefixed by alias/, a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. Disabling or @@ -1824,8 +2066,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Regions, or only from the Region in which it was created. - `"Name"`: The event data store name. - `"OrganizationEnabled"`: Specifies whether an event data store collects events logged for - an organization in Organizations. -- `"RetentionPeriod"`: The retention period, in days. + an organization in Organizations. Only the management account for the organization can + convert an organization event data store to a non-organization event data store, or convert + a non-organization event data store to an organization event data store. +- `"RetentionPeriod"`: The retention period of the event data store, in days. If + BillingMode is set to EXTENDABLE_RETENTION_PRICING, you can set a retention period of up to + 3653 days, the equivalent of 10 years. If BillingMode is set to FIXED_RETENTION_PRICING, + you can set a retention period of up to 2557 days, the equivalent of seven years. + CloudTrail Lake determines whether to retain an event by checking if the eventTime of the + event is within the specified retention period. For example, if you set a retention period + of 90 days, CloudTrail will remove events when the eventTime is older than 90 days. If you + decrease the retention period of an event data store, CloudTrail will remove any events + with an eventTime older than the new retention period. For example, if the previous + retention period was 365 days and you decrease it to 100 days, CloudTrail will remove + events with an eventTime older than 100 days. - `"TerminationProtectionEnabled"`: Indicates that termination protection is enabled and the event data store cannot be automatically deleted. """ @@ -1901,12 +2155,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"IsOrganizationTrail"`: Specifies whether the trail is applied to all accounts in an organization in Organizations, or only for the current Amazon Web Services account. The default is false, and cannot be true unless the call is made on behalf of an Amazon Web - Services account that is the management account or delegated administrator account for an - organization in Organizations. If the trail is not an organization trail and this is set to - true, the trail will be created in all Amazon Web Services accounts that belong to the - organization. If the trail is an organization trail and this is set to false, the trail - will remain in the current Amazon Web Services account but be deleted from all member - accounts in the organization. + Services account that is the management account for an organization in Organizations. If + the trail is not an organization trail and this is set to true, the trail will be created + in all Amazon Web Services accounts that belong to the organization. 
If the trail is an + organization trail and this is set to false, the trail will remain in the current Amazon + Web Services account but be deleted from all member accounts in the organization. Only the + management account for the organization can convert an organization trail to a + non-organization trail, or convert a non-organization trail to an organization trail. - `"KmsKeyId"`: Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by \"alias/\", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. CloudTrail also @@ -1916,7 +2171,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012 12345678-1234-1234-1234-123456789012 - `"S3BucketName"`: Specifies the name of the Amazon S3 bucket designated for publishing - log files. See Amazon S3 Bucket Naming Requirements. + log files. See Amazon S3 Bucket naming rules. - `"S3KeyPrefix"`: Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters. diff --git a/src/services/cloudwatch.jl b/src/services/cloudwatch.jl index 81a6176ec9..0cef4a926d 100644 --- a/src/services/cloudwatch.jl +++ b/src/services/cloudwatch.jl @@ -260,7 +260,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AlarmNames"`: The names of the alarms to retrieve information about. - `"AlarmTypes"`: Use this parameter to specify whether you want the operation to return metric alarms or composite alarms. If you omit this parameter, only metric alarms are - returned. + returned, even if composite alarms exist in the account. For example, if you omit this + parameter or specify MetricAlarms, the operation returns only a list of metric alarms. It + does not return any composite alarms, even if composite alarms exist in the account. If you + specify CompositeAlarms, the operation returns only a list of composite alarms, and does + not return any metric alarms. - `"ChildrenOfAlarmName"`: If you use this parameter and specify the name of a composite alarm, the operation returns information about the \"children\" alarms of the alarm you specify. These are the metric alarms and composite alarms referenced in the AlarmRule field @@ -653,7 +657,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys point. Average -- the average value from all contributors during the time period represented by that data point. - `"OrderBy"`: Determines what statistic to use to rank the contributors. Valid values are - SUM and MAXIMUM. + Sum and Maximum. """ function get_insight_rule_report( EndTime, Period, RuleName, StartTime; aws_config::AbstractAWSConfig=global_aws_config() @@ -780,7 +784,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ScanBy"`: The order in which data points should be returned. TimestampDescending returns the newest data first and paginates when the MaxDatapoints limit is reached. TimestampAscending returns the oldest data first and paginates when the MaxDatapoints limit - is reached. + is reached. If you omit this parameter, the default of TimestampDescending is used. 
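# Example
A minimal usage sketch for this wrapper; the namespace, metric name, and timestamps below are illustrative placeholders, and the trailing params Dict shows the ScanBy setting described above.

    queries = [Dict(
        "Id" => "m1",
        "MetricStat" => Dict(
            "Metric" => Dict("Namespace" => "AWS/EC2", "MetricName" => "CPUUtilization"),
            "Period" => 300,
            "Stat" => "Average",
        ),
    )]
    get_metric_data(
        "2024-06-26T06:00:00Z",   # EndTime
        queries,                  # MetricDataQueries
        "2024-06-26T00:00:00Z",   # StartTime
        Dict("ScanBy" => "TimestampAscending"),
    )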
""" function get_metric_data( EndTime, MetricDataQueries, StartTime; aws_config::AbstractAWSConfig=global_aws_config() @@ -1210,7 +1214,7 @@ Insights rules support tagging. - `resource_arn`: The ARN of the CloudWatch resource that you want to view tags for. The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name The ARN format of a Contributor Insights rule is - arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name For more information + arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference. @@ -1245,8 +1249,11 @@ end put_anomaly_detector(params::Dict{String,<:Any}) Creates an anomaly detection model for a CloudWatch metric. You can use the model to -display a band of expected normal values when the metric is graphed. For more information, -see CloudWatch Anomaly Detection. +display a band of expected normal values when the metric is graphed. If you have enabled +unified cross-account observability, and this account is a monitoring account, the metric +can be in the same account or a source account. You can specify the account ID in the +object you specify in the SingleMetricAnomalyDetector parameter. For more information, see +CloudWatch Anomaly Detection. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1255,6 +1262,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys model. You can specify as many as 10 time ranges. The configuration can also include the time zone to use for the metric. - `"Dimensions"`: The metric dimensions to create the anomaly detection model for. +- `"MetricCharacteristics"`: Use this object to include parameters to provide information + about your metric to CloudWatch to help it build more accurate anomaly detection models. + Currently, it includes the PeriodicSpikes parameter. - `"MetricMathAnomalyDetector"`: The metric math anomaly detector to be created. When using MetricMathAnomalyDetector, you cannot include the following parameters in the same operation: Dimensions MetricName Namespace Stat the @@ -1265,7 +1275,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SingleMetricAnomalyDetector"`: A single metric anomaly detector to be created. When using SingleMetricAnomalyDetector, you cannot include the following parameters in the same operation: Dimensions MetricName Namespace Stat the - MetricMatchAnomalyDetector parameters of PutAnomalyDetectorInput Instead, specify the + MetricMathAnomalyDetector parameters of PutAnomalyDetectorInput Instead, specify the single metric anomaly detector attributes as part of the property SingleMetricAnomalyDetector. - `"Stat"`: The statistic to use for the metric and the anomaly detection model. @@ -1296,26 +1306,28 @@ many as 100 underlying alarms. Any single alarm can be included in the rule expr as many as 150 composite alarms. Using composite alarms can reduce alarm noise. You can create multiple metric alarms, and also create a composite alarm and set up alerts only for the composite alarm. For example, you could create a composite alarm that goes into ALARM -state only when more than one of the underlying metric alarms are in ALARM state. -Currently, the only alarm actions that can be taken by composite alarms are notifying SNS -topics. 
It is possible to create a loop or cycle of composite alarms, where composite -alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm -A. In this scenario, you can't delete any composite alarm that is part of the cycle because -there is always still a composite alarm that depends on that alarm that you want to delete. -To get out of such a situation, you must break the cycle by changing the rule of one of the -composite alarms in the cycle to remove a dependency that creates the cycle. The simplest -change to make to break a cycle is to change the AlarmRule of one of the alarms to false. -Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the -evaluation path. When this operation creates an alarm, the alarm state is immediately set -to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any -actions associated with the new state are then executed. For a composite alarm, this -initial time after creation is the only time that the alarm can be in INSUFFICIENT_DATA -state. When you update an existing alarm, its state is left unchanged, but the update -completely overwrites the previous configuration of the alarm. To use this operation, you -must be signed on with the cloudwatch:PutCompositeAlarm permission that is scoped to *. You -can't create a composite alarms if your cloudwatch:PutCompositeAlarm permission has a -narrower scope. If you are an IAM user, you must have iam:CreateServiceLinkedRole to create -a composite alarm that has Systems Manager OpsItem actions. +state only when more than one of the underlying metric alarms are in ALARM state. Composite +alarms can take the following actions: Notify Amazon SNS topics. Invoke Lambda +functions. Create OpsItems in Systems Manager Ops Center. Create incidents in Systems +Manager Incident Manager. It is possible to create a loop or cycle of composite alarms, +where composite alarm A depends on composite alarm B, and composite alarm B also depends on +composite alarm A. In this scenario, you can't delete any composite alarm that is part of +the cycle because there is always still a composite alarm that depends on that alarm that +you want to delete. To get out of such a situation, you must break the cycle by changing +the rule of one of the composite alarms in the cycle to remove a dependency that creates +the cycle. The simplest change to make to break a cycle is to change the AlarmRule of one +of the alarms to false. Additionally, the evaluation of composite alarms stops if +CloudWatch detects a cycle in the evaluation path. When this operation creates an alarm, +the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and +its state is set appropriately. Any actions associated with the new state are then +executed. For a composite alarm, this initial time after creation is the only time that the +alarm can be in INSUFFICIENT_DATA state. When you update an existing alarm, its state is +left unchanged, but the update completely overwrites the previous configuration of the +alarm. To use this operation, you must be signed on with the cloudwatch:PutCompositeAlarm +permission that is scoped to *. You can't create a composite alarms if your +cloudwatch:PutCompositeAlarm permission has a narrower scope. If you are an IAM user, you +must have iam:CreateServiceLinkedRole to create a composite alarm that has Systems Manager +OpsItem actions. # Arguments - `alarm_name`: The name for the composite alarm. 
This name must be unique within the @@ -1364,19 +1376,38 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specified. - `"AlarmActions"`: The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid - Values: arn:aws:sns:region:account-id:sns-topic-name | - arn:aws:ssm:region:account-id:opsitem:severity + Values: ] Amazon SNS actions: arn:aws:sns:region:account-id:sns-topic-name Lambda + actions: Invoke the latest version of a Lambda function: + arn:aws:lambda:region:account-id:function:function-name Invoke a specific version of a + Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number + Invoke a function by using an alias Lambda function: + arn:aws:lambda:region:account-id:function:function-name:alias-name Systems Manager + actions: arn:aws:ssm:region:account-id:opsitem:severity - `"AlarmDescription"`: The description for the composite alarm. - `"InsufficientDataActions"`: The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon - Resource Name (ARN). Valid Values: arn:aws:sns:region:account-id:sns-topic-name + Resource Name (ARN). Valid Values: ] Amazon SNS actions: + arn:aws:sns:region:account-id:sns-topic-name Lambda actions: Invoke the latest + version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name + Invoke a specific version of a Lambda function: + arn:aws:lambda:region:account-id:function:function-name:version-number Invoke a + function by using an alias Lambda function: + arn:aws:lambda:region:account-id:function:function-name:alias-name - `"OKActions"`: The actions to execute when this alarm transitions to an OK state from any - other state. Each action is specified as an Amazon Resource Name (ARN). Valid Values: - arn:aws:sns:region:account-id:sns-topic-name -- `"Tags"`: A list of key-value pairs to associate with the composite alarm. You can - associate as many as 50 tags with an alarm. Tags can help you organize and categorize your - resources. You can also use them to scope user permissions, by granting a user permission - to access or change only resources with certain tag values. + other state. Each action is specified as an Amazon Resource Name (ARN). Valid Values: ] + Amazon SNS actions: arn:aws:sns:region:account-id:sns-topic-name Lambda actions: + Invoke the latest version of a Lambda function: + arn:aws:lambda:region:account-id:function:function-name Invoke a specific version of a + Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number + Invoke a function by using an alias Lambda function: + arn:aws:lambda:region:account-id:function:function-name:alias-name +- `"Tags"`: A list of key-value pairs to associate with the alarm. You can associate as + many as 50 tags with an alarm. To be able to associate tags with the alarm when you create + the alarm, you must have the cloudwatch:TagResource permission. Tags can help you organize + and categorize your resources. You can also use them to scope user permissions by granting + a user permission to access or change only resources with certain tag values. If you are + using this operation to update an existing alarm, any tags you specify in this parameter + are ignored. To change the tags of an existing alarm, use TagResource or UntagResource. 
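# Example
A hedged sketch that pairs an Amazon SNS action with one of the Lambda actions listed above; the alarm names, account ID, and ARNs are placeholders.

    put_composite_alarm(
        "ServiceDegraded",                                # AlarmName
        "ALARM(\"HighCPU\") AND ALARM(\"HighLatency\")",  # AlarmRule
        Dict(
            "AlarmActions" => [
                "arn:aws:sns:us-east-1:123456789012:ops-alerts",
                "arn:aws:lambda:us-east-1:123456789012:function:remediate",
            ],
            "Tags" => [Dict("Key" => "team", "Value" => "platform")],
        ),
    )

Note that attaching Tags at creation time requires the cloudwatch:TagResource permission described above.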
""" function put_composite_alarm( AlarmName, AlarmRule; aws_config::AbstractAWSConfig=global_aws_config() @@ -1632,9 +1663,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 Autoscaling action: arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-fri - endly-name:policyName/policy-friendly-name SNS notification action: - arn:aws:sns:region:account-id:sns-topic-name:autoScalingGroupName/group-friendly-name:policy - Name/policy-friendly-name SSM integration actions: + endly-name:policyName/policy-friendly-name Lambda actions: Invoke the latest + version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name + Invoke a specific version of a Lambda function: + arn:aws:lambda:region:account-id:function:function-name:version-number Invoke a + function by using an alias Lambda function: + arn:aws:lambda:region:account-id:function:function-name:alias-name SNS notification + action: arn:aws:sns:region:account-id:sns-topic-name SSM integration actions: arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name arn:aws:ssm-incidents::account-id:responseplan/response-plan-name - `"AlarmDescription"`: The description for the alarm. @@ -1649,9 +1684,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys always evaluated and possibly changes state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples. Valid Values: evaluate | ignore -- `"ExtendedStatistic"`: The percentile statistic for the metric specified in MetricName. - Specify a value between p0.0 and p100. When you call PutMetricAlarm and specify a - MetricName, you must specify either Statistic or ExtendedStatistic, but not both. +- `"ExtendedStatistic"`: The extended statistic for the metric specified in MetricName. + When you call PutMetricAlarm and specify a MetricName, you must specify either Statistic or + ExtendedStatistic but not both. If you specify ExtendedStatistic, the following are valid + values: p90 tm90 tc90 ts90 wm90 IQM PR(n:m) where n and m are + values of the metric TC(X%:X%) where X is between 10 and 90 inclusive. TM(X%:X%) + where X is between 10 and 90 inclusive. TS(X%:X%) where X is between 10 and 90 + inclusive. WM(X%:X%) where X is between 10 and 90 inclusive. For more information + about these extended statistics, see CloudWatch statistics definitions. - `"InsufficientDataActions"`: The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid values: EC2 actions: arn:aws:automate:region:ec2:stop @@ -1663,24 +1703,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 Autoscaling action: arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-fri - endly-name:policyName/policy-friendly-name SNS notification action: - arn:aws:sns:region:account-id:sns-topic-name:autoScalingGroupName/group-friendly-name:policy - Name/policy-friendly-name SSM integration actions: + endly-name:policyName/policy-friendly-name Lambda actions: Invoke the latest + version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name + Invoke a specific version of a Lambda function: + arn:aws:lambda:region:account-id:function:function-name:version-number Invoke a + function by using an alias Lambda function: + arn:aws:lambda:region:account-id:function:function-name:alias-name SNS notification + action: arn:aws:sns:region:account-id:sns-topic-name SSM integration actions: arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name arn:aws:ssm-incidents::account-id:responseplan/response-plan-name - `"MetricName"`: The name for the metric associated with the alarm. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array. If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of - the Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters. Instead, you - specify all this information in the Metrics array. + the Namespace, Dimensions, Period, Unit, Statistic, or ExtendedStatistic parameters. + Instead, you specify all this information in the Metrics array. - `"Metrics"`: An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array. Each item in the Metrics array either retrieves a metric or performs a math expression. One item in the Metrics array is the expression that the alarm watches. You designate this expression by setting ReturnData to true for this object in the array. For more information, see MetricDataQuery. If you use - the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, - Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. + the Metrics parameter, you cannot include the Namespace, MetricName, Dimensions, Period, + Unit, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array. - `"Namespace"`: The namespace for the metric associated specified in MetricName. @@ -1694,9 +1738,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 Autoscaling action: arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-fri - endly-name:policyName/policy-friendly-name SNS notification action: - arn:aws:sns:region:account-id:sns-topic-name:autoScalingGroupName/group-friendly-name:policy - Name/policy-friendly-name SSM integration actions: + endly-name:policyName/policy-friendly-name Lambda actions: Invoke the latest + version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name + Invoke a specific version of a Lambda function: + arn:aws:lambda:region:account-id:function:function-name:version-number Invoke a + function by using an alias Lambda function: + arn:aws:lambda:region:account-id:function:function-name:alias-name SNS notification + action: arn:aws:sns:region:account-id:sns-topic-name SSM integration actions: arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name arn:aws:ssm-incidents::account-id:responseplan/response-plan-name - `"Period"`: The length, in seconds, used each time the metric specified in MetricName is @@ -1717,11 +1765,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys and specify a MetricName, you must specify either Statistic or ExtendedStatistic, but not both. - `"Tags"`: A list of key-value pairs to associate with the alarm. You can associate as - many as 50 tags with an alarm. Tags can help you organize and categorize your resources. - You can also use them to scope user permissions by granting a user permission to access or - change only resources with certain tag values. If you are using this operation to update an - existing alarm, any tags you specify in this parameter are ignored. To change the tags of - an existing alarm, use TagResource or UntagResource. + many as 50 tags with an alarm. To be able to associate tags with the alarm when you create + the alarm, you must have the cloudwatch:TagResource permission. Tags can help you organize + and categorize your resources. You can also use them to scope user permissions by granting + a user permission to access or change only resources with certain tag values. If you are + using this operation to update an existing alarm, any tags you specify in this parameter + are ignored. To change the tags of an existing alarm, use TagResource or UntagResource. - `"Threshold"`: The value against which the specified statistic is compared. This parameter is required for alarms based on static thresholds, but should not be used for alarms based on anomaly detection models. @@ -1740,14 +1789,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that - specify a unit of measure, such as Percent, are aggregated separately. If you don't specify - Unit, CloudWatch retrieves all unit types that have been published for the metric and - attempts to evaluate the alarm. Usually, metrics are published with only one unit, so the - alarm works as intended. However, if the metric is published with multiple types of units - and you don't specify a unit, the alarm's behavior is not defined and it behaves - unpredictably. 
We recommend omitting Unit so that you don't inadvertently specify an - incorrect unit that is not published for this metric. Doing so causes the alarm to be stuck - in the INSUFFICIENT DATA state. + specify a unit of measure, such as Percent, are aggregated separately. If you are creating + an alarm based on a metric math expression, you can specify the unit for each metric (if + needed) within the objects in the Metrics array. If you don't specify Unit, CloudWatch + retrieves all unit types that have been published for the metric and attempts to evaluate + the alarm. Usually, metrics are published with only one unit, so the alarm works as + intended. However, if the metric is published with multiple types of units and you don't + specify a unit, the alarm's behavior is not defined and it behaves unpredictably. We + recommend omitting Unit so that you don't inadvertently specify an incorrect unit that is + not published for this metric. Doing so causes the alarm to be stuck in the INSUFFICIENT + DATA state. """ function put_metric_alarm( AlarmName, @@ -1800,7 +1851,7 @@ with the specified metric. If the specified metric does not exist, CloudWatch cr metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the -period by using the Values and Counts fields in the MetricDatum structure. Using the Values +period by using the Values and Counts fields in the MetricData structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload @@ -1893,9 +1944,9 @@ more information, see CloudWatch cross-account observability. name must be different than the names of other metric streams in this account and Region. If you are updating a metric stream, specify the name of that stream here. Valid characters are A-Z, a-z, 0-9, \"-\" and \"_\". -- `output_format`: The output format for the stream. Valid values are json and - opentelemetry0.7. For more information about metric stream output formats, see Metric - streams output formats. +- `output_format`: The output format for the stream. Valid values are json, + opentelemetry1.0, and opentelemetry0.7. For more information about metric stream output + formats, see Metric streams output formats. - `role_arn`: The ARN of an IAM role that this metric stream will use to access Amazon Kinesis Data Firehose resources. This IAM role must already exist and must be in the same account as the metric stream. This IAM role must include the following permissions: @@ -1918,8 +1969,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys list of additional statistics to stream for those metrics. The additional statistics that you can stream depend on the stream's OutputFormat. If the OutputFormat is json, you can stream any additional statistic that is supported by CloudWatch, listed in CloudWatch - statistics definitions. If the OutputFormat is opentelemetry0.7, you can stream percentile - statistics such as p95, p99.9, and so on. + statistics definitions. If the OutputFormat is opentelemetry1.0 or opentelemetry0.7, you + can stream percentile statistics such as p95, p99.9, and so on. 
- `"Tags"`: A list of key-value pairs to associate with the metric stream. You can associate as many as 50 tags with a metric stream. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a @@ -2123,7 +2174,7 @@ value for that tag. You can associate as many as 50 tags with a CloudWatch resou - `resource_arn`: The ARN of the CloudWatch resource that you're adding tags to. The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name The ARN format of a Contributor Insights rule is - arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name For more information + arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference. - `tags`: The list of key-value pairs to associate with the alarm. @@ -2167,7 +2218,7 @@ Removes one or more tags from the specified resource. - `resource_arn`: The ARN of the CloudWatch resource that you're removing tags from. The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name The ARN format of a Contributor Insights rule is - arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name For more information + arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference. - `tag_keys`: The list of tag keys to remove from the resource. diff --git a/src/services/cloudwatch_events.jl b/src/services/cloudwatch_events.jl index 0a921b0fa6..d1a88bd445 100644 --- a/src/services/cloudwatch_events.jl +++ b/src/services/cloudwatch_events.jl @@ -1563,11 +1563,11 @@ for a rule. Firehose delivery stream (Kinesis Data Firehose) Inspector asses template (Amazon Inspector) Kinesis stream (Kinesis Data Stream) Lambda function Redshift clusters (Data API statement execution) Amazon SNS topic Amazon SQS queues (includes FIFO queues SSM Automation SSM OpsItem SSM Run Command Step Functions -state machines Creating rules with built-in targets is supported only in the Management -Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API -call, EC2 StopInstances API call, and EC2 TerminateInstances API call. For some target -types, PutTargets provides target-specific parameters. If the target is a Kinesis data -stream, you can optionally specify which shard the event goes to by using the +state machines Creating rules with built-in targets is supported only in the Amazon Web +Services Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 +RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call. +For some target types, PutTargets provides target-specific parameters. If the target is a +Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field. To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions. 
For Lambda diff --git a/src/services/cloudwatch_logs.jl b/src/services/cloudwatch_logs.jl index 54c990e424..7a64d4447d 100644 --- a/src/services/cloudwatch_logs.jl +++ b/src/services/cloudwatch_logs.jl @@ -5,51 +5,76 @@ using AWS.Compat using AWS.UUIDs """ - associate_kms_key(kms_key_id, log_group_name) - associate_kms_key(kms_key_id, log_group_name, params::Dict{String,<:Any}) - -Associates the specified KMS key with the specified log group. Associating a KMS key with a -log group overrides any existing associations between the log group and a KMS key. After a -KMS key is associated with a log group, all newly ingested data for the log group is -encrypted using the KMS key. This association is stored as long as the data encrypted with -the KMS keyis still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this -data whenever it is requested. CloudWatch Logs supports only symmetric KMS keys. Do not -use an associate an asymmetric KMS key with your log group. For more information, see Using -Symmetric and Asymmetric Keys. It can take up to 5 minutes for this operation to take -effect. If you attempt to associate a KMS key with a log group but the KMS key does not -exist or the KMS key is disabled, you receive an InvalidParameterException error. + associate_kms_key(kms_key_id) + associate_kms_key(kms_key_id, params::Dict{String,<:Any}) + +Associates the specified KMS key with either one log group in the account, or with all +stored CloudWatch Logs query insights results in the account. When you use AssociateKmsKey, +you specify either the logGroupName parameter or the resourceIdentifier parameter. You +can't specify both of those parameters in the same operation. Specify the logGroupName +parameter to cause all log events stored in the log group to be encrypted with that key. +Only the log events ingested after the key is associated are encrypted with that key. +Associating a KMS key with a log group overrides any existing associations between the log +group and a KMS key. After a KMS key is associated with a log group, all newly ingested +data for the log group is encrypted using the KMS key. This association is stored as long +as the data encrypted with the KMS key is still within CloudWatch Logs. This enables +CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log +group does not cause the results of queries of that log group to be encrypted with that +key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey +operation with the resourceIdentifier parameter that specifies a query-result resource. +Specify the resourceIdentifier parameter with a query-result resource, to use that key to +encrypt the stored results of all future StartQuery operations in the account. The response +from a GetQueryResults operation will still return the query results in plain text. Even if +you have not associated a key with your query results, the query results are encrypted when +stored, using the default CloudWatch Logs method. If you run a query from a monitoring +account that queries logs in a source account, the query results key from the monitoring +account, if any, is used. If you delete the key that is used to encrypt log events or +log group query results, then all the associated stored log events or query results that +were encrypted with that key will be unencryptable and unusable. CloudWatch Logs supports +only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group +or query results.
For more information, see Using Symmetric and Asymmetric Keys. It can +take up to 5 minutes for this operation to take effect. If you attempt to associate a KMS +key with a log group but the KMS key does not exist or the KMS key is disabled, you receive +an InvalidParameterException error. # Arguments - `kms_key_id`: The Amazon Resource Name (ARN) of the KMS key to use when encrypting log data. This must be a symmetric KMS key. For more information, see Amazon Resource Names and Using Symmetric and Asymmetric Keys. -- `log_group_name`: The name of the log group. -""" -function associate_kms_key( - kmsKeyId, logGroupName; aws_config::AbstractAWSConfig=global_aws_config() -) +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"logGroupName"`: The name of the log group. In your AssociateKmsKey operation, you must + specify either the resourceIdentifier parameter or the logGroup parameter, but you can't + specify both. +- `"resourceIdentifier"`: Specifies the target for this operation. You must specify one of + the following: Specify the following ARN to have future GetQueryResults operations in + this account encrypt the results with the specified KMS key. Replace REGION and ACCOUNT_ID + with your Region and account ID. arn:aws:logs:REGION:ACCOUNT_ID:query-result:* Specify + the ARN of a log group to have CloudWatch Logs use the KMS key to encrypt log events that + are ingested and stored by that log group. The log group ARN must be in the following + format. Replace REGION and ACCOUNT_ID with your Region and account ID. + arn:aws:logs:REGION:ACCOUNT_ID:log-group:LOG_GROUP_NAME In your AssociateKmsKey + operation, you must specify either the resourceIdentifier parameter or the logGroup + parameter, but you can't specify both. +""" +function associate_kms_key(kmsKeyId; aws_config::AbstractAWSConfig=global_aws_config()) return cloudwatch_logs( "AssociateKmsKey", - Dict{String,Any}("kmsKeyId" => kmsKeyId, "logGroupName" => logGroupName); + Dict{String,Any}("kmsKeyId" => kmsKeyId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function associate_kms_key( kmsKeyId, - logGroupName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cloudwatch_logs( "AssociateKmsKey", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("kmsKeyId" => kmsKeyId, "logGroupName" => logGroupName), - params, - ), + mergewith(_merge, Dict{String,Any}("kmsKeyId" => kmsKeyId), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -85,6 +110,76 @@ function cancel_export_task( ) end +""" + create_delivery(delivery_destination_arn, delivery_source_name) + create_delivery(delivery_destination_arn, delivery_source_name, params::Dict{String,<:Any}) + +Creates a delivery. A delivery is a connection between a logical delivery source and a +logical delivery destination that you have already created. Only some Amazon Web Services +services support being configured as a delivery source using this operation. These services +are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web +Services services. A delivery destination can represent a log group in CloudWatch Logs, an +Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a +supported Amazon Web Services service and a destination, you must do the following: +Create a delivery source, which is a logical object that represents the resource that is +actually sending the logs. 
For more information, see PutDeliverySource. Create a delivery +destination, which is a logical object that represents the actual delivery destination. For +more information, see PutDeliveryDestination. If you are delivering logs cross-account, +you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM +policy to the destination. This policy allows delivery to that destination. Use +CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery +destination. You can configure a single delivery source to send logs to multiple +destinations by creating multiple deliveries. You can also create multiple deliveries to +configure multiple delivery sources to send logs to the same delivery destination. You +can't update an existing delivery. You can only create and delete deliveries. + +# Arguments +- `delivery_destination_arn`: The ARN of the delivery destination to use for this delivery. +- `delivery_source_name`: The name of the delivery source to use for this delivery. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: An optional list of key-value pairs to associate with the resource. For more + information about tagging, see Tagging Amazon Web Services resources +""" +function create_delivery( + deliveryDestinationArn, + deliverySourceName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "CreateDelivery", + Dict{String,Any}( + "deliveryDestinationArn" => deliveryDestinationArn, + "deliverySourceName" => deliverySourceName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_delivery( + deliveryDestinationArn, + deliverySourceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "CreateDelivery", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "deliveryDestinationArn" => deliveryDestinationArn, + "deliverySourceName" => deliverySourceName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_export_task(destination, from, log_group_name, to) create_export_task(destination, from, log_group_name, to, params::Dict{String,<:Any}) @@ -166,33 +261,117 @@ function create_export_task( ) end +""" + create_log_anomaly_detector(log_group_arn_list) + create_log_anomaly_detector(log_group_arn_list, params::Dict{String,<:Any}) + +Creates an anomaly detector that regularly scans one or more log groups and look for +patterns and anomalies in the logs. An anomaly detector can help surface issues by +automatically discovering anomalies in your log event traffic. An anomaly detector uses +machine learning algorithms to scan log events and find patterns. A pattern is a shared +text structure that recurs among your log fields. Patterns provide a useful tool for +analyzing large sets of logs because a large number of log events can often be compressed +into a few patterns. The anomaly detector uses pattern recognition to find anomalies, which +are unusual log events. It uses the evaluationFrequency to compare current log events and +patterns with trained baselines. Fields within a pattern are called tokens. Fields that +vary within a pattern, such as a request ID or timestamp, are referred to as dynamic tokens +and represented by <*>. 
The following is an example of a pattern: [INFO] Request +time: <*> ms This pattern represents log events like [INFO] Request time: 327 ms and +other similar log events that differ only by the number, in this case 327. When the pattern +is displayed, the different numbers are replaced by <*> Any parts of log events +that are masked as sensitive data are not scanned for anomalies. For more information about +masking sensitive data, see Help protect sensitive log data with masking. + +# Arguments +- `log_group_arn_list`: An array containing the ARN of the log group that this anomaly + detector will watch. You can specify only one log group ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"anomalyVisibilityTime"`: The number of days to have visibility on an anomaly. After + this time period has elapsed for an anomaly, it will be automatically baselined and the + anomaly detector will treat new occurrences of a similar anomaly as normal. Therefore, if + you do not correct the cause of an anomaly during the time period specified in + anomalyVisibilityTime, it will be considered normal going forward and will not be detected + as an anomaly. +- `"detectorName"`: A name for this anomaly detector. +- `"evaluationFrequency"`: Specifies how often the anomaly detector is to run and look for + anomalies. Set this value according to the frequency that the log group receives new logs. + For example, if the log group receives new log events every 10 minutes, then 15 minutes + might be a good setting for evaluationFrequency . +- `"filterPattern"`: You can use this parameter to limit the anomaly detection model to + examine only log events that match the pattern you specify here. For more information, see + Filter and Pattern Syntax. +- `"kmsKeyId"`: Optionally assigns a KMS key to secure this anomaly detector and its + findings. If a key is assigned, the anomalies found and the model used by this detector are + encrypted at rest with the key. If a key is assigned to an anomaly detector, a user must + have permissions for both this key and for the anomaly detector to retrieve information + about the anomalies that it finds. For more information about using a KMS key and to see + the required IAM policy, see Use a KMS key with an anomaly detector. +- `"tags"`: An optional list of key-value pairs to associate with the resource. For more + information about tagging, see Tagging Amazon Web Services resources +""" +function create_log_anomaly_detector( + logGroupArnList; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "CreateLogAnomalyDetector", + Dict{String,Any}("logGroupArnList" => logGroupArnList); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_log_anomaly_detector( + logGroupArnList, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "CreateLogAnomalyDetector", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("logGroupArnList" => logGroupArnList), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_log_group(log_group_name) create_log_group(log_group_name, params::Dict{String,<:Any}) -Creates a log group with the specified name. You can create up to 20,000 log groups per -account. You must use the following guidelines when naming a log group: Log group names -must be unique within a Region for an Amazon Web Services account.
Log group names can be -between 1 and 512 characters long. Log group names consist of the following characters: -a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' -(number sign) When you create a log group, by default the log events in the log group do -not expire. To set a retention policy so that events expire and are deleted after a -specified time, use PutRetentionPolicy. If you associate an KMS key with the log group, -ingested data is encrypted using the KMS key. This association is stored as long as the -data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch -Logs to decrypt this data whenever it is requested. If you attempt to associate a KMS key -with the log group but the KMS key does not exist or the KMS key is disabled, you receive -an InvalidParameterException error. CloudWatch Logs supports only symmetric KMS keys. Do -not associate an asymmetric KMS key with your log group. For more information, see Using +Creates a log group with the specified name. You can create up to 1,000,000 log groups per +Region per account. You must use the following guidelines when naming a log group: Log +group names must be unique within a Region for an Amazon Web Services account. Log group +names can be between 1 and 512 characters long. Log group names consist of the following +characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' +(period), and '#' (number sign) Log group names can't start with the string aws/ When +you create a log group, by default the log events in the log group do not expire. To set a +retention policy so that events expire and are deleted after a specified time, use +PutRetentionPolicy. If you associate an KMS key with the log group, ingested data is +encrypted using the KMS key. This association is stored as long as the data encrypted with +the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this +data whenever it is requested. If you attempt to associate a KMS key with the log group but +the KMS key does not exist or the KMS key is disabled, you receive an +InvalidParameterException error. CloudWatch Logs supports only symmetric KMS keys. Do not +associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys. # Arguments -- `log_group_name`: The name of the log group. +- `log_group_name`: A name for the log group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"kmsKeyId"`: The Amazon Resource Name (ARN) of the KMS key to use when encrypting log data. For more information, see Amazon Resource Names. +- `"logGroupClass"`: Use this parameter to specify the log group class for this log group. + There are two classes: The Standard log class supports all CloudWatch Logs features. + The Infrequent Access log class supports a subset of CloudWatch Logs features and incurs + lower costs. If you omit this parameter, the default of STANDARD is used. The value of + logGroupClass can't be changed after a log group is created. For details about the + features supported by each class, see Log classes - `"tags"`: The key-value pairs to use for the tags. You can grant users access to certain log groups while preventing them from accessing other log groups. To do so, tag your groups and use IAM policies that refer to those tags. 
To assign tags when you create a log group, @@ -277,13 +456,17 @@ end delete_account_policy(policy_name, policy_type) delete_account_policy(policy_name, policy_type, params::Dict{String,<:Any}) -Deletes a CloudWatch Logs account policy. To use this operation, you must be signed on with -the logs:DeleteDataProtectionPolicy and logs:DeleteAccountPolicy permissions. +Deletes a CloudWatch Logs account policy. This stops the policy from applying to all log +groups or a subset of log groups in the account. Log-group level policies will still be in +effect. To use this operation, you must be signed on with the correct permissions depending +on the type of policy that you are deleting. To delete a data protection policy, you must +have the logs:DeleteDataProtectionPolicy and logs:DeleteAccountPolicy permissions. To +delete a subscription filter policy, you must have the logs:DeleteSubscriptionFilter and +logs:DeleteAccountPolicy permissions. # Arguments - `policy_name`: The name of the policy to delete. -- `policy_type`: The type of policy to delete. Currently, the only valid value is - DATA_PROTECTION_POLICY. +- `policy_type`: The type of policy to delete. """ function delete_account_policy( @@ -355,6 +538,149 @@ function delete_data_protection_policy( ) end +""" + delete_delivery(id) + delete_delivery(id, params::Dict{String,<:Any}) + +Deletes s delivery. A delivery is a connection between a logical delivery source and a +logical delivery destination. Deleting a delivery only deletes the connection between the +delivery source and delivery destination. It does not delete the delivery destination or +the delivery source. + +# Arguments +- `id`: The unique ID of the delivery to delete. You can find the ID of a delivery with the + DescribeDeliveries operation. + +""" +function delete_delivery(id; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "DeleteDelivery", + Dict{String,Any}("id" => id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_delivery( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DeleteDelivery", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_delivery_destination(name) + delete_delivery_destination(name, params::Dict{String,<:Any}) + +Deletes a delivery destination. A delivery is a connection between a logical delivery +source and a logical delivery destination. You can't delete a delivery destination if any +current deliveries are associated with it. To find whether any deliveries are associated +with this delivery destination, use the DescribeDeliveries operation and check the +deliveryDestinationArn field in the results. + +# Arguments +- `name`: The name of the delivery destination that you want to delete. You can find a list + of delivery destionation names by using the DescribeDeliveryDestinations operation. 
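For the reworked DeleteAccountPolicy text above, a hedged usage sketch; the policy name is a placeholder and the `@service` binding is assumed.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    # Removes an account-level policy; log-group-level policies stay in effect.
    CloudWatch_Logs.delete_account_policy(
        "my-account-data-protection-policy",  # policyName
        "DATA_PROTECTION_POLICY",             # policyType
    )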
+ +""" +function delete_delivery_destination( + name; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DeleteDeliveryDestination", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_delivery_destination( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DeleteDeliveryDestination", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_delivery_destination_policy(delivery_destination_name) + delete_delivery_destination_policy(delivery_destination_name, params::Dict{String,<:Any}) + +Deletes a delivery destination policy. For more information about these policies, see +PutDeliveryDestinationPolicy. + +# Arguments +- `delivery_destination_name`: The name of the delivery destination that you want to delete + the policy for. + +""" +function delete_delivery_destination_policy( + deliveryDestinationName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DeleteDeliveryDestinationPolicy", + Dict{String,Any}("deliveryDestinationName" => deliveryDestinationName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_delivery_destination_policy( + deliveryDestinationName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "DeleteDeliveryDestinationPolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("deliveryDestinationName" => deliveryDestinationName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_delivery_source(name) + delete_delivery_source(name, params::Dict{String,<:Any}) + +Deletes a delivery source. A delivery is a connection between a logical delivery source and +a logical delivery destination. You can't delete a delivery source if any current +deliveries are associated with it. To find whether any deliveries are associated with this +delivery source, use the DescribeDeliveries operation and check the deliverySourceName +field in the results. + +# Arguments +- `name`: The name of the delivery source that you want to delete. + +""" +function delete_delivery_source(name; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "DeleteDeliverySource", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_delivery_source( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DeleteDeliverySource", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_destination(destination_name) delete_destination(destination_name, params::Dict{String,<:Any}) @@ -394,6 +720,44 @@ function delete_destination( ) end +""" + delete_log_anomaly_detector(anomaly_detector_arn) + delete_log_anomaly_detector(anomaly_detector_arn, params::Dict{String,<:Any}) + +Deletes the specified CloudWatch Logs anomaly detector. + +# Arguments +- `anomaly_detector_arn`: The ARN of the anomaly detector to delete. You can find the ARNs + of log anomaly detectors in your account by using the ListLogAnomalyDetectors operation. 
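Because a delivery source or destination can't be deleted while deliveries still reference it, a teardown sketch would delete the delivery itself first. The IDs and names below are placeholders and the `@service` binding is assumed.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    CloudWatch_Logs.delete_delivery("AbCdEf12345")                    # the connection itself
    CloudWatch_Logs.delete_delivery_source("my-workmail-source")      # then the logical source
    CloudWatch_Logs.delete_delivery_destination("my-s3-destination")  # and the destination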
+ +""" +function delete_log_anomaly_detector( + anomalyDetectorArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DeleteLogAnomalyDetector", + Dict{String,Any}("anomalyDetectorArn" => anomalyDetectorArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_log_anomaly_detector( + anomalyDetectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "DeleteLogAnomalyDetector", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("anomalyDetectorArn" => anomalyDetectorArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_log_group(log_group_name) delete_log_group(log_group_name, params::Dict{String,<:Any}) @@ -670,8 +1034,7 @@ Returns a list of all CloudWatch Logs account policies in the account. # Arguments - `policy_type`: Use this parameter to limit the returned policies to only the policies - that match the policy type that you specify. Currently, the only valid value is - DATA_PROTECTION_POLICY. + that match the policy type that you specify. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -708,6 +1071,93 @@ function describe_account_policies( ) end +""" + describe_deliveries() + describe_deliveries(params::Dict{String,<:Any}) + +Retrieves a list of the deliveries that have been created in the account. A delivery is a +connection between a delivery source and a delivery destination . A delivery source +represents an Amazon Web Services resource that sends logs to an logs delivery destination. +The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web +Services services support being configured as a delivery source. These services are listed +in Enable logging from Amazon Web Services services. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"limit"`: Optionally specify the maximum number of deliveries to return in the response. +- `"nextToken"`: +""" +function describe_deliveries(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "DescribeDeliveries"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_deliveries( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DescribeDeliveries", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + describe_delivery_destinations() + describe_delivery_destinations(params::Dict{String,<:Any}) + +Retrieves a list of the delivery destinations that have been created in the account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"limit"`: Optionally specify the maximum number of delivery destinations to return in + the response. 
+- `"nextToken"`: +""" +function describe_delivery_destinations(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "DescribeDeliveryDestinations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_delivery_destinations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DescribeDeliveryDestinations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_delivery_sources() + describe_delivery_sources(params::Dict{String,<:Any}) + +Retrieves a list of the delivery sources that have been created in the account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"limit"`: Optionally specify the maximum number of delivery sources to return in the + response. +- `"nextToken"`: +""" +function describe_delivery_sources(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "DescribeDeliverySources"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_delivery_sources( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DescribeDeliverySources", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_destinations() describe_destinations(params::Dict{String,<:Any}) @@ -799,6 +1249,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that are linked to the monitoring account. - `"limit"`: The maximum number of items returned. If you don't specify a value, the default is up to 50 items. +- `"logGroupClass"`: Specifies the log group class for this log group. There are two + classes: The Standard log class supports all CloudWatch Logs features. The Infrequent + Access log class supports a subset of CloudWatch Logs features and incurs lower costs. + For details about the features supported by each class, see Log classes - `"logGroupNamePattern"`: If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo, log groups named FooBar, aws/Foo, and @@ -947,8 +1401,10 @@ end describe_query_definitions(params::Dict{String,<:Any}) This operation returns a paginated list of your saved CloudWatch Logs Insights query -definitions. You can use the queryDefinitionNamePrefix parameter to limit the results to -only the query definitions that have names that start with a certain string. +definitions. You can retrieve query definitions from the current account or from a source +account that is linked to the current account. You can use the queryDefinitionNamePrefix +parameter to limit the results to only the query definitions that have names that start +with a certain string. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1047,41 +1503,53 @@ function describe_subscription_filters( end """ - disassociate_kms_key(log_group_name) - disassociate_kms_key(log_group_name, params::Dict{String,<:Any}) - -Disassociates the associated KMS key from the specified log group. After the KMS key is -disassociated from the log group, CloudWatch Logs stops encrypting newly ingested data for -the log group. 
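A sketch of the new logGroupClass filter on DescribeLogGroups combined with the usual nextToken pagination; the zero-required-argument wrapper `describe_log_groups(params)` and the `logGroups`/`nextToken` response fields are assumed from the surrounding conventions.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    function infrequent_access_groups()
        groups = Any[]
        params = Dict{String,Any}("logGroupClass" => "INFREQUENT_ACCESS", "limit" => 50)
        while true
            resp = CloudWatch_Logs.describe_log_groups(params)  # parsed-Dict response assumed
            append!(groups, get(resp, "logGroups", []))
            token = get(resp, "nextToken", nothing)
            token === nothing && break
            params["nextToken"] = token
        end
        return groups
    end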
All previously ingested data remains encrypted, and CloudWatch Logs requires -permissions for the KMS key whenever the encrypted data is requested. Note that it can take -up to 5 minutes for this operation to take effect. + disassociate_kms_key() + disassociate_kms_key(params::Dict{String,<:Any}) -# Arguments -- `log_group_name`: The name of the log group. +Disassociates the specified KMS key from the specified log group or from all CloudWatch +Logs Insights query results in the account. When you use DisassociateKmsKey, you specify +either the logGroupName parameter or the resourceIdentifier parameter. You can't specify +both of those parameters in the same operation. Specify the logGroupName parameter to +stop using the KMS key to encrypt future log events ingested and stored in the log group. +Instead, they will be encrypted with the default CloudWatch Logs method. The log events +that were ingested while the key was associated with the log group are still encrypted with +that key. Therefore, CloudWatch Logs will need permissions for the key whenever that data +is accessed. Specify the resourceIdentifier parameter with the query-result resource to +stop using the KMS key to encrypt the results of all future StartQuery operations in the +account. They will instead be encrypted with the default CloudWatch Logs method. The +results from queries that ran while the key was associated with the account are still +encrypted with that key. Therefore, CloudWatch Logs will need permissions for the key +whenever that data is accessed. It can take up to 5 minutes for this operation to take +effect. -""" -function disassociate_kms_key( - logGroupName; aws_config::AbstractAWSConfig=global_aws_config() -) - return cloudwatch_logs( - "DisassociateKmsKey", - Dict{String,Any}("logGroupName" => logGroupName); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"logGroupName"`: The name of the log group. In your DisassociateKmsKey operation, you + must specify either the resourceIdentifier parameter or the logGroup parameter, but you + can't specify both. +- `"resourceIdentifier"`: Specifies the target for this operation. You must specify one of + the following: Specify the ARN of a log group to stop having CloudWatch Logs use the KMS + key to encrypt log events that are ingested and stored by that log group. After you run + this operation, CloudWatch Logs encrypts ingested log events with the default CloudWatch + Logs method. The log group ARN must be in the following format. Replace REGION and + ACCOUNT_ID with your Region and account ID. + arn:aws:logs:REGION:ACCOUNT_ID:log-group:LOG_GROUP_NAME Specify the following ARN to + stop using this key to encrypt the results of future StartQuery operations in this account. + Replace REGION and ACCOUNT_ID with your Region and account ID. + arn:aws:logs:REGION:ACCOUNT_ID:query-result:* In your DisssociateKmsKey operation, you + must specify either the resourceIdentifier parameter or the logGroup parameter, but you + can't specify both. 
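The reworked DisassociateKmsKey above is now parameterless at the Julia level, with the target passed in params. A sketch of both mutually exclusive forms follows; the ARN and log group name are placeholders and the `@service` binding is assumed.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    # Target a single log group...
    CloudWatch_Logs.disassociate_kms_key(Dict{String,Any}("logGroupName" => "my-app-logs"))

    # ...or the account's query results, but never both in one call.
    CloudWatch_Logs.disassociate_kms_key(Dict{String,Any}(
        "resourceIdentifier" => "arn:aws:logs:us-east-1:111122223333:query-result:*",
    ))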
+""" +function disassociate_kms_key(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "DisassociateKmsKey"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function disassociate_kms_key( - logGroupName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return cloudwatch_logs( - "DisassociateKmsKey", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("logGroupName" => logGroupName), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "DisassociateKmsKey", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end @@ -1155,33 +1623,207 @@ end get_data_protection_policy(log_group_identifier) get_data_protection_policy(log_group_identifier, params::Dict{String,<:Any}) -Returns information about a log group data protection policy. +Returns information about a log group data protection policy. + +# Arguments +- `log_group_identifier`: The name or ARN of the log group that contains the data + protection policy that you want to see. + +""" +function get_data_protection_policy( + logGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "GetDataProtectionPolicy", + Dict{String,Any}("logGroupIdentifier" => logGroupIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_data_protection_policy( + logGroupIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "GetDataProtectionPolicy", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("logGroupIdentifier" => logGroupIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_delivery(id) + get_delivery(id, params::Dict{String,<:Any}) + +Returns complete information about one logical delivery. A delivery is a connection between +a delivery source and a delivery destination . A delivery source represents an Amazon +Web Services resource that sends logs to an logs delivery destination. The destination can +be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support +being configured as a delivery source. These services are listed in Enable logging from +Amazon Web Services services. You need to specify the delivery id in this operation. You +can find the IDs of the deliveries in your account with the DescribeDeliveries operation. + +# Arguments +- `id`: The ID of the delivery that you want to retrieve. + +""" +function get_delivery(id; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "GetDelivery", + Dict{String,Any}("id" => id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_delivery( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "GetDelivery", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_delivery_destination(name) + get_delivery_destination(name, params::Dict{String,<:Any}) + +Retrieves complete information about one delivery destination. + +# Arguments +- `name`: The name of the delivery destination that you want to retrieve. 
+ +""" +function get_delivery_destination(name; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "GetDeliveryDestination", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_delivery_destination( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "GetDeliveryDestination", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_delivery_destination_policy(delivery_destination_name) + get_delivery_destination_policy(delivery_destination_name, params::Dict{String,<:Any}) + +Retrieves the delivery destination policy assigned to the delivery destination that you +specify. For more information about delivery destinations and their policies, see +PutDeliveryDestinationPolicy. + +# Arguments +- `delivery_destination_name`: The name of the delivery destination that you want to + retrieve the policy of. + +""" +function get_delivery_destination_policy( + deliveryDestinationName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "GetDeliveryDestinationPolicy", + Dict{String,Any}("deliveryDestinationName" => deliveryDestinationName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_delivery_destination_policy( + deliveryDestinationName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "GetDeliveryDestinationPolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("deliveryDestinationName" => deliveryDestinationName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_delivery_source(name) + get_delivery_source(name, params::Dict{String,<:Any}) + +Retrieves complete information about one delivery source. + +# Arguments +- `name`: The name of the delivery source that you want to retrieve. + +""" +function get_delivery_source(name; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "GetDeliverySource", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_delivery_source( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "GetDeliverySource", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_log_anomaly_detector(anomaly_detector_arn) + get_log_anomaly_detector(anomaly_detector_arn, params::Dict{String,<:Any}) + +Retrieves information about the log anomaly detector that you specify. # Arguments -- `log_group_identifier`: The name or ARN of the log group that contains the data - protection policy that you want to see. +- `anomaly_detector_arn`: The ARN of the anomaly detector to retrieve information about. + You can find the ARNs of log anomaly detectors in your account by using the + ListLogAnomalyDetectors operation. 
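A brief retrieval sketch for the new anomaly-detector getter; the ARN is a placeholder and the `anomalyDetectorStatus` response field is assumed from the API response shape rather than from this diff.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    detector_arn = "arn:aws:logs:us-east-1:111122223333:anomaly-detector:abcd1234"  # placeholder
    info = CloudWatch_Logs.get_log_anomaly_detector(detector_arn)
    @info "anomaly detector" get(info, "anomalyDetectorStatus", nothing)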
""" -function get_data_protection_policy( - logGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +function get_log_anomaly_detector( + anomalyDetectorArn; aws_config::AbstractAWSConfig=global_aws_config() ) return cloudwatch_logs( - "GetDataProtectionPolicy", - Dict{String,Any}("logGroupIdentifier" => logGroupIdentifier); + "GetLogAnomalyDetector", + Dict{String,Any}("anomalyDetectorArn" => anomalyDetectorArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_data_protection_policy( - logGroupIdentifier, +function get_log_anomaly_detector( + anomalyDetectorArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cloudwatch_logs( - "GetDataProtectionPolicy", + "GetLogAnomalyDetector", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("logGroupIdentifier" => logGroupIdentifier), params + _merge, Dict{String,Any}("anomalyDetectorArn" => anomalyDetectorArn), params ), ); aws_config=aws_config, @@ -1279,10 +1921,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the ARN. You must include either logGroupIdentifier or logGroupName, but not both. - `"logGroupName"`: The name of the log group to search. You must include either logGroupIdentifier or logGroupName, but not both. -- `"time"`: The time to set as the center of the query. If you specify time, the 15 minutes - before this time are queries. If you omit time, the 8 minutes before and 8 minutes after - this time are searched. The time value is specified as epoch time, which is the number of - seconds since January 1, 1970, 00:00:00 UTC. +- `"time"`: The time to set as the center of the query. If you specify time, the 8 minutes + before and 8 minutes after this time are searched. If you omit time, the most recent 15 + minutes up to the current time are searched. The time value is specified as epoch time, + which is the number of seconds since January 1, 1970, 00:00:00 UTC. """ function get_log_group_fields(; aws_config::AbstractAWSConfig=global_aws_config()) return cloudwatch_logs( @@ -1350,12 +1992,13 @@ end Returns the results from the specified query. Only the fields requested in the query are returned, along with a @ptr field, which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record. GetQueryResults -does not start running a query. To run a query, use StartQuery. If the value of the Status -field in the output is Running, this operation returns only partial results. If you see a -value of Scheduled or Running for the status, you can retry the operation later to see the -final results. If you are using CloudWatch cross-account observability, you can use this -operation in a monitoring account to start queries in linked source accounts. For more -information, see CloudWatch cross-account observability. +does not start running a query. To run a query, use StartQuery. For more information about +how long results of previous queries are available, see CloudWatch Logs quotas. If the +value of the Status field in the output is Running, this operation returns only partial +results. If you see a value of Scheduled or Running for the status, you can retry the +operation later to see the final results. If you are using CloudWatch cross-account +observability, you can use this operation in a monitoring account to start queries in +linked source accounts. For more information, see CloudWatch cross-account observability. 
# Arguments - `query_id`: The ID number of the query. @@ -1380,6 +2023,66 @@ function get_query_results( ) end +""" + list_anomalies() + list_anomalies(params::Dict{String,<:Any}) + +Returns a list of anomalies that log anomaly detectors have found. For details about the +structure format of each anomaly object that is returned, see the example in this section. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"anomalyDetectorArn"`: Use this to optionally limit the results to only the anomalies + found by a certain anomaly detector. +- `"limit"`: The maximum number of items to return. If you don't specify a value, the + default maximum value of 50 items is used. +- `"nextToken"`: +- `"suppressionState"`: You can specify this parameter if you want to the operation to + return only anomalies that are currently either suppressed or unsuppressed. +""" +function list_anomalies(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "ListAnomalies"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_anomalies( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "ListAnomalies", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_log_anomaly_detectors() + list_log_anomaly_detectors(params::Dict{String,<:Any}) + +Retrieves a list of the log anomaly detectors in the account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterLogGroupArn"`: Use this to optionally filter the results to only include anomaly + detectors that are associated with the specified log group. +- `"limit"`: The maximum number of items to return. If you don't specify a value, the + default maximum value of 50 items is used. +- `"nextToken"`: +""" +function list_log_anomaly_detectors(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "ListLogAnomalyDetectors"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_log_anomaly_detectors( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "ListLogAnomalyDetectors", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1459,14 +2162,15 @@ end put_account_policy(policy_document, policy_name, policy_type) put_account_policy(policy_document, policy_name, policy_type, params::Dict{String,<:Any}) -Creates an account-level data protection policy that applies to all log groups in the -account. A data protection policy can help safeguard sensitive data that's ingested by your -log groups by auditing and masking the sensitive log data. Each account can have only one -account-level policy. Sensitive data is detected and masked when it is ingested into a log -group. When you set a data protection policy, log events ingested into the log groups +Creates an account-level data protection policy or subscription filter policy that applies +to all log groups or a subset of log groups in the account. Data protection policy A data +protection policy can help safeguard sensitive data that's ingested by your log groups by +auditing and masking the sensitive log data. Each account can have only one account-level +data protection policy. 
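Tying ListAnomalies to the UpdateAnomaly operation added later in this patch, a hedged sketch that permanently suppresses the first anomaly a detector reports; the ARN is a placeholder and the `anomalies`/`anomalyId` response fields are assumed from the API response shape.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    detector_arn = "arn:aws:logs:us-east-1:111122223333:anomaly-detector:abcd1234"  # placeholder
    found = CloudWatch_Logs.list_anomalies(Dict{String,Any}("anomalyDetectorArn" => detector_arn))
    anomalies = get(found, "anomalies", [])
    if !isempty(anomalies)
        CloudWatch_Logs.update_anomaly(
            detector_arn,
            Dict{String,Any}(
                "anomalyId" => anomalies[1]["anomalyId"],
                "suppressionType" => "INFINITE",  # no suppressionPeriod needed for INFINITE
            ),
        )
    end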
Sensitive data is detected and masked when it is ingested into a +log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups -that are created later in this account. The account policy is applied to existing log +that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the @@ -1475,42 +2179,78 @@ parameter set to true to view the unmasked log events. Users with the logs:Unmas view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use -the PutAccountPolicy operation, you must be signed on with the logs:PutDataProtectionPolicy -and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log -groups in the account. You can also use PutDataProtectionPolicy to create a data protection -policy that applies to just one log group. If a log group has its own data protection -policy and the account also has an account-level data protection policy, then the two -policies are cumulative. Any sensitive term specified in either policy is masked. +the PutAccountPolicy operation for a data protection policy, you must be signed on with the +logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy +operation applies to all log groups in the account. You can use PutDataProtectionPolicy to +create a data protection policy that applies to just one log group. If a log group has its +own data protection policy and the account also has an account-level data protection +policy, then the two policies are cumulative. Any sensitive term specified in either policy +is masked. Subscription filter policy A subscription filter policy sets up a real-time +feed of log events from CloudWatch Logs to other Amazon Web Services services. +Account-level subscription filter policies apply to both existing log groups and log groups +that are created later in this account. Supported destinations are Kinesis Data Streams, +Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 +encoded and compressed with the GZIP format. The following destinations are supported for +subscription filters: An Kinesis Data Streams data stream in the same account as the +subscription policy, for same-account delivery. An Firehose data stream in the same +account as the subscription policy, for same-account delivery. A Lambda function in the +same account as the subscription policy, for same-account delivery. A logical destination +in a different account created with PutDestination, for cross-account delivery. Kinesis +Data Streams and Firehose are supported as logical destinations. Each account can have +one account-level subscription filter policy. If you are updating an existing filter, you +must specify the correct name in PolicyName. 
To perform a PutAccountPolicy subscription +filter operation for any destination except a Lambda function, you must also have the +iam:PassRole permission. # Arguments -- `policy_document`: Specify the data protection policy, in JSON. This policy must include - two JSON blocks: The first block must include both a DataIdentifer array and an Operation - property with an Audit action. The DataIdentifer array lists the types of sensitive data - that you want to mask. For more information about the available options, see Types of data - that you can mask. The Operation property with an Audit action is required to find the - sensitive data terms. This Audit action must contain a FindingsDestination object. You can - optionally use that FindingsDestination object to list one or more destinations to send - audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose - streams, and S3 buckets, they must already exist. The second block must include both a - DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer - array must exactly match the DataIdentifer array in the first block of the policy. The - Operation property with the Deidentify action is what actually masks the data, and it must - contain the \"MaskConfig\": {} object. The \"MaskConfig\": {} object must be empty. For - an example data protection policy, see the Examples section on this page. The contents of - the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the - policyDocument can also include Name, Description, and Version fields. The Name is - different than the operation's policyName parameter, and is used as a dimension when +- `policy_document`: Specify the policy, in JSON. Data protection policy A data + protection policy must include two JSON blocks: The first block must include both a + DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array + lists the types of sensitive data that you want to mask. For more information about the + available options, see Types of data that you can mask. The Operation property with an + Audit action is required to find the sensitive data terms. This Audit action must contain a + FindingsDestination object. You can optionally use that FindingsDestination object to list + one or more destinations to send audit findings to. If you specify destinations such as log + groups, Firehose streams, and S3 buckets, they must already exist. The second block must + include both a DataIdentifer array and an Operation property with an Deidentify action. The + DataIdentifer array must exactly match the DataIdentifer array in the first block of the + policy. The Operation property with the Deidentify action is what actually masks the data, + and it must contain the \"MaskConfig\": {} object. The \"MaskConfig\": {} object must be + empty. For an example data protection policy, see the Examples section on this page. The + contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON + blocks, the policyDocument can also include Name, Description, and Version fields. The Name + is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in - policyDocument can be up to 30,720 characters. + policyDocument can be up to 30,720 characters long. 
Subscription filter policy A + subscription filter policy can include the following attributes in a JSON block: + DestinationArn The ARN of the destination to deliver log events to. Supported destinations + are: An Kinesis Data Streams data stream in the same account as the subscription policy, + for same-account delivery. An Firehose data stream in the same account as the + subscription policy, for same-account delivery. A Lambda function in the same account as + the subscription policy, for same-account delivery. A logical destination in a different + account created with PutDestination, for cross-account delivery. Kinesis Data Streams and + Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that + grants CloudWatch Logs permissions to deliver ingested log events to the destination + stream. You don't need to provide the ARN when you are working with a logical destination + for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered + stream of log events. DistributionThe method used to distribute log data to the + destination. By default, log data is grouped by log stream, but the grouping can be set to + Random for a more even distribution. This property is only applicable when the destination + is an Kinesis Data Streams data stream. - `policy_name`: A name for the policy. This must be unique within the account. -- `policy_type`: Currently the only valid value for this parameter is - DATA_PROTECTION_POLICY. +- `policy_type`: The type of policy that you're creating or updating. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"scope"`: Currently the only valid value for this parameter is GLOBAL, which specifies - that the data protection policy applies to all log groups in the account. If you omit this - parameter, the default of GLOBAL is used. +- `"scope"`: Currently the only valid value for this parameter is ALL, which specifies that + the data protection policy applies to all log groups in the account. If you omit this + parameter, the default of ALL is used. +- `"selectionCriteria"`: Use this parameter to apply the subscription filter policy to a + subset of log groups in the account. Currently, the only supported filter is LogGroupName + NOT IN []. The selectionCriteria string can be up to 25KB in length. The length is + determined by using its UTF-8 bytes. Using the selectionCriteria parameter is useful to + help prevent infinite loops. For more information, see Log recursion prevention. Specifing + selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY for policyType. """ function put_account_policy( policyDocument, @@ -1585,14 +2325,14 @@ term specified in either policy is masked. that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send - audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose - streams, and S3 buckets, they must already exist. The second block must include both a - DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer - array must exactly match the DataIdentifer array in the first block of the policy. The - Operation property with the Deidentify action is what actually masks the data, and it must - contain the \"MaskConfig\": {} object. 
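For the new subscription filter flavor of PutAccountPolicy, a hedged sketch: the Kinesis and IAM ARNs are placeholders, JSON.jl is an assumed dependency for building the policy document, and the quoting inside selectionCriteria follows the LogGroupName NOT IN [] form described above.

    using AWS, JSON   # JSON.jl assumed for serializing the policy document
    @service CloudWatch_Logs   # assumed high-level binding

    policy = JSON.json(Dict(
        "DestinationArn" => "arn:aws:kinesis:us-east-1:111122223333:stream/central-logs",
        "RoleArn"        => "arn:aws:iam::111122223333:role/cwl-to-kinesis",
        "FilterPattern"  => "",          # empty pattern forwards every log event
        "Distribution"   => "Random",
    ))

    CloudWatch_Logs.put_account_policy(
        policy,
        "account-subscription-filter",    # policyName
        "SUBSCRIPTION_FILTER_POLICY",     # policyType
        Dict{String,Any}(
            "selectionCriteria" => "LogGroupName NOT IN [\"noisy-group-1\", \"noisy-group-2\"]",
        ),
    )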
The \"MaskConfig\": {} object must be empty. For - an example data protection policy, see the Examples section on this page. The contents of - the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the + audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 + buckets, they must already exist. The second block must include both a DataIdentifer + array and an Operation property with an Deidentify action. The DataIdentifer array must + exactly match the DataIdentifer array in the first block of the policy. The Operation + property with the Deidentify action is what actually masks the data, and it must contain + the \"MaskConfig\": {} object. The \"MaskConfig\": {} object must be empty. For an + example data protection policy, see the Examples section on this page. The contents of the + two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters. @@ -1633,6 +2373,221 @@ function put_data_protection_policy( ) end +""" + put_delivery_destination(delivery_destination_configuration, name) + put_delivery_destination(delivery_destination_configuration, name, params::Dict{String,<:Any}) + +Creates or updates a logical delivery destination. A delivery destination is an Amazon Web +Services resource that represents an Amazon Web Services service that logs can be sent to. +CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To +configure logs delivery between a supported Amazon Web Services service and a destination, +you must do the following: Create a delivery source, which is a logical object that +represents the resource that is actually sending the logs. For more information, see +PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is +a logical object that represents the actual delivery destination. If you are delivering +logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to +assign an IAM policy to the destination. This policy allows delivery to that destination. + Use CreateDelivery to create a delivery by pairing exactly one delivery source and one +delivery destination. For more information, see CreateDelivery. You can configure a +single delivery source to send logs to multiple destinations by creating multiple +deliveries. You can also create multiple deliveries to configure multiple delivery sources +to send logs to the same delivery destination. Only some Amazon Web Services services +support being configured as a delivery source. These services are listed as Supported [V2 +Permissions] in the table at Enabling logging from Amazon Web Services services. If you +use this operation to update an existing delivery destination, all the current delivery +destination parameters are overwritten with the new parameter values that you specify. + +# Arguments +- `delivery_destination_configuration`: A structure that contains the ARN of the Amazon Web + Services resource that will receive the logs. +- `name`: A name for this delivery destination. This name must be unique for all delivery + destinations in your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"outputFormat"`: The format for the logs that this delivery destination will receive. +- `"tags"`: An optional list of key-value pairs to associate with the resource. For more + information about tagging, see Tagging Amazon Web Services resources +""" +function put_delivery_destination( + deliveryDestinationConfiguration, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "PutDeliveryDestination", + Dict{String,Any}( + "deliveryDestinationConfiguration" => deliveryDestinationConfiguration, + "name" => name, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_delivery_destination( + deliveryDestinationConfiguration, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "PutDeliveryDestination", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "deliveryDestinationConfiguration" => deliveryDestinationConfiguration, + "name" => name, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_delivery_destination_policy(delivery_destination_name, delivery_destination_policy) + put_delivery_destination_policy(delivery_destination_name, delivery_destination_policy, params::Dict{String,<:Any}) + +Creates and assigns an IAM policy that grants permissions to CloudWatch Logs to deliver +logs cross-account to a specified destination in this account. To configure the delivery of +logs from an Amazon Web Services service in another account to a logs delivery destination +in the current account, you must do the following: Create a delivery source, which is a +logical object that represents the resource that is actually sending the logs. For more +information, see PutDeliverySource. Create a delivery destination, which is a logical +object that represents the actual delivery destination. For more information, see +PutDeliveryDestination. Use this operation in the destination account to assign an IAM +policy to the destination. This policy allows delivery to that destination. Create a +delivery by pairing exactly one delivery source and one delivery destination. For more +information, see CreateDelivery. Only some Amazon Web Services services support being +configured as a delivery source. These services are listed as Supported [V2 Permissions] in +the table at Enabling logging from Amazon Web Services services. The contents of the +policy must include two statements. One statement enables general logs delivery, and the +other allows delivery to the chosen destination. See the examples for the needed policies. + +# Arguments +- `delivery_destination_name`: The name of the delivery destination to assign this policy + to. +- `delivery_destination_policy`: The contents of the policy. 
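A sketch of registering an S3 bucket as a delivery destination; the bucket ARN is a placeholder, and the `destinationResourceArn` key plus the `json` output format are assumptions taken from the API reference rather than from this diff.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    CloudWatch_Logs.put_delivery_destination(
        Dict{String,Any}("destinationResourceArn" => "arn:aws:s3:::my-vended-logs-bucket"),
        "my-s3-destination",
        Dict{String,Any}("outputFormat" => "json"),
    )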
+ +""" +function put_delivery_destination_policy( + deliveryDestinationName, + deliveryDestinationPolicy; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "PutDeliveryDestinationPolicy", + Dict{String,Any}( + "deliveryDestinationName" => deliveryDestinationName, + "deliveryDestinationPolicy" => deliveryDestinationPolicy, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_delivery_destination_policy( + deliveryDestinationName, + deliveryDestinationPolicy, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "PutDeliveryDestinationPolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "deliveryDestinationName" => deliveryDestinationName, + "deliveryDestinationPolicy" => deliveryDestinationPolicy, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_delivery_source(log_type, name, resource_arn) + put_delivery_source(log_type, name, resource_arn, params::Dict{String,<:Any}) + +Creates or updates a logical delivery source. A delivery source represents an Amazon Web +Services resource that sends logs to an logs delivery destination. The destination can be +CloudWatch Logs, Amazon S3, or Firehose. To configure logs delivery between a delivery +destination and an Amazon Web Services service that is supported as a delivery source, you +must do the following: Use PutDeliverySource to create a delivery source, which is a +logical object that represents the resource that is actually sending the logs. Use +PutDeliveryDestination to create a delivery destination, which is a logical object that +represents the actual delivery destination. For more information, see +PutDeliveryDestination. If you are delivering logs cross-account, you must use +PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the +destination. This policy allows delivery to that destination. Use CreateDelivery to +create a delivery by pairing exactly one delivery source and one delivery destination. For +more information, see CreateDelivery. You can configure a single delivery source to send +logs to multiple destinations by creating multiple deliveries. You can also create multiple +deliveries to configure multiple delivery sources to send logs to the same delivery +destination. Only some Amazon Web Services services support being configured as a delivery +source. These services are listed as Supported [V2 Permissions] in the table at Enabling +logging from Amazon Web Services services. If you use this operation to update an existing +delivery source, all the current delivery source parameters are overwritten with the new +parameter values that you specify. + +# Arguments +- `log_type`: Defines the type of log that the source is sending. For Amazon + CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Centerr, the valid value + is ERROR_LOGS. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, + AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS. + +- `name`: A name for this delivery source. This name must be unique for all delivery + sources in your account. +- `resource_arn`: The ARN of the Amazon Web Services resource that is generating and + sending logs. 
For example, + arn:aws:workmail:us-east-1:123456789012:organization/m-1234EXAMPLEabcd1234abcd1234abcd1234 + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: An optional list of key-value pairs to associate with the resource. For more + information about tagging, see Tagging Amazon Web Services resources +""" +function put_delivery_source( + logType, name, resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "PutDeliverySource", + Dict{String,Any}( + "logType" => logType, "name" => name, "resourceArn" => resourceArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_delivery_source( + logType, + name, + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "PutDeliverySource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "logType" => logType, "name" => name, "resourceArn" => resourceArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_destination(destination_name, role_arn, target_arn) put_destination(destination_name, role_arn, target_arn, params::Dict{String,<:Any}) @@ -1847,11 +2802,11 @@ and dimensions to the metric that is created. Metrics extracted from log events charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate -custom metric. CloudWatch Logs disables a metric filter if it generates 1,000 different -name/value pairs for your specified dimensions within a certain amount of time. This helps -to prevent accidental high charges. You can also set up a billing alarm to alert you if -your charges are higher than expected. For more information, see Creating a Billing Alarm -to Monitor Your Estimated Amazon Web Services Charges. +custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000 +different name/value pairs for your specified dimensions within one hour. You can also set +up a billing alarm to alert you if your charges are higher than expected. For more +information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services +Charges. # Arguments - `filter_name`: A name for the metric filter. @@ -1930,6 +2885,8 @@ the logs:PutQueryDefinition permission to be able to perform this operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Used as an idempotency token, to avoid returning an exception if the + service receives the same request twice because of a network error. - `"logGroupNames"`: Use this parameter to include specific log groups as part of your query definition. If you are updating a query definition and you omit this parameter, then the updated definition will contain no log groups. 
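Continuing the vended-logs flow, a sketch of declaring a WorkMail organization as a delivery source; the organization ARN mirrors the documentation example and the `@service` binding is assumed. Pairing the source with a destination is then done with the CreateDelivery operation, which is not shown here.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    CloudWatch_Logs.put_delivery_source(
        "ACCESS_CONTROL_LOGS",    # logType (one of the WorkMail values listed above)
        "my-workmail-source",     # name
        "arn:aws:workmail:us-east-1:123456789012:organization/m-1234EXAMPLEabcd1234abcd1234abcd1234",
    )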
@@ -1944,7 +2901,9 @@ function put_query_definition( ) return cloudwatch_logs( "PutQueryDefinition", - Dict{String,Any}("name" => name, "queryString" => queryString); + Dict{String,Any}( + "name" => name, "queryString" => queryString, "clientToken" => string(uuid4()) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1960,7 +2919,11 @@ function put_query_definition( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("name" => name, "queryString" => queryString), + Dict{String,Any}( + "name" => name, + "queryString" => queryString, + "clientToken" => string(uuid4()), + ), params, ), ); @@ -2022,7 +2985,11 @@ haven’t been deleted. Those log events will take up to 72 hours to be deleted new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the -earlier log events are deleted. +earlier log events are deleted. When log events reach their retention setting they are +marked for deletion. After they are marked for deletion, they do not add to your archival +storage costs anymore, even if they are not actually deleted until later. These log events +marked for deletion are also not included when you use an API to retrieve the storedBytes +value to see how many bytes a log group is storing. # Arguments - `log_group_name`: The name of the log group. @@ -2073,12 +3040,13 @@ through PutLogEvents and have them delivered to a specific destination. When log sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. - A logical destination that belongs to a different account, for cross-account delivery. -An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the -subscription filter, for same-account delivery. An Lambda function that belongs to the -same account as the subscription filter, for same-account delivery. Each log group can -have up to two subscription filters associated with it. If you are updating an existing -filter, you must specify the correct name in filterName. To perform a + A logical destination created with PutDestination that belongs to a different account, +for cross-account delivery. We currently support Kinesis Data Streams and Firehose as +logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the +same account as the subscription filter, for same-account delivery. An Lambda function +that belongs to the same account as the subscription filter, for same-account delivery. +Each log group can have up to two subscription filters associated with it. If you are +updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. @@ -2155,20 +3123,102 @@ function put_subscription_filter( ) end +""" + start_live_tail(log_group_identifiers) + start_live_tail(log_group_identifiers, params::Dict{String,<:Any}) + +Starts a Live Tail streaming session for one or more log groups. A Live Tail session +returns a stream of log events that have been recently ingested in the log groups. For more +information, see Use Live Tail to view logs in near real time. 
The response to this +operation is a response stream, over which the server sends live log events and the client +receives them. The following objects are sent over the stream: A single +LiveTailSessionStart object is sent at the start of the session. Every second, a +LiveTailSessionUpdate object is sent. Each of these objects contains an array of the actual +log events. If no new log events were ingested in the past second, the +LiveTailSessionUpdate object will contain an empty array. The array of log events contained +in a LiveTailSessionUpdate can include as many as 500 log events. If the number of log +events matching the request exceeds 500 per second, the log events are sampled down to 500 +log events to be included in each LiveTailSessionUpdate object. If your client consumes the +log events slower than the server produces them, CloudWatch Logs buffers up to 10 +LiveTailSessionUpdate events or 5000 log events, after which it starts dropping the oldest +events. A SessionStreamingException object is returned if an unknown error occurs on the +server side. A SessionTimeoutException object is returned when the session times out, +after it has been kept open for three hours. You can end a session before it times out +by closing the session stream or by closing the client that is receiving the stream. The +session also ends if the established connection between the client and the server breaks. +For examples of using an SDK to start a Live Tail session, see Start a Live Tail session +using an Amazon Web Services SDK. + +# Arguments +- `log_group_identifiers`: An array where each item in the array is a log group to include + in the Live Tail session. Specify each log group by its ARN. If you specify an ARN, the + ARN can't end with an asterisk (*). You can include up to 10 log groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"logEventFilterPattern"`: An optional pattern to use to filter the results to include + only log events that match the pattern. For example, a filter pattern of error 404 causes + only log events that include both error and 404 to be included in the Live Tail stream. + Regular expression filter patterns are supported. For more information about filter pattern + syntax, see Filter and Pattern Syntax. +- `"logStreamNamePrefixes"`: If you specify this parameter, then only log events in the log + streams that have names that start with the prefixes that you specify here are included in + the Live Tail session. If you specify this field, you can't also specify the logStreamNames + field. You can specify this parameter only if you specify only one log group in + logGroupIdentifiers. +- `"logStreamNames"`: If you specify this parameter, then only log events in the log + streams that you specify here are included in the Live Tail session. If you specify this + field, you can't also specify the logStreamNamePrefixes field. You can specify this + parameter only if you specify only one log group in logGroupIdentifiers. 
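A hedged sketch of opening a Live Tail session; the log group ARN and stream prefix are placeholders, and consuming the returned event stream (LiveTailSessionStart and LiveTailSessionUpdate objects) is transport-specific and not shown.

    using AWS
    @service CloudWatch_Logs   # assumed high-level binding

    CloudWatch_Logs.start_live_tail(
        ["arn:aws:logs:us-east-1:111122223333:log-group:my-app-logs"],  # no trailing *
        Dict{String,Any}(
            "logEventFilterPattern" => "error",
            "logStreamNamePrefixes" => ["web-"],
        ),
    )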
+""" +function start_live_tail( + logGroupIdentifiers; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "StartLiveTail", + Dict{String,Any}("logGroupIdentifiers" => logGroupIdentifiers); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_live_tail( + logGroupIdentifiers, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "StartLiveTail", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("logGroupIdentifiers" => logGroupIdentifiers), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_query(end_time, query_string, start_time) start_query(end_time, query_string, start_time, params::Dict{String,<:Any}) Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use. For more information, see CloudWatch -Logs Insights Query Syntax. Queries time out after 60 minutes of runtime. If your queries -are timing out, reduce the time range being searched or partition your query into a number -of queries. If you are using CloudWatch cross-account observability, you can use this -operation in a monitoring account to start a query in a linked source account. For more -information, see CloudWatch cross-account observability. For a cross-account StartQuery -operation, the query definition must be defined in the monitoring account. You can have up -to 30 concurrent CloudWatch Logs insights queries, including queries that have been added -to dashboards. +Logs Insights Query Syntax. After you run a query using StartQuery, the query results are +stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, +using the queryId that StartQuery returns. If you have associated a KMS key with the query +results in this account, then StartQuery uses that key to encrypt the results when it +stores them. If no key is associated with query results, the query results are encrypted +with the default CloudWatch Logs encryption method. Queries time out after 60 minutes of +runtime. If your queries are timing out, reduce the time range being searched or partition +your query into a number of queries. If you are using CloudWatch cross-account +observability, you can use this operation in a monitoring account to start a query in a +linked source account. For more information, see CloudWatch cross-account observability. +For a cross-account StartQuery operation, the query definition must be defined in the +monitoring account. You can have up to 30 concurrent CloudWatch Logs insights queries, +including queries that have been added to dashboards. # Arguments - `end_time`: The end of the time range to query. The range is inclusive, so the specified @@ -2190,14 +3240,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys querying is in a source account and you're using a monitoring account, you must specify the ARN of the log group here. The query definition must also be defined in the monitoring account. If you specify an ARN, the ARN can't end with an asterisk (*). A StartQuery - operation must include exactly one of the following parameters: logGroupName, logGroupNames - or logGroupIdentifiers. + operation must include exactly one of the following parameters: logGroupName, + logGroupNames, or logGroupIdentifiers. - `"logGroupName"`: The log group on which to perform the query. 
A StartQuery operation - must include exactly one of the following parameters: logGroupName, logGroupNames or + must include exactly one of the following parameters: logGroupName, logGroupNames, or logGroupIdentifiers. - `"logGroupNames"`: The list of log groups to be queried. You can include up to 50 log groups. A StartQuery operation must include exactly one of the following parameters: - logGroupName, logGroupNames or logGroupIdentifiers. + logGroupName, logGroupNames, or logGroupIdentifiers. """ function start_query( endTime, queryString, startTime; aws_config::AbstractAWSConfig=global_aws_config() @@ -2502,3 +3552,114 @@ function untag_resource( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_anomaly(anomaly_detector_arn) + update_anomaly(anomaly_detector_arn, params::Dict{String,<:Any}) + +Use this operation to suppress anomaly detection for a specified anomaly or pattern. If you +suppress an anomaly, CloudWatch Logs won’t report new occurrences of that anomaly and +won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won’t +report any anomalies related to that pattern. You must specify either anomalyId or +patternId, but you can't specify both parameters in the same operation. If you have +previously used this operation to suppress detection of a pattern or anomaly, you can use +it again to cause CloudWatch Logs to end the suppression. To do this, use this operation +and specify the anomaly or pattern to stop suppressing, and omit the suppressionType and +suppressionPeriod parameters. + +# Arguments +- `anomaly_detector_arn`: The ARN of the anomaly detector that this operation is to act on. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"anomalyId"`: If you are suppressing or unsuppressing an anomaly, specify its unique ID + here. You can find anomaly IDs by using the ListAnomalies operation. +- `"patternId"`: If you are suppressing or unsuppressing an pattern, specify its unique ID + here. You can find pattern IDs by using the ListAnomalies operation. +- `"suppressionPeriod"`: If you are temporarily suppressing an anomaly or pattern, use this + structure to specify how long the suppression is to last. +- `"suppressionType"`: Use this to specify whether the suppression to be temporary or + infinite. If you specify LIMITED, you must also specify a suppressionPeriod. If you specify + INFINITE, any value for suppressionPeriod is ignored. +""" +function update_anomaly( + anomalyDetectorArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "UpdateAnomaly", + Dict{String,Any}("anomalyDetectorArn" => anomalyDetectorArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_anomaly( + anomalyDetectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "UpdateAnomaly", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("anomalyDetectorArn" => anomalyDetectorArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_log_anomaly_detector(anomaly_detector_arn, enabled) + update_log_anomaly_detector(anomaly_detector_arn, enabled, params::Dict{String,<:Any}) + +Updates an existing log anomaly detector. + +# Arguments +- `anomaly_detector_arn`: The ARN of the anomaly detector that you want to update. +- `enabled`: Use this parameter to pause or restart the anomaly detector. 
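+
+For example, a minimal sketch of pausing a detector (the ARN is a placeholder, and the
+service module is assumed to be loaded):
+```julia
+update_log_anomaly_detector(
+    "arn:aws:logs:us-east-1:111122223333:anomaly-detector:example", false
+)
+```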
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"anomalyVisibilityTime"`: The number of days to use as the life cycle of anomalies. + After this time, anomalies are automatically baselined and the anomaly detector model will + treat new occurrences of similar event as normal. Therefore, if you do not correct the + cause of an anomaly during this time, it will be considered normal going forward and will + not be detected. +- `"evaluationFrequency"`: Specifies how often the anomaly detector runs and look for + anomalies. Set this value according to the frequency that the log group receives new logs. + For example, if the log group receives new log events every 10 minutes, then setting + evaluationFrequency to FIFTEEN_MIN might be appropriate. +- `"filterPattern"`: +""" +function update_log_anomaly_detector( + anomalyDetectorArn, enabled; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "UpdateLogAnomalyDetector", + Dict{String,Any}("anomalyDetectorArn" => anomalyDetectorArn, "enabled" => enabled); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_log_anomaly_detector( + anomalyDetectorArn, + enabled, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudwatch_logs( + "UpdateLogAnomalyDetector", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "anomalyDetectorArn" => anomalyDetectorArn, "enabled" => enabled + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/codeartifact.jl b/src/services/codeartifact.jl index 29e9cef78e..a7003f0ae3 100644 --- a/src/services/codeartifact.jl +++ b/src/services/codeartifact.jl @@ -20,7 +20,8 @@ connection. public:maven-central - for Maven Central. public:maven-googleandroid - for the Google Android repository. public:maven-gradleplugins - for the Gradle plugins repository. public:maven-commonsware - for the CommonsWare Android repository. public:maven-clojars - - for the Clojars repository. + - for the Clojars repository. public:ruby-gems-org - for RubyGems.org. + public:crates-io - for Crates.io. - `repository`: The name of the repository to which the external connection is added. # Optional Parameters @@ -99,12 +100,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"includeFromUpstream"`: Set to true to copy packages from repositories that are upstream from the source repository to the destination repository. The default setting is false. For more information, see Working with upstream repositories. -- `"namespace"`: The namespace of the package versions to be copied. The package version - component that specifies its namespace depends on its type. For example: The namespace - of a Maven package version is its groupId. The namespace is required when copying Maven - package versions. The namespace of an npm package version is its scope. Python and - NuGet package versions do not contain a corresponding component, package versions of those - formats do not have a namespace. The namespace of a generic package is its namespace. +- `"namespace"`: The namespace of the package versions to be copied. The package component + that specifies its namespace depends on its type. For example: The namespace is required + when copying package versions of the following formats: Maven Swift generic The + namespace of a Maven package version is its groupId. 
The namespace of an npm or Swift + package version is its scope. The namespace of a generic package is its namespace. + Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, + package versions of those formats do not have a namespace. - `"versionRevisions"`: A list of key-value pairs. The keys are package versions and the values are package version revisions. A CopyPackageVersion operation succeeds if the specified versions in the source repository match the specified package version revision. @@ -216,6 +218,58 @@ function create_domain( ) end +""" + create_package_group(domain, package_group) + create_package_group(domain, package_group, params::Dict{String,<:Any}) + + Creates a package group. For more information about creating package groups, including +example CLI commands, see Create a package group in the CodeArtifact User Guide. + +# Arguments +- `domain`: The name of the domain in which you want to create a package group. +- `package_group`: The pattern of the package group to create. The pattern is also the + identifier of the package group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"contactInfo"`: The contact information for the created package group. +- `"description"`: A description of the package group. +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +- `"tags"`: One or more tag key-value pairs for the package group. +""" +function create_package_group( + domain, packageGroup; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "POST", + "/v1/package-group", + Dict{String,Any}("domain" => domain, "packageGroup" => packageGroup); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_package_group( + domain, + packageGroup, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "POST", + "/v1/package-group", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "packageGroup" => packageGroup), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_repository(domain, repository) create_repository(domain, repository, params::Dict{String,<:Any}) @@ -224,7 +278,7 @@ end # Arguments - `domain`: The name of the domain that contains the created repository. -- `repository`: The name of the repository to create. +- `repository`: The name of the repository to create. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -363,11 +417,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. - `"namespace"`: The namespace of the package to delete. The package component that - specifies its namespace depends on its type. For example: The namespace of a Maven - package is its groupId. The namespace is required when deleting Maven package versions. - The namespace of an npm package is its scope. Python and NuGet packages do not contain - corresponding components, packages of those formats do not have a namespace. The - namespace of a generic package is its namespace. + specifies its namespace depends on its type. 
For example: The namespace is required when + deleting packages of the following formats: Maven Swift generic The namespace + of a Maven package version is its groupId. The namespace of an npm or Swift package + version is its scope. The namespace of a generic package is its namespace. Python, + NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package + versions of those formats do not have a namespace. """ function delete_package( domain, format, package, repository; aws_config::AbstractAWSConfig=global_aws_config() @@ -413,6 +468,57 @@ function delete_package( ) end +""" + delete_package_group(domain, package-group) + delete_package_group(domain, package-group, params::Dict{String,<:Any}) + +Deletes a package group. Deleting a package group does not delete packages or package +versions associated with the package group. When a package group is deleted, the direct +child package groups will become children of the package group's direct parent package +group. Therefore, if any of the child groups are inheriting any settings from the parent, +those settings could change. + +# Arguments +- `domain`: The domain that contains the package group to be deleted. +- `package-group`: The pattern of the package group to be deleted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +""" +function delete_package_group( + domain, package_group; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "DELETE", + "/v1/package-group", + Dict{String,Any}("domain" => domain, "package-group" => package_group); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_package_group( + domain, + package_group, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "DELETE", + "/v1/package-group", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "package-group" => package_group), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_package_versions(domain, format, package, repository, versions) delete_package_versions(domain, format, package, repository, versions, params::Dict{String,<:Any}) @@ -435,12 +541,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. - `"expectedStatus"`: The expected status of the package version to delete. -- `"namespace"`: The namespace of the package versions to be deleted. The package version - component that specifies its namespace depends on its type. For example: The namespace - of a Maven package version is its groupId. The namespace is required when deleting Maven - package versions. The namespace of an npm package version is its scope. Python and - NuGet package versions do not contain a corresponding component, package versions of those - formats do not have a namespace. The namespace of a generic package is its namespace. +- `"namespace"`: The namespace of the package versions to be deleted. The package component + that specifies its namespace depends on its type. 
For example: The namespace is required + when deleting package versions of the following formats: Maven Swift generic + The namespace of a Maven package version is its groupId. The namespace of an npm or + Swift package version is its scope. The namespace of a generic package is its namespace. + Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding + component, package versions of those formats do not have a namespace. """ function delete_package_versions( domain, @@ -650,11 +757,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. - `"namespace"`: The namespace of the requested package. The package component that - specifies its namespace depends on its type. For example: The namespace of a Maven - package is its groupId. The namespace is required when requesting Maven packages. The - namespace of an npm package is its scope. Python and NuGet packages do not contain a - corresponding component, packages of those formats do not have a namespace. The - namespace of a generic package is its namespace. + specifies its namespace depends on its type. For example: The namespace is required when + requesting packages of the following formats: Maven Swift generic The namespace + of a Maven package version is its groupId. The namespace of an npm or Swift package + version is its scope. The namespace of a generic package is its namespace. Python, + NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package + versions of those formats do not have a namespace. """ function describe_package( domain, format, package, repository; aws_config::AbstractAWSConfig=global_aws_config() @@ -700,6 +808,54 @@ function describe_package( ) end +""" + describe_package_group(domain, package-group) + describe_package_group(domain, package-group, params::Dict{String,<:Any}) + +Returns a PackageGroupDescription object that contains information about the requested +package group. + +# Arguments +- `domain`: The name of the domain that contains the package group. +- `package-group`: The pattern of the requested package group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +""" +function describe_package_group( + domain, package_group; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "GET", + "/v1/package-group", + Dict{String,Any}("domain" => domain, "package-group" => package_group); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_package_group( + domain, + package_group, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "GET", + "/v1/package-group", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "package-group" => package_group), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_package_version(domain, format, package, repository, version) describe_package_version(domain, format, package, repository, version, params::Dict{String,<:Any}) @@ -719,12 +875,13 @@ package version. Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. -- `"namespace"`: The namespace of the requested package version. The package version - component that specifies its namespace depends on its type. For example: The namespace - of a Maven package version is its groupId. The namespace of an npm package version is - its scope. Python and NuGet package versions do not contain a corresponding component, - package versions of those formats do not have a namespace. The namespace of a generic - package is its namespace. +- `"namespace"`: The namespace of the requested package version. The package component that + specifies its namespace depends on its type. For example: The namespace is required when + requesting package versions of the following formats: Maven Swift generic The + namespace of a Maven package version is its groupId. The namespace of an npm or Swift + package version is its scope. The namespace of a generic package is its namespace. + Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, + package versions of those formats do not have a namespace. """ function describe_package_version( domain, @@ -912,12 +1069,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. - `"expectedStatus"`: The expected status of the package version to dispose. -- `"namespace"`: The namespace of the package versions to be disposed. The package version - component that specifies its namespace depends on its type. For example: The namespace - of a Maven package version is its groupId. The namespace of an npm package version is - its scope. Python and NuGet package versions do not contain a corresponding component, - package versions of those formats do not have a namespace. The namespace of a generic - package is its namespace. +- `"namespace"`: The namespace of the package versions to be disposed. The package + component that specifies its namespace depends on its type. For example: The namespace is + required when disposing package versions of the following formats: Maven Swift + generic The namespace of a Maven package version is its groupId. The namespace of + an npm or Swift package version is its scope. The namespace of a generic package is its + namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a + corresponding component, package versions of those formats do not have a namespace. - `"versionRevisions"`: The revisions of the package versions you want to dispose. """ function dispose_package_versions( @@ -972,6 +1130,72 @@ function dispose_package_versions( ) end +""" + get_associated_package_group(domain, format, package) + get_associated_package_group(domain, format, package, params::Dict{String,<:Any}) + +Returns the most closely associated package group to the specified package. This API does +not require that the package exist in any repository in the domain. As such, +GetAssociatedPackageGroup can be used to see which package group's origin configuration +applies to a package before that package is in a repository. This can be helpful to check +if public packages are blocked without ingesting them. For information package group +association and matching, see Package group definition syntax and matching behavior in the +CodeArtifact User Guide. 
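+
+For example, a minimal sketch of checking which package group would govern an npm package
+before it is ingested (the domain and package names are placeholders, and the CodeArtifact
+service module is assumed to have been loaded with AWS.jl's `@service` macro):
+```julia
+using AWS: @service
+@service CodeArtifact
+
+# Which package group's origin configuration applies to the npm package "left-pad"?
+CodeArtifact.get_associated_package_group("my-domain", "npm", "left-pad")
+```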
+ +# Arguments +- `domain`: The name of the domain that contains the package from which to get the + associated package group. +- `format`: The format of the package from which to get the associated package group. +- `package`: The package from which to get the associated package group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +- `"namespace"`: The namespace of the package from which to get the associated package + group. The package component that specifies its namespace depends on its type. For example: + The namespace is required when getting associated package groups from packages of the + following formats: Maven Swift generic The namespace of a Maven package version + is its groupId. The namespace of an npm or Swift package version is its scope. The + namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package + versions do not contain a corresponding component, package versions of those formats do not + have a namespace. +""" +function get_associated_package_group( + domain, format, package; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "GET", + "/v1/get-associated-package-group", + Dict{String,Any}("domain" => domain, "format" => format, "package" => package); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_associated_package_group( + domain, + format, + package, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "GET", + "/v1/get-associated-package-group", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "domain" => domain, "format" => format, "package" => package + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_authorization_token(domain) get_authorization_token(domain, params::Dict{String,<:Any}) @@ -1086,11 +1310,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. - `"namespace"`: The namespace of the package version with the requested asset file. The - package version component that specifies its namespace depends on its type. For example: - The namespace of a Maven package version is its groupId. The namespace of an npm - package version is its scope. Python and NuGet package versions do not contain a - corresponding component, package versions of those formats do not have a namespace. The - namespace of a generic package is its namespace. + package component that specifies its namespace depends on its type. For example: The + namespace is required when requesting assets from package versions of the following + formats: Maven Swift generic The namespace of a Maven package version is its + groupId. The namespace of an npm or Swift package version is its scope. The + namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package + versions do not contain a corresponding component, package versions of those formats do not + have a namespace. - `"revision"`: The name of the package version revision that contains the requested asset. """ @@ -1172,10 +1398,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. - `"namespace"`: The namespace of the package version with the requested readme file. The - package version component that specifies its namespace depends on its type. For example: - The namespace of an npm package version is its scope. Python and NuGet package versions - do not contain a corresponding component, package versions of those formats do not have a - namespace. + package component that specifies its namespace depends on its type. For example: The + namespace is required when requesting the readme from package versions of the following + formats: Maven Swift generic The namespace of a Maven package version is its + groupId. The namespace of an npm or Swift package version is its scope. The + namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package + versions do not contain a corresponding component, package versions of those formats do not + have a namespace. """ function get_package_version_readme( domain, @@ -1234,7 +1463,8 @@ end get_repository_endpoint(domain, format, repository, params::Dict{String,<:Any}) Returns the endpoint of a repository for a specific package format. A repository has one -endpoint for each package format: maven npm nuget pypi +endpoint for each package format: cargo generic maven npm nuget +pypi ruby swift # Arguments - `domain`: The name of the domain that contains the repository. @@ -1333,6 +1563,129 @@ function get_repository_permissions_policy( ) end +""" + list_allowed_repositories_for_group(domain, origin_restriction_type, package-group) + list_allowed_repositories_for_group(domain, origin_restriction_type, package-group, params::Dict{String,<:Any}) + +Lists the repositories in the added repositories list of the specified restriction type for +a package group. For more information about restriction types and added repository lists, +see Package group origin controls in the CodeArtifact User Guide. + +# Arguments +- `domain`: The name of the domain that contains the package group from which to list + allowed repositories. +- `origin_restriction_type`: The origin configuration restriction type of which to list + allowed repositories. +- `package-group`: The pattern of the package group from which to list allowed repositories. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +- `"max-results"`: The maximum number of results to return per page. +- `"next-token"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
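+
+# Example
+A minimal, illustrative call that lists the allowed repositories for one restriction type
+of a package group (the domain, pattern, and restriction type value are placeholders; the
+service module is assumed to be loaded):
+```julia
+list_allowed_repositories_for_group("my-domain", "PUBLISH", "/npm/my-company/*")
+```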
+""" +function list_allowed_repositories_for_group( + domain, + originRestrictionType, + package_group; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "GET", + "/v1/package-group-allowed-repositories", + Dict{String,Any}( + "domain" => domain, + "originRestrictionType" => originRestrictionType, + "package-group" => package_group, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_allowed_repositories_for_group( + domain, + originRestrictionType, + package_group, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "GET", + "/v1/package-group-allowed-repositories", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "domain" => domain, + "originRestrictionType" => originRestrictionType, + "package-group" => package_group, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_associated_packages(domain, package-group) + list_associated_packages(domain, package-group, params::Dict{String,<:Any}) + +Returns a list of packages associated with the requested package group. For information +package group association and matching, see Package group definition syntax and matching +behavior in the CodeArtifact User Guide. + +# Arguments +- `domain`: The name of the domain that contains the package group from which to list + associated packages. +- `package-group`: The pattern of the package group from which to list associated + packages. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +- `"max-results"`: The maximum number of results to return per page. +- `"next-token"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"preview"`: When this flag is included, ListAssociatedPackages will return a list of + packages that would be associated with a package group, even if it does not exist. +""" +function list_associated_packages( + domain, package_group; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "GET", + "/v1/list-associated-packages", + Dict{String,Any}("domain" => domain, "package-group" => package_group); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_associated_packages( + domain, + package_group, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "GET", + "/v1/list-associated-packages", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "package-group" => package_group), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_domains() list_domains(params::Dict{String,<:Any}) @@ -1364,6 +1717,46 @@ function list_domains( ) end +""" + list_package_groups(domain) + list_package_groups(domain, params::Dict{String,<:Any}) + +Returns a list of package groups in the requested domain. + +# Arguments +- `domain`: The domain for which you want to list package groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. 
It does not include dashes or spaces. +- `"max-results"`: The maximum number of results to return per page. +- `"next-token"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"prefix"`: A prefix for which to search package groups. When included, + ListPackageGroups will return only package groups with patterns that match the prefix. +""" +function list_package_groups(domain; aws_config::AbstractAWSConfig=global_aws_config()) + return codeartifact( + "POST", + "/v1/package-groups", + Dict{String,Any}("domain" => domain); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_package_groups( + domain, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "POST", + "/v1/package-groups", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("domain" => domain), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_package_version_assets(domain, format, package, repository, version) list_package_version_assets(domain, format, package, repository, version, params::Dict{String,<:Any}) @@ -1385,11 +1778,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys owns the domain. It does not include dashes or spaces. - `"max-results"`: The maximum number of results to return per page. - `"namespace"`: The namespace of the package version that contains the requested package - version assets. The package version component that specifies its namespace depends on its - type. For example: The namespace of a Maven package version is its groupId. The - namespace of an npm package version is its scope. Python and NuGet package versions do - not contain a corresponding component, package versions of those formats do not have a - namespace. The namespace of a generic package is its namespace. + version assets. The package component that specifies its namespace depends on its type. For + example: The namespace is required requesting assets from package versions of the + following formats: Maven Swift generic The namespace of a Maven package version + is its groupId. The namespace of an npm or Swift package version is its scope. The + namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package + versions do not contain a corresponding component, package versions of those formats do not + have a namespace. - `"next-token"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. """ @@ -1468,11 +1863,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. - `"namespace"`: The namespace of the package version with the requested dependencies. The - package version component that specifies its namespace depends on its type. For example: - The namespace of a Maven package version is its groupId. The namespace of an npm - package version is its scope. Python and NuGet package versions do not contain a - corresponding component, package versions of those formats do not have a namespace. The - namespace of a generic package is its namespace. + package component that specifies its namespace depends on its type. 
For example: The + namespace is required when listing dependencies from package versions of the following + formats: Maven The namespace of a Maven package version is its groupId. The + namespace of an npm package version is its scope. Python and NuGet package versions do + not contain a corresponding component, package versions of those formats do not have a + namespace. - `"next-token"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. """ @@ -1549,11 +1945,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys owns the domain. It does not include dashes or spaces. - `"max-results"`: The maximum number of results to return per page. - `"namespace"`: The namespace of the package that contains the requested package versions. - The package component that specifies its namespace depends on its type. For example: The - namespace of a Maven package is its groupId. The namespace of an npm package is its - scope. Python and NuGet packages do not contain a corresponding component, packages of - those formats do not have a namespace. The namespace of a generic package is its - namespace. + The package component that specifies its namespace depends on its type. For example: The + namespace is required when deleting package versions of the following formats: Maven + Swift generic The namespace of a Maven package version is its groupId. The + namespace of an npm or Swift package version is its scope. The namespace of a generic + package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain + a corresponding component, package versions of those formats do not have a namespace. - `"next-token"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - `"originType"`: The originType used to filter package versions. Only package versions @@ -1628,9 +2025,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a namespace that starts with the provided string value are returned. Note that although this option is called --namespace and not --namespace-prefix, it has prefix-matching behavior. Each package format uses namespace as follows: The namespace of a Maven - package is its groupId. The namespace of an npm package is its scope. Python and - NuGet packages do not contain a corresponding component, packages of those formats do not - have a namespace. The namespace of a generic package is its namespace. + package version is its groupId. The namespace of an npm or Swift package version is its + scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and + Cargo package versions do not contain a corresponding component, package versions of those + formats do not have a namespace. - `"next-token"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - `"package-prefix"`: A prefix used to filter requested packages. Only packages with names @@ -1752,6 +2150,59 @@ function list_repositories_in_domain( ) end +""" + list_sub_package_groups(domain, package-group) + list_sub_package_groups(domain, package-group, params::Dict{String,<:Any}) + +Returns a list of direct children of the specified package group. 
For information package +group hierarchy, see Package group definition syntax and matching behavior in the +CodeArtifact User Guide. + +# Arguments +- `domain`: The name of the domain which contains the package group from which to list sub + package groups. +- `package-group`: The pattern of the package group from which to list sub package groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +- `"max-results"`: The maximum number of results to return per page. +- `"next-token"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_sub_package_groups( + domain, package_group; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "POST", + "/v1/package-groups/sub-groups", + Dict{String,Any}("domain" => domain, "package-group" => package_group); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_sub_package_groups( + domain, + package_group, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "POST", + "/v1/package-groups/sub-groups", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "package-group" => package_group), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1810,7 +2261,7 @@ CodeArtifact User Guide. - `domain`: The name of the domain that contains the repository that contains the package version to publish. - `format`: A format that specifies the type of the package version with the requested - asset file. + asset file. The only supported value is generic. - `package`: The name of the package version to publish. - `repository`: The name of the repository that the package version will be published to. - `version`: The package version to publish (for example, 3.5.2). @@ -1981,9 +2432,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys owns the domain. It does not include dashes or spaces. - `"namespace"`: The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven - package is its groupId. The namespace of an npm package is its scope. Python and - NuGet packages do not contain a corresponding component, packages of those formats do not - have a namespace. The namespace of a generic package is its namespace. + package version is its groupId. The namespace of an npm or Swift package version is its + scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and + Cargo package versions do not contain a corresponding component, package versions of those + formats do not have a namespace. """ function put_package_origin_configuration( domain, @@ -2189,6 +2641,118 @@ function untag_resource( ) end +""" + update_package_group(domain, package_group) + update_package_group(domain, package_group, params::Dict{String,<:Any}) + +Updates a package group. This API cannot be used to update a package group's origin +configuration or pattern. To update a package group's origin configuration, use +UpdatePackageGroupOriginConfiguration. 
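+
+For example, a minimal sketch of updating only the description of an existing package
+group (the domain and pattern are placeholders; the service module is assumed to be
+loaded):
+```julia
+update_package_group(
+    "my-domain", "/npm/my-company/*", Dict("description" => "Internal npm packages")
+)
+```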
+ +# Arguments +- `domain`: The name of the domain which contains the package group to be updated. +- `package_group`: The pattern of the package group to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"contactInfo"`: Contact information which you want to update the requested package + group with. +- `"description"`: The description you want to update the requested package group with. +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +""" +function update_package_group( + domain, packageGroup; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "PUT", + "/v1/package-group", + Dict{String,Any}("domain" => domain, "packageGroup" => packageGroup); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_package_group( + domain, + packageGroup, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "PUT", + "/v1/package-group", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "packageGroup" => packageGroup), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_package_group_origin_configuration(domain, package-group) + update_package_group_origin_configuration(domain, package-group, params::Dict{String,<:Any}) + +Updates the package origin configuration for a package group. The package origin +configuration determines how new versions of a package can be added to a repository. You +can allow or block direct publishing of new package versions, or ingestion and retaining of +new package versions from an external connection or upstream source. For more information +about package group origin controls and configuration, see Package group origin controls in +the CodeArtifact User Guide. + +# Arguments +- `domain`: The name of the domain which contains the package group for which to update + the origin configuration. +- `package-group`: The pattern of the package group for which to update the origin + configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"addAllowedRepositories"`: The repository name and restrictions to add to the allowed + repository list of the specified package group. +- `"domain-owner"`: The 12-digit account number of the Amazon Web Services account that + owns the domain. It does not include dashes or spaces. +- `"removeAllowedRepositories"`: The repository name and restrictions to remove from the + allowed repository list of the specified package group. +- `"restrictions"`: The origin configuration settings that determine how package versions + can enter repositories. 
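+
+# Example
+A minimal, illustrative call (the domain and pattern are placeholders; the shape of the
+`restrictions` value follows the CodeArtifact API reference and is an assumption here,
+not something verified against the service):
+```julia
+update_package_group_origin_configuration(
+    "my-domain",
+    "/npm/my-company/*",
+    # Assumed key/value names: block direct publishing for packages in this group.
+    Dict("restrictions" => Dict("PUBLISH" => "BLOCK")),
+)
+```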
+""" +function update_package_group_origin_configuration( + domain, package_group; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeartifact( + "PUT", + "/v1/package-group-origin-configuration", + Dict{String,Any}("domain" => domain, "package-group" => package_group); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_package_group_origin_configuration( + domain, + package_group, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeartifact( + "PUT", + "/v1/package-group-origin-configuration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "package-group" => package_group), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_package_versions_status(domain, format, package, repository, target_status, versions) update_package_versions_status(domain, format, package, repository, target_status, versions, params::Dict{String,<:Any}) @@ -2216,12 +2780,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"expectedStatus"`: The package version’s expected status before it is updated. If expectedStatus is provided, the package version's status is updated only if its status at the time UpdatePackageVersionsStatus is called matches expectedStatus. -- `"namespace"`: The namespace of the package version to be updated. The package version - component that specifies its namespace depends on its type. For example: The namespace - of a Maven package version is its groupId. The namespace of an npm package version is - its scope. Python and NuGet package versions do not contain a corresponding component, - package versions of those formats do not have a namespace. The namespace of a generic - package is its namespace. +- `"namespace"`: The namespace of the package version to be updated. The package component + that specifies its namespace depends on its type. For example: The namespace of a Maven + package version is its groupId. The namespace of an npm or Swift package version is its + scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and + Cargo package versions do not contain a corresponding component, package versions of those + formats do not have a namespace. - `"versionRevisions"`: A map of package versions and package version revisions. The map key is the package version (for example, 3.5.2), and the map value is the package version revision. diff --git a/src/services/codebuild.jl b/src/services/codebuild.jl index 654b625cbd..e4a06a4f61 100644 --- a/src/services/codebuild.jl +++ b/src/services/codebuild.jl @@ -91,6 +91,35 @@ function batch_get_builds( ) end +""" + batch_get_fleets(names) + batch_get_fleets(names, params::Dict{String,<:Any}) + +Gets information about one or more compute fleets. + +# Arguments +- `names`: The names or ARNs of the compute fleets. 
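+
+# Example
+A minimal sketch that looks up two fleets by name (the fleet names are placeholders, and
+the CodeBuild service module is assumed to have been loaded with AWS.jl's `@service`
+macro):
+```julia
+using AWS: @service
+@service CodeBuild
+
+CodeBuild.batch_get_fleets(["linux-small-fleet", "windows-medium-fleet"])
+```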
+ +""" +function batch_get_fleets(names; aws_config::AbstractAWSConfig=global_aws_config()) + return codebuild( + "BatchGetFleets", + Dict{String,Any}("names" => names); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_fleets( + names, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codebuild( + "BatchGetFleets", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("names" => names), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_get_projects(names) batch_get_projects(names, params::Dict{String,<:Any}) @@ -193,6 +222,115 @@ function batch_get_reports( ) end +""" + create_fleet(base_capacity, compute_type, environment_type, name) + create_fleet(base_capacity, compute_type, environment_type, name, params::Dict{String,<:Any}) + +Creates a compute fleet. + +# Arguments +- `base_capacity`: The initial number of machines allocated to the fleet, which defines + the number of builds that can run in parallel. +- `compute_type`: Information about the compute resources the compute fleet uses. Available + values include: BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds. + BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. + BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your + environment type. BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds, + depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 + vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to + 100 GB uncompressed. If you use BUILD_GENERAL1_SMALL: For environment type + LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for builds. For environment + type LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor + Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GB memory + and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For + environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds. + For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 + NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to + 16 GB memory and 8 vCPUs on ARM-based processors for builds. For more information, see + Build environment compute types in the CodeBuild User Guide. +- `environment_type`: The environment type of the compute fleet. The environment type + ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West + (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific + (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo). The + environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US + East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia + Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific + (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East + (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific + (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is + available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), Asia + Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The + environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. + Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific + (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and + Asia Pacific (Mumbai). For more information, see Build environment compute types in the + CodeBuild user guide. +- `name`: The name of the compute fleet. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"fleetServiceRole"`: The service role associated with the compute fleet. For more + information, see Allow a user to add a permission policy for a fleet service role in the + CodeBuild User Guide. +- `"overflowBehavior"`: The compute fleet overflow behavior. For overflow behavior QUEUE, + your overflow builds need to wait on the existing fleet instance to become available. For + overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you + choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, + make sure that you add the required VPC permissions to your project service role. For more + information, see Example policy statement to allow CodeBuild access to Amazon Web Services + services required to create a VPC network interface. +- `"scalingConfiguration"`: The scaling configuration of the compute fleet. +- `"tags"`: A list of tag key and value pairs associated with this compute fleet. These + tags are available for use by Amazon Web Services services that support CodeBuild build + project tags. +- `"vpcConfig"`: +""" +function create_fleet( + baseCapacity, + computeType, + environmentType, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codebuild( + "CreateFleet", + Dict{String,Any}( + "baseCapacity" => baseCapacity, + "computeType" => computeType, + "environmentType" => environmentType, + "name" => name, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_fleet( + baseCapacity, + computeType, + environmentType, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codebuild( + "CreateFleet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "baseCapacity" => baseCapacity, + "computeType" => computeType, + "environmentType" => environmentType, + "name" => name, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_project(artifacts, environment, name, service_role, source) create_project(artifacts, environment, name, service_role, source, params::Dict{String,<:Any}) @@ -244,21 +382,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not - specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, - branch name, or tag name that corresponds to the version of the source code you want to - build. If a branch name is specified, the branch's HEAD commit ID is used. If not - specified, the default branch's HEAD commit ID is used. 
For Amazon S3: the version ID of - the object that represents the build input ZIP file to use. If sourceVersion is specified - at the build level, then that version takes precedence over this sourceVersion (at the - project level). For more information, see Source Version Sample with CodeBuild in the - CodeBuild User Guide. + specified, the default branch's HEAD commit ID is used. For GitLab: the commit ID, + branch, or Git tag to use. For Bitbucket: the commit ID, branch name, or tag name that + corresponds to the version of the source code you want to build. If a branch name is + specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD + commit ID is used. For Amazon S3: the version ID of the object that represents the build + input ZIP file to use. If sourceVersion is specified at the build level, then that + version takes precedence over this sourceVersion (at the project level). For more + information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. - `"tags"`: A list of tag key and value pairs associated with this build project. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. -- `"timeoutInMinutes"`: How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to +- `"timeoutInMinutes"`: How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes. -- `"vpcConfig"`: VpcConfig enables CodeBuild to access resources in an Amazon VPC. +- `"vpcConfig"`: VpcConfig enables CodeBuild to access resources in an Amazon VPC. If + you're using compute fleets during project creation, do not provide vpcConfig. """ function create_project( artifacts, @@ -389,6 +528,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys webhooks are triggered. At least one WebhookFilter in the array must specify EVENT as its type. For a build to be triggered, at least one filter group in the filterGroups array must pass. For a filter group to pass, each of its filters must pass. +- `"manualCreation"`: If manualCreation is true, CodeBuild doesn't create a webhook in + GitHub and instead returns payloadUrl and secret values for the webhook. The payloadUrl and + secret values in the output can be used to manually create a webhook within GitHub. + manualCreation is only available for GitHub webhooks. +- `"scopeConfiguration"`: The scope configuration for global or organization webhooks. + Global or organization webhooks are only available for GitHub and Github Enterprise + webhooks. """ function create_webhook(projectName; aws_config::AbstractAWSConfig=global_aws_config()) return codebuild( @@ -442,6 +588,35 @@ function delete_build_batch( ) end +""" + delete_fleet(arn) + delete_fleet(arn, params::Dict{String,<:Any}) + +Deletes a compute fleet. When you delete a compute fleet, its builds are not deleted. + +# Arguments +- `arn`: The ARN of the compute fleet. 
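A minimal sketch of the manualCreation webhook option added in the create_webhook hunk above, assuming the usual AWS.jl `@service` pattern and the wrapper's paired params method; the project name is a placeholder:

using AWS
@service CodeBuild

# With manualCreation set, CodeBuild returns the payloadUrl and secret instead of
# creating the GitHub webhook itself; the webhook can then be created by hand.
CodeBuild.create_webhook("my-project", Dict("manualCreation" => true))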
+ +""" +function delete_fleet(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return codebuild( + "DeleteFleet", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_fleet( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codebuild( + "DeleteFleet", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_project(name) delete_project(name, params::Dict{String,<:Any}) @@ -826,11 +1001,13 @@ code stored in a GitHub, GitHub Enterprise, or Bitbucket repository. # Arguments - `auth_type`: The type of authentication used to connect to a GitHub, GitHub Enterprise, - or Bitbucket repository. An OAUTH connection is not supported by the API and must be - created using the CodeBuild console. + GitLab, GitLab Self Managed, or Bitbucket repository. An OAUTH connection is not supported + by the API and must be created using the CodeBuild console. Note that CODECONNECTIONS is + only valid for GitLab and GitLab Self Managed. - `server_type`: The source provider used for this project. - `token`: For GitHub or GitHub Enterprise, this is the personal access token. For - Bitbucket, this is the app password. + Bitbucket, this is either the access token or the app password. For the authType + CODECONNECTIONS, this is the connectionArn. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1078,6 +1255,42 @@ function list_curated_environment_images( ) end +""" + list_fleets() + list_fleets(params::Dict{String,<:Any}) + +Gets a list of compute fleet names with each compute fleet name representing a single +compute fleet. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of paginated compute fleets returned per response. Use + nextToken to iterate pages in the list of returned compute fleets. +- `"nextToken"`: During a previous call, if there are more than 100 items in the list, only + the first 100 items are returned, along with a unique string called a nextToken. To get the + next batch of items in the list, call this operation again, adding the next token to the + call. To get all of the items in the list, keep calling this operation with each subsequent + next token that is returned, until no more next tokens are returned. +- `"sortBy"`: The criterion to be used to list compute fleet names. Valid values include: + CREATED_TIME: List based on when each compute fleet was created. LAST_MODIFIED_TIME: + List based on when information about each compute fleet was last changed. NAME: List + based on each compute fleet's name. Use sortOrder to specify in what order to list the + compute fleet names based on the preceding criteria. +- `"sortOrder"`: The order in which to list compute fleets. Valid values include: + ASCENDING: List in ascending order. DESCENDING: List in descending order. Use sortBy + to specify the criterion to be used to list compute fleet names. 
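The new compute fleet operations above all follow the file's standard calling convention: required positional arguments plus an optional params dict. A minimal lifecycle sketch with illustrative values, assuming the usual AWS.jl `@service CodeBuild` setup; the fleet ARN is a placeholder for the value CreateFleet returns:

# Create a small Linux fleet, list fleets newest-first, then clean up.
CodeBuild.create_fleet(2, "BUILD_GENERAL1_SMALL", "LINUX_CONTAINER", "demo-fleet")
fleet_arn = "arn:aws:codebuild:us-east-1:111122223333:fleet/demo-fleet"  # placeholder

CodeBuild.list_fleets(Dict("sortBy" => "CREATED_TIME", "sortOrder" => "DESCENDING"))
CodeBuild.batch_get_fleets([fleet_arn])
CodeBuild.delete_fleet(fleet_arn)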
+""" +function list_fleets(; aws_config::AbstractAWSConfig=global_aws_config()) + return codebuild("ListFleets"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_fleets( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codebuild( + "ListFleets", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_projects() list_projects(params::Dict{String,<:Any}) @@ -1434,7 +1647,11 @@ end start_build(project_name) start_build(project_name, params::Dict{String,<:Any}) -Starts running a build. +Starts running a build with the settings defined in the project. These setting include: how +to run a build, where to get the source code, which build environment to use, which build +commands to run, and where to store the build output. You can also start a build run by +overriding some of the build settings in the project. The overrides only apply for that +specific start build request. The settings in the project are unaltered. # Arguments - `project_name`: The name of the CodeBuild build project to start running a build. @@ -1446,15 +1663,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"buildStatusConfigOverride"`: Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is GITHUB, GITHUB_ENTERPRISE, or BITBUCKET. -- `"buildspecOverride"`: A buildspec file declaration that overrides, for this build only, - the latest one already defined in the build project. If this value is set, it can be - either an inline buildspec definition, the path to an alternate buildspec file relative to - the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 - bucket. The bucket must be in the same Amazon Web Services Region as the build project. - Specify the buildspec file using its ARN (for example, - arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set - to an empty string, the source code must contain a buildspec file in its root directory. - For more information, see Buildspec File Name and Storage Location. +- `"buildspecOverride"`: A buildspec file declaration that overrides the latest one defined + in the build project, for this build only. The buildspec defined on the project is not + changed. If this value is set, it can be either an inline buildspec definition, the path to + an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR + environment variable, or the path to an S3 bucket. The bucket must be in the same Amazon + Web Services Region as the build project. Specify the buildspec file using its ARN (for + example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or + is set to an empty string, the source code must contain a buildspec file in its root + directory. For more information, see Buildspec File Name and Storage Location. Since this + property allows you to change the build commands that will run in the container, you should + note that an IAM principal with the ability to call this API and set this parameter can + override the default settings. Moreover, we encourage that you use a trustworthy buildspec + location like a file in your source repository or a Amazon S3 bucket. - `"cacheOverride"`: A ProjectCache object specified for this build that overrides the one defined in the build project. 
- `"certificateOverride"`: The name of a certificate for this build that overrides the one @@ -1473,6 +1694,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specified in the build project. - `"environmentVariablesOverride"`: A set of environment variables that overrides, for this build only, the latest ones already defined in the build project. +- `"fleetOverride"`: A ProjectFleet object specified for this build that overrides the one + defined in the build project. - `"gitCloneDepthOverride"`: The user-defined depth of history, with a minimum value of 0, that overrides, for this build only, any previous depth of history defined in the build project. @@ -1519,7 +1742,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specified in the build project. - `"sourceAuthOverride"`: An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is - BitBucket or GitHub. + BitBucket, GitHub, GitLab, or GitLab Self Managed. - `"sourceLocationOverride"`: A location that overrides, for this build, the source location for the one defined in the build project. - `"sourceTypeOverride"`: A source input type, for this build, that overrides the source @@ -1530,15 +1753,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD - commit ID is used. If not specified, the default branch's HEAD commit ID is used. - Bitbucket The commit ID, branch name, or tag name that corresponds to the version of the - source code you want to build. If a branch name is specified, the branch's HEAD commit ID - is used. If not specified, the default branch's HEAD commit ID is used. Amazon S3 The - version ID of the object that represents the build input ZIP file to use. If - sourceVersion is specified at the project level, then this sourceVersion (at the build - level) takes precedence. For more information, see Source Version Sample with CodeBuild in - the CodeBuild User Guide. -- `"timeoutInMinutesOverride"`: The number of build timeout minutes, from 5 to 480 (8 + commit ID is used. If not specified, the default branch's HEAD commit ID is used. GitLab + The commit ID, branch, or Git tag to use. Bitbucket The commit ID, branch name, or tag + name that corresponds to the version of the source code you want to build. If a branch name + is specified, the branch's HEAD commit ID is used. If not specified, the default branch's + HEAD commit ID is used. Amazon S3 The version ID of the object that represents the build + input ZIP file to use. If sourceVersion is specified at the project level, then this + sourceVersion (at the build level) takes precedence. For more information, see Source + Version Sample with CodeBuild in the CodeBuild User Guide. +- `"timeoutInMinutesOverride"`: The number of build timeout minutes, from 5 to 2160 (36 hours), that overrides, for this build only, the latest setting already defined in the build project. """ @@ -1755,6 +1978,88 @@ function stop_build_batch( ) end +""" + update_fleet(arn) + update_fleet(arn, params::Dict{String,<:Any}) + +Updates a compute fleet. + +# Arguments +- `arn`: The ARN of the compute fleet. 
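The start_build overrides documented above are passed straight through the params dict; a sketch assuming the same `@service CodeBuild` setup, with a placeholder project and fleet ARN (the fleetOverride key name is assumed from the ProjectFleet shape):

# One-off build with a longer timeout, a pinned source version, and an explicit fleet.
CodeBuild.start_build(
    "my-project",
    Dict(
        "sourceVersion" => "pr/25",
        "timeoutInMinutesOverride" => 120,
        "fleetOverride" => Dict("fleetArn" => "arn:aws:codebuild:us-east-1:111122223333:fleet/demo-fleet"),
    ),
)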
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"baseCapacity"`: The initial number of machines allocated to the compute fleet, which + defines the number of builds that can run in parallel. +- `"computeType"`: Information about the compute resources the compute fleet uses. + Available values include: BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for + builds. BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. + BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your + environment type. BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds, + depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 + vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to + 100 GB uncompressed. If you use BUILD_GENERAL1_SMALL: For environment type + LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for builds. For environment + type LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor + Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GB memory + and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For + environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds. + For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 + NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to + 16 GB memory and 8 vCPUs on ARM-based processors for builds. For more information, see + Build environment compute types in the CodeBuild User Guide. +- `"environmentType"`: The environment type of the compute fleet. The environment type + ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West + (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific + (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo). The + environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US + East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia + Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific + (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East + (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific + (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is + available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia + Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The + environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. + Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific + (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and + Asia Pacific (Mumbai). For more information, see Build environment compute types in the + CodeBuild user guide. +- `"fleetServiceRole"`: The service role associated with the compute fleet. For more + information, see Allow a user to add a permission policy for a fleet service role in the + CodeBuild User Guide. +- `"overflowBehavior"`: The compute fleet overflow behavior. 
For overflow behavior QUEUE, + your overflow builds need to wait on the existing fleet instance to become available. For + overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you + choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, + make sure that you add the required VPC permissions to your project service role. For more + information, see Example policy statement to allow CodeBuild access to Amazon Web Services + services required to create a VPC network interface. +- `"scalingConfiguration"`: The scaling configuration of the compute fleet. +- `"tags"`: A list of tag key and value pairs associated with this compute fleet. These + tags are available for use by Amazon Web Services services that support CodeBuild build + project tags. +- `"vpcConfig"`: +""" +function update_fleet(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return codebuild( + "UpdateFleet", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_fleet( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codebuild( + "UpdateFleet", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_project(name) update_project(name, params::Dict{String,<:Any}) @@ -1808,18 +2113,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not - specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, - branch name, or tag name that corresponds to the version of the source code you want to - build. If a branch name is specified, the branch's HEAD commit ID is used. If not - specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of - the object that represents the build input ZIP file to use. If sourceVersion is - specified at the build level, then that version takes precedence over this sourceVersion - (at the project level). For more information, see Source Version Sample with CodeBuild in - the CodeBuild User Guide. + specified, the default branch's HEAD commit ID is used. For GitLab: the commit ID, + branch, or Git tag to use. For Bitbucket: the commit ID, branch name, or tag name that + corresponds to the version of the source code you want to build. If a branch name is + specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD + commit ID is used. For Amazon S3: the version ID of the object that represents the build + input ZIP file to use. If sourceVersion is specified at the build level, then that + version takes precedence over this sourceVersion (at the project level). For more + information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. - `"tags"`: An updated list of tag key and value pairs associated with this build project. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. 
-- `"timeoutInMinutes"`: The replacement value in minutes, from 5 to 480 (8 hours), for +- `"timeoutInMinutes"`: The replacement value in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. - `"vpcConfig"`: VpcConfig enables CodeBuild to access resources in an Amazon VPC. """ @@ -1854,15 +2159,14 @@ builds that were run when the project was private, are available to the general All build logs and artifacts are available to the public. Environment variables, source code, and other sensitive information may have been output to the build logs and artifacts. You must be careful about what information is output to the build logs. Some best practice -are: Do not store sensitive values, especially Amazon Web Services access key IDs and -secret access keys, in environment variables. We recommend that you use an Amazon EC2 -Systems Manager Parameter Store or Secrets Manager to store sensitive values. Follow Best -practices for using webhooks in the CodeBuild User Guide to limit which entities can -trigger a build, and do not store the buildspec in the project itself, to ensure that your -webhooks are as secure as possible. A malicious user can use public builds to -distribute malicious artifacts. We recommend that you review all pull requests to verify -that the pull request is a legitimate change. We also recommend that you validate any -artifacts with their checksums to make sure that the correct artifacts are being +are: Do not store sensitive values in environment variables. We recommend that you use an +Amazon EC2 Systems Manager Parameter Store or Secrets Manager to store sensitive values. +Follow Best practices for using webhooks in the CodeBuild User Guide to limit which +entities can trigger a build, and do not store the buildspec in the project itself, to +ensure that your webhooks are as secure as possible. A malicious user can use public +builds to distribute malicious artifacts. We recommend that you review all pull requests to +verify that the pull request is a legitimate change. We also recommend that you validate +any artifacts with their checksums to make sure that the correct artifacts are being downloaded. # Arguments diff --git a/src/services/codecatalyst.jl b/src/services/codecatalyst.jl index c0341f9cc2..23d0432511 100644 --- a/src/services/codecatalyst.jl +++ b/src/services/codecatalyst.jl @@ -80,6 +80,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Environments consume compute minutes when running. - `"repositories"`: The source repository that contains the branch to clone into the Dev Environment. +- `"vpcConnectionName"`: The name of the connection that will be used to connect to Amazon + VPC, if any. """ function create_dev_environment( instanceType, @@ -167,6 +169,49 @@ function create_project( ) end +""" + create_source_repository(name, project_name, space_name) + create_source_repository(name, project_name, space_name, params::Dict{String,<:Any}) + +Creates an empty Git-based source repository in a specified project. The repository is +created with an initial empty commit with a default branch named main. + +# Arguments +- `name`: The name of the source repository. For more information about name requirements, + see Quotas for source repositories. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"description"`: The description of the source repository. +""" +function create_source_repository( + name, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "PUT", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_source_repository( + name, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "PUT", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_source_repository_branch(name, project_name, source_repository_name, space_name) create_source_repository_branch(name, project_name, source_repository_name, space_name, params::Dict{String,<:Any}) @@ -288,6 +333,111 @@ function delete_dev_environment( ) end +""" + delete_project(name, space_name) + delete_project(name, space_name, params::Dict{String,<:Any}) + +Deletes a project in a space. + +# Arguments +- `name`: The name of the project in the space. To retrieve a list of project names, use + ListProjects. +- `space_name`: The name of the space. + +""" +function delete_project(name, spaceName; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_project( + name, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_source_repository(name, project_name, space_name) + delete_source_repository(name, project_name, space_name, params::Dict{String,<:Any}) + +Deletes a source repository in Amazon CodeCatalyst. You cannot use this API to delete a +linked repository. It can only be used to delete a Amazon CodeCatalyst source repository. + +# Arguments +- `name`: The name of the source repository. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +""" +function delete_source_repository( + name, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_source_repository( + name, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_space(name) + delete_space(name, params::Dict{String,<:Any}) + +Deletes a space. Deleting a space cannot be undone. Additionally, since space names must +be unique across Amazon CodeCatalyst, you cannot reuse names of deleted spaces. + +# Arguments +- `name`: The name of the space. To retrieve a list of space names, use ListSpaces. 
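The new CodeCatalyst repository and project deletion operations above take the resource, project, and space names positionally; a short sketch with placeholder names, assuming the usual AWS.jl `@service` pattern:

using AWS
@service CodeCatalyst

# Create an empty repository (default branch main), then remove it and its project.
CodeCatalyst.create_source_repository(
    "demo-repo", "demo-project", "demo-space",
    Dict("description" => "Scratch repository"),
)
CodeCatalyst.delete_source_repository("demo-repo", "demo-project", "demo-space")
CodeCatalyst.delete_project("demo-project", "demo-space")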
+ +""" +function delete_space(name; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "DELETE", + "/v1/spaces/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_space( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_dev_environment(id, project_name, space_name) get_dev_environment(id, project_name, space_name, params::Dict{String,<:Any}) @@ -362,6 +512,44 @@ function get_project( ) end +""" + get_source_repository(name, project_name, space_name) + get_source_repository(name, project_name, space_name, params::Dict{String,<:Any}) + +Returns information about a source repository. + +# Arguments +- `name`: The name of the source repository. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +""" +function get_source_repository( + name, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_source_repository( + name, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_source_repository_clone_urls(project_name, source_repository_name, space_name) get_source_repository_clone_urls(project_name, source_repository_name, space_name, params::Dict{String,<:Any}) @@ -492,6 +680,83 @@ function get_user_details( ) end +""" + get_workflow(id, project_name, space_name) + get_workflow(id, project_name, space_name, params::Dict{String,<:Any}) + +Returns information about a workflow. + +# Arguments +- `id`: The ID of the workflow. To rerieve a list of workflow IDs, use ListWorkflows. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +""" +function get_workflow( + id, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflows/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_workflow( + id, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflows/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_workflow_run(id, project_name, space_name) + get_workflow_run(id, project_name, space_name, params::Dict{String,<:Any}) + +Returns information about a specified run of a workflow. + +# Arguments +- `id`: The ID of the workflow run. To retrieve a list of workflow run IDs, use + ListWorkflowRuns. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. 
+ +""" +function get_workflow_run( + id, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflowRuns/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_workflow_run( + id, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflowRuns/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_access_tokens() list_access_tokens(params::Dict{String,<:Any}) @@ -573,13 +838,12 @@ function list_dev_environment_sessions( end """ - list_dev_environments(project_name, space_name) - list_dev_environments(project_name, space_name, params::Dict{String,<:Any}) + list_dev_environments(space_name) + list_dev_environments(space_name, params::Dict{String,<:Any}) Retrieves a list of Dev Environments in a project. # Arguments -- `project_name`: The name of the project in the space. - `space_name`: The name of the space. # Optional Parameters @@ -591,26 +855,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys NextToken element, which you can use to obtain additional results. - `"nextToken"`: A token returned from a call to this API to indicate the next batch of results to return, if any. +- `"projectName"`: The name of the project in the space. """ -function list_dev_environments( - projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() -) +function list_dev_environments(spaceName; aws_config::AbstractAWSConfig=global_aws_config()) return codecatalyst( "POST", - "/v1/spaces/$(spaceName)/projects/$(projectName)/devEnvironments"; + "/v1/spaces/$(spaceName)/devEnvironments"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function list_dev_environments( - projectName, spaceName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return codecatalyst( "POST", - "/v1/spaces/$(spaceName)/projects/$(projectName)/devEnvironments", + "/v1/spaces/$(spaceName)/devEnvironments", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -621,8 +883,14 @@ end list_event_logs(end_time, space_name, start_time) list_event_logs(end_time, space_name, start_time, params::Dict{String,<:Any}) -Retrieves a list of events that occurred during a specified time period in a space. You can -use these events to audit user and system activity in a space. +Retrieves a list of events that occurred during a specific time in a space. You can use +these events to audit user and system activity in a space. For more information, see +Monitoring in the Amazon CodeCatalyst User Guide. ListEventLogs guarantees events for the +last 30 days in a given space. You can also view and retrieve a list of management events +over the last 90 days for Amazon CodeCatalyst in the CloudTrail console by viewing Event +history, or by creating a trail to create and maintain a record of events that extends past +90 days. For more information, see Working with CloudTrail Event History and Working with +CloudTrail trails. 
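Note the signature change to list_dev_environments above: projectName is no longer a positional argument and instead moves into the params dict. Assuming the same `@service CodeCatalyst` binding as the earlier sketch:

# List every Dev Environment in a space...
CodeCatalyst.list_dev_environments("demo-space")

# ...or narrow the listing to one project via the new optional parameter.
CodeCatalyst.list_dev_environments("demo-space", Dict("projectName" => "demo-project"))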
# Arguments - `end_time`: The time after which you do not want any events retrieved, in coordinated @@ -829,6 +1097,96 @@ function list_spaces( ) end +""" + list_workflow_runs(project_name, space_name) + list_workflow_runs(project_name, space_name, params::Dict{String,<:Any}) + +Retrieves a list of workflow runs of a specified workflow. + +# Arguments +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to show in a single call to this API. If + the number of results is larger than the number you specified, the response will include a + NextToken element, which you can use to obtain additional results. +- `"nextToken"`: A token returned from a call to this API to indicate the next batch of + results to return, if any. +- `"sortBy"`: Information used to sort the items in the returned list. +- `"workflowId"`: The ID of the workflow. To retrieve a list of workflow IDs, use + ListWorkflows. +""" +function list_workflow_runs( + projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "POST", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflowRuns"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workflow_runs( + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "POST", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflowRuns", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_workflows(project_name, space_name) + list_workflows(project_name, space_name, params::Dict{String,<:Any}) + +Retrieves a list of workflows in a specified project. + +# Arguments +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to show in a single call to this API. If + the number of results is larger than the number you specified, the response will include a + NextToken element, which you can use to obtain additional results. +- `"nextToken"`: A token returned from a call to this API to indicate the next batch of + results to return, if any. +- `"sortBy"`: Information used to sort the items in the returned list. +""" +function list_workflows( + projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "POST", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflows"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workflows( + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "POST", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflows", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_dev_environment(id, project_name, space_name) start_dev_environment(id, project_name, space_name, params::Dict{String,<:Any}) @@ -926,6 +1284,60 @@ function start_dev_environment_session( ) end +""" + start_workflow_run(project_name, space_name, workflow_id) + start_workflow_run(project_name, space_name, workflow_id, params::Dict{String,<:Any}) + +Begins a run of a specified workflow. 
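The workflow listing operations above accept paging and filtering through the params dict; a sketch with placeholder names under the same `@service CodeCatalyst` setup (the workflow ID is hypothetical):

# Page through a project's workflows.
CodeCatalyst.list_workflows("demo-project", "demo-space", Dict("maxResults" => 25))

# Runs of a single workflow; the workflowId comes from a ListWorkflows response.
CodeCatalyst.list_workflow_runs(
    "demo-project", "demo-space", Dict("workflowId" => "a1b2c3d4-example-workflow-id"),
)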
+ +# Arguments +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. +- `workflow_id`: The system-generated unique ID of the workflow. To retrieve a list of + workflow IDs, use ListWorkflows. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A user-specified idempotency token. Idempotency ensures that an API + request completes only once. With an idempotent request, if the original request completes + successfully, the subsequent retries return the result from the original successful request + and have no additional effect. +""" +function start_workflow_run( + projectName, spaceName, workflowId; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "PUT", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflowRuns", + Dict{String,Any}("workflowId" => workflowId, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_workflow_run( + projectName, + spaceName, + workflowId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "PUT", + "/v1/spaces/$(spaceName)/projects/$(projectName)/workflowRuns", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "workflowId" => workflowId, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_dev_environment(id, project_name, space_name) stop_dev_environment(id, project_name, space_name, params::Dict{String,<:Any}) @@ -1062,6 +1474,76 @@ function update_dev_environment( ) end +""" + update_project(name, space_name) + update_project(name, space_name, params::Dict{String,<:Any}) + +Changes one or more values for a project. + +# Arguments +- `name`: The name of the project. +- `space_name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the project. +""" +function update_project(name, spaceName; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "PATCH", + "/v1/spaces/$(spaceName)/projects/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_project( + name, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "PATCH", + "/v1/spaces/$(spaceName)/projects/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_space(name) + update_space(name, params::Dict{String,<:Any}) + +Changes one or more values for a space. + +# Arguments +- `name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the space. 
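Returning to the workflow run operations added above, a run can be started and then polled by ID; both IDs below are placeholders, with the real run ID coming back from StartWorkflowRun:

# Start a run of a workflow, then fetch the run's details.
CodeCatalyst.start_workflow_run("demo-project", "demo-space", "a1b2c3d4-example-workflow-id")
CodeCatalyst.get_workflow_run("example-run-id", "demo-project", "demo-space")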
+""" +function update_space(name; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "PATCH", + "/v1/spaces/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_space( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "PATCH", + "/v1/spaces/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ verify_session() verify_session(params::Dict{String,<:Any}) diff --git a/src/services/codecommit.jl b/src/services/codecommit.jl index 1a724c4f84..ee247f4515 100644 --- a/src/services/codecommit.jl +++ b/src/services/codecommit.jl @@ -335,9 +335,9 @@ end create_approval_rule_template(approval_rule_template_content, approval_rule_template_name, params::Dict{String,<:Any}) Creates a template for approval rules that can then be associated with one or more -repositories in your AWS account. When you associate a template with a repository, AWS -CodeCommit creates an approval rule that matches the conditions of the template for all -pull requests that meet the conditions of the template. For more information, see +repositories in your Amazon Web Services account. When you associate a template with a +repository, CodeCommit creates an approval rule that matches the conditions of the template +for all pull requests that meet the conditions of the template. For more information, see AssociateApprovalRuleTemplateWithRepository. # Arguments @@ -346,19 +346,20 @@ AssociateApprovalRuleTemplateWithRepository. (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template. When you create the content of the approval rule template, you can specify approvers in an approval pool in - one of two ways: CodeCommitApprovers: This option only requires an AWS account and a - resource. It can be used for both IAM users and federated access users whose name matches - the provided resource name. This is a very powerful option that offers a great deal of - flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all - of the following are counted as approvals coming from that user: An IAM user in the - account (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified in IAM as - Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) This option does not - recognize an active session of someone assuming the role of CodeCommitReview with a role - session name of Mary_Major - (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a - wildcard (*Mary_Major). Fully qualified ARN: This option allows you to specify the fully - qualified Amazon Resource Name (ARN) of the IAM user or role. For more information about - IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide. + one of two ways: CodeCommitApprovers: This option only requires an Amazon Web Services + account and a resource. It can be used for both IAM users and federated access users whose + name matches the provided resource name. This is a very powerful option that offers a great + deal of flexibility. 
For example, if you specify the Amazon Web Services account + 123456789012 and Mary_Major, all of the following are counted as approvals coming from that + user: An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major) A + federated user identified in IAM as Mary_Major + (arn:aws:sts::123456789012:federated-user/Mary_Major) This option does not recognize an + active session of someone assuming the role of CodeCommitReview with a role session name of + Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you + include a wildcard (*Mary_Major). Fully qualified ARN: This option allows you to specify + the fully qualified Amazon Resource Name (ARN) of the IAM user or role. For more + information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User + Guide. - `approval_rule_template_name`: The name of the approval rule template. Provide descriptive names, because this name is applied to the approval rules created automatically in associated repositories. @@ -537,9 +538,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"clientRequestToken"`: A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns - information about the initial request that used that token. The AWS SDKs prepopulate - client request tokens. If you are using an AWS SDK, an idempotency token is created for - you. + information about the initial request that used that token. The Amazon Web ServicesSDKs + prepopulate client request tokens. If you are using an Amazon Web ServicesSDK, an + idempotency token is created for you. - `"description"`: A description of the pull request. """ function create_pull_request( @@ -587,17 +588,17 @@ Creates an approval rule for a pull request. # Arguments - `approval_rule_content`: The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For - more information about approval pools, see the AWS CodeCommit User Guide. When you create - the content of the approval rule, you can specify approvers in an approval pool in one of - two ways: CodeCommitApprovers: This option only requires an AWS account and a resource. - It can be used for both IAM users and federated access users whose name matches the - provided resource name. This is a very powerful option that offers a great deal of - flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all - of the following would be counted as approvals coming from that user: An IAM user in the - account (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified in IAM as - Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) This option does not - recognize an active session of someone assuming the role of CodeCommitReview with a role - session name of Mary_Major + more information about approval pools, see the CodeCommit User Guide. When you create the + content of the approval rule, you can specify approvers in an approval pool in one of two + ways: CodeCommitApprovers: This option only requires an Amazon Web Services account and + a resource. It can be used for both IAM users and federated access users whose name matches + the provided resource name. This is a very powerful option that offers a great deal of + flexibility. 
For example, if you specify the Amazon Web Services account 123456789012 and + Mary_Major, all of the following would be counted as approvals coming from that user: An + IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major) A federated user + identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + This option does not recognize an active session of someone assuming the role of + CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major). Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role. For more information about @@ -657,13 +658,18 @@ Creates a new, empty repository. # Arguments - `repository_name`: The name of the new repository to be created. The repository name - must be unique across the calling AWS account. Repository names are limited to 100 - alphanumeric, dash, and underscore characters, and cannot include certain characters. For - more information about the limits on repository names, see Limits in the AWS CodeCommit - User Guide. The suffix .git is prohibited. + must be unique across the calling Amazon Web Services account. Repository names are limited + to 100 alphanumeric, dash, and underscore characters, and cannot include certain + characters. For more information about the limits on repository names, see Quotas in the + CodeCommit User Guide. The suffix .git is prohibited. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"kmsKeyId"`: The ID of the encryption key. You can view the ID of an encryption key in + the KMS console, or use the KMS APIs to programmatically retrieve a key ID. For more + information about acceptable values for kmsKeyID, see KeyId in the Decrypt API description + in the Key Management Service API Reference. If no key is specified, the default + aws/codecommit Amazon Web Services managed key is used. - `"repositoryDescription"`: A comment or description about the new repository. The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a @@ -1549,15 +1555,19 @@ active identities, use GetCommentReactions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"afterCommitId"`: The full commit ID of the commit in the source branch that was the tip - of the branch at the time the comment was made. + of the branch at the time the comment was made. Requirement is conditional: afterCommitId + must be specified when repositoryName is included. - `"beforeCommitId"`: The full commit ID of the commit in the destination branch that was - the tip of the branch at the time the pull request was created. + the tip of the branch at the time the pull request was created. Requirement is conditional: + beforeCommitId must be specified when repositoryName is included. - `"maxResults"`: A non-zero, non-negative integer used to limit the number of returned results. The default is 100 comments. You can return up to 500 comments with a single request. - `"nextToken"`: An enumeration token that, when provided in a request, returns the next batch of the results. - `"repositoryName"`: The name of the repository that contains the pull request. 
+ Requirement is conditional: repositoryName must be specified when beforeCommitId and + afterCommitId are included. """ function get_comments_for_pull_request( pullRequestId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1709,7 +1719,7 @@ Returns the base-64 encoded contents of a specified file and its metadata. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"commitSpecifier"`: The fully quaified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or - a reference such as refs/heads/master. If none is provided, the head commit is used. + a reference such as refs/heads/main. If none is provided, the head commit is used. """ function get_file( filePath, repositoryName; aws_config::AbstractAWSConfig=global_aws_config() @@ -2206,8 +2216,9 @@ end list_approval_rule_templates() list_approval_rule_templates(params::Dict{String,<:Any}) -Lists all approval rule templates in the specified AWS Region in your AWS account. If an -AWS Region is not specified, the AWS Region where you are signed in is used. +Lists all approval rule templates in the specified Amazon Web Services Region in your +Amazon Web Services account. If an Amazon Web Services Region is not specified, the Amazon +Web Services Region where you are signed in is used. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2310,6 +2321,58 @@ function list_branches( ) end +""" + list_file_commit_history(file_path, repository_name) + list_file_commit_history(file_path, repository_name, params::Dict{String,<:Any}) + +Retrieves a list of commits and changes to a specified file. + +# Arguments +- `file_path`: The full path of the file whose history you want to retrieve, including the + name of the file. +- `repository_name`: The name of the repository that contains the file. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"commitSpecifier"`: The fully quaified reference that identifies the commit that + contains the file. For example, you can specify a full commit ID, a tag, a branch name, or + a reference such as refs/heads/main. If none is provided, the head commit is used. +- `"maxResults"`: A non-zero, non-negative integer used to limit the number of returned + results. +- `"nextToken"`: An enumeration token that allows the operation to batch the results. +""" +function list_file_commit_history( + filePath, repositoryName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecommit( + "ListFileCommitHistory", + Dict{String,Any}("filePath" => filePath, "repositoryName" => repositoryName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_file_commit_history( + filePath, + repositoryName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecommit( + "ListFileCommitHistory", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "filePath" => filePath, "repositoryName" => repositoryName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_pull_requests(repository_name) list_pull_requests(repository_name, params::Dict{String,<:Any}) @@ -2366,7 +2429,7 @@ Gets information about one or more repositories. Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"nextToken"`: An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the - token back to AWS CodeCommit, another page of 1,000 records is retrieved. + token back to CodeCommit, another page of 1,000 records is retrieved. - `"order"`: The order in which to sort the results of a list repositories operation. - `"sortBy"`: The criteria used to sort the results of a list repositories operation. """ @@ -2433,9 +2496,9 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS -CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and -Operations in the AWS CodeCommit User Guide. +Gets information about Amazon Web Servicestags for a specified Amazon Resource Name (ARN) +in CodeCommit. For a list of valid resources in CodeCommit, see CodeCommit Resources and +Operations in the CodeCommit User Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource for which you want to get @@ -3154,7 +3217,7 @@ modify, or delete a reaction for another user. - `comment_id`: The ID of the comment to which you want to add or update a reaction. - `reaction_value`: The emoji reaction you want to add or update. To remove a reaction, provide a value of blank or null. You can also provide the value of none. For information - about emoji reaction values supported in AWS CodeCommit, see the AWS CodeCommit User Guide. + about emoji reaction values supported in CodeCommit, see the CodeCommit User Guide. """ function put_comment_reaction( @@ -3193,8 +3256,8 @@ end put_file(branch_name, file_content, file_path, repository_name) put_file(branch_name, file_content, file_path, repository_name, params::Dict{String,<:Any}) -Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit -for the addition in the specified branch. +Adds or updates a file in a branch in an CodeCommit repository, and generates a commit for +the addition in the specified branch. # Arguments - `branch_name`: The name of the branch where you want to add or update the file. If this @@ -3314,8 +3377,8 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Adds or updates tags for a resource in AWS CodeCommit. For a list of valid resources in AWS -CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide. +Adds or updates tags for a resource in CodeCommit. For a list of valid resources in +CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource to which you want to add @@ -3400,8 +3463,8 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes tags for a resource in AWS CodeCommit. For a list of valid resources in AWS -CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide. +Removes tags for a resource in CodeCommit. For a list of valid resources in CodeCommit, see +CodeCommit Resources and Operations in the CodeCommit User Guide. 
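The new ListFileCommitHistory operation a few hunks above takes the file path and repository name positionally; a sketch with placeholder values, assuming the usual AWS.jl `@service` pattern:

using AWS
@service CodeCommit

# Walk one file's commit history from the tip of a branch.
CodeCommit.list_file_commit_history(
    "src/app.jl", "demo-repo",
    Dict("commitSpecifier" => "refs/heads/main", "maxResults" => 50),
)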
# Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource to which you want to @@ -3647,8 +3710,9 @@ operation to change the default branch name to the current default branch name, message is returned even though the default branch did not change. # Arguments -- `default_branch_name`: The name of the branch to set as the default. -- `repository_name`: The name of the repository to set or change the default branch for. +- `default_branch_name`: The name of the branch to set as the default branch. +- `repository_name`: The name of the repository for which you want to set or change the + default branch. """ function update_default_branch( @@ -3698,15 +3762,15 @@ approvers. - `approval_rule_name`: The name of the approval rule you want to update. - `new_rule_content`: The updated content for the approval rule. When you update the content of the approval rule, you can specify approvers in an approval pool in one of two - ways: CodeCommitApprovers: This option only requires an AWS account and a resource. It - can be used for both IAM users and federated access users whose name matches the provided - resource name. This is a very powerful option that offers a great deal of flexibility. For - example, if you specify the AWS account 123456789012 and Mary_Major, all of the following - are counted as approvals coming from that user: An IAM user in the account - (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified in IAM as - Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) This option does not - recognize an active session of someone assuming the role of CodeCommitReview with a role - session name of Mary_Major + ways: CodeCommitApprovers: This option only requires an Amazon Web Services account and + a resource. It can be used for both IAM users and federated access users whose name matches + the provided resource name. This is a very powerful option that offers a great deal of + flexibility. For example, if you specify the Amazon Web Services account 123456789012 and + Mary_Major, all of the following are counted as approvals coming from that user: An IAM + user in the account (arn:aws:iam::123456789012:user/Mary_Major) A federated user + identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + This option does not recognize an active session of someone assuming the role of + CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major). Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role. For more information about @@ -3994,14 +4058,62 @@ function update_repository_description( ) end +""" + update_repository_encryption_key(kms_key_id, repository_name) + update_repository_encryption_key(kms_key_id, repository_name, params::Dict{String,<:Any}) + +Updates the Key Management Service encryption key used to encrypt and decrypt a CodeCommit +repository. + +# Arguments +- `kms_key_id`: The ID of the encryption key. You can view the ID of an encryption key in + the KMS console, or use the KMS APIs to programmatically retrieve a key ID. For more + information about acceptable values for keyID, see KeyId in the Decrypt API description in + the Key Management Service API Reference. +- `repository_name`: The name of the repository for which you want to update the KMS + encryption key used to encrypt and decrypt the repository. 
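+
+# Example
+An illustrative sketch; the key ID and repository name below are placeholder values, and
+an already-configured `global_aws_config()` is assumed:
+```julia
+using AWS: @service
+@service CodeCommit
+# Placeholder values; substitute your own KMS key ID and repository name.
+CodeCommit.update_repository_encryption_key(
+    "1234abcd-12ab-34cd-56ef-1234567890ab", "MyDemoRepo"
+)
+```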
+ +""" +function update_repository_encryption_key( + kmsKeyId, repositoryName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecommit( + "UpdateRepositoryEncryptionKey", + Dict{String,Any}("kmsKeyId" => kmsKeyId, "repositoryName" => repositoryName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_repository_encryption_key( + kmsKeyId, + repositoryName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecommit( + "UpdateRepositoryEncryptionKey", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "kmsKeyId" => kmsKeyId, "repositoryName" => repositoryName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_repository_name(new_name, old_name) update_repository_name(new_name, old_name, params::Dict{String,<:Any}) -Renames a repository. The repository name must be unique across the calling AWS account. -Repository names are limited to 100 alphanumeric, dash, and underscore characters, and -cannot include certain characters. The suffix .git is prohibited. For more information -about the limits on repository names, see Limits in the AWS CodeCommit User Guide. +Renames a repository. The repository name must be unique across the calling Amazon Web +Services account. Repository names are limited to 100 alphanumeric, dash, and underscore +characters, and cannot include certain characters. The suffix .git is prohibited. For more +information about the limits on repository names, see Quotas in the CodeCommit User Guide. # Arguments - `new_name`: The new name for the repository. diff --git a/src/services/codeconnections.jl b/src/services/codeconnections.jl new file mode 100644 index 0000000000..12fcda7a80 --- /dev/null +++ b/src/services/codeconnections.jl @@ -0,0 +1,1175 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: codeconnections +using AWS.Compat +using AWS.UUIDs + +""" + create_connection(connection_name) + create_connection(connection_name, params::Dict{String,<:Any}) + +Creates a connection that can then be given to other Amazon Web Services services like +CodePipeline so that it can access third-party code repositories. The connection is in +pending status until the third-party connection handshake is completed from the console. + +# Arguments +- `connection_name`: The name of the connection to be created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"HostArn"`: The Amazon Resource Name (ARN) of the host associated with the connection to + be created. +- `"ProviderType"`: The name of the external provider where your third-party code + repository is configured. +- `"Tags"`: The key-value pair to use when tagging the resource. 
+""" +function create_connection( + ConnectionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "CreateConnection", + Dict{String,Any}("ConnectionName" => ConnectionName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_connection( + ConnectionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "CreateConnection", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectionName" => ConnectionName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_host(name, provider_endpoint, provider_type) + create_host(name, provider_endpoint, provider_type, params::Dict{String,<:Any}) + +Creates a resource that represents the infrastructure where a third-party provider is +installed. The host is used when you create connections to an installed third-party +provider type, such as GitHub Enterprise Server. You create one host for all connections to +that provider. A host created through the CLI or the SDK is in `PENDING` status by +default. You can make its status `AVAILABLE` by setting up the host in the console. + +# Arguments +- `name`: The name of the host to be created. +- `provider_endpoint`: The endpoint of the infrastructure to be represented by the host + after it is created. +- `provider_type`: The name of the installed provider to be associated with your + connection. The host resource represents the infrastructure where your provider type is + installed. The valid provider type is GitHub Enterprise Server. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: Tags for the host to be created. +- `"VpcConfiguration"`: The VPC configuration to be provisioned for the host. A VPC must be + configured and the infrastructure to be represented by the host must already be connected + to the VPC. +""" +function create_host( + Name, ProviderEndpoint, ProviderType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "CreateHost", + Dict{String,Any}( + "Name" => Name, + "ProviderEndpoint" => ProviderEndpoint, + "ProviderType" => ProviderType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_host( + Name, + ProviderEndpoint, + ProviderType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "CreateHost", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Name" => Name, + "ProviderEndpoint" => ProviderEndpoint, + "ProviderType" => ProviderType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_repository_link(connection_arn, owner_id, repository_name) + create_repository_link(connection_arn, owner_id, repository_name, params::Dict{String,<:Any}) + +Creates a link to a specified external Git repository. A repository link allows Git sync to +monitor and sync changes to files in a specified Git repository. + +# Arguments +- `connection_arn`: The Amazon Resource Name (ARN) of the connection to be associated with + the repository link. +- `owner_id`: The owner ID for the repository associated with a specific sync + configuration, such as the owner ID in GitHub. +- `repository_name`: The name of the repository to be associated with the repository link. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EncryptionKeyArn"`: The Amazon Resource Name (ARN) encryption key for the repository to + be associated with the repository link. +- `"Tags"`: The tags for the repository to be associated with the repository link. +""" +function create_repository_link( + ConnectionArn, + OwnerId, + RepositoryName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "CreateRepositoryLink", + Dict{String,Any}( + "ConnectionArn" => ConnectionArn, + "OwnerId" => OwnerId, + "RepositoryName" => RepositoryName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_repository_link( + ConnectionArn, + OwnerId, + RepositoryName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "CreateRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConnectionArn" => ConnectionArn, + "OwnerId" => OwnerId, + "RepositoryName" => RepositoryName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_sync_configuration(branch, config_file, repository_link_id, resource_name, role_arn, sync_type) + create_sync_configuration(branch, config_file, repository_link_id, resource_name, role_arn, sync_type, params::Dict{String,<:Any}) + +Creates a sync configuration which allows Amazon Web Services to sync content from a Git +repository to update a specified Amazon Web Services resource. Parameters for the sync +configuration are determined by the sync type. + +# Arguments +- `branch`: The branch in the repository from which changes will be synced. +- `config_file`: The file name of the configuration file that manages syncing between the + connection and the repository. This configuration file is stored in the repository. +- `repository_link_id`: The ID of the repository link created for the connection. A + repository link allows Git sync to monitor and sync changes to files in a specified Git + repository. +- `resource_name`: The name of the Amazon Web Services resource (for example, a + CloudFormation stack in the case of CFN_STACK_SYNC) that will be synchronized from the + linked repository. +- `role_arn`: The ARN of the IAM role that grants permission for Amazon Web Services to use + Git sync to update a given Amazon Web Services resource on your behalf. +- `sync_type`: The type of sync configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"PublishDeploymentStatus"`: Whether to enable or disable publishing of deployment status + to source providers. +- `"TriggerResourceUpdateOn"`: When to trigger Git sync to begin the stack update. 
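+
+# Example
+An illustrative sketch showing the order of the required arguments; every value is a
+placeholder, and CFN_STACK_SYNC is the CloudFormation stack sync type mentioned above:
+```julia
+using AWS: @service
+@service CodeConnections
+CodeConnections.create_sync_configuration(
+    "main",                                  # branch to sync from
+    "deployment/stack-config.yaml",          # placeholder config file path in the repository
+    "REPOSITORY-LINK-ID",                    # placeholder repository link ID
+    "my-cfn-stack",                          # placeholder CloudFormation stack name
+    "arn:aws:iam::111122223333:role/GitSyncRole",  # placeholder IAM role ARN
+    "CFN_STACK_SYNC",
+)
+```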
+""" +function create_sync_configuration( + Branch, + ConfigFile, + RepositoryLinkId, + ResourceName, + RoleArn, + SyncType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "CreateSyncConfiguration", + Dict{String,Any}( + "Branch" => Branch, + "ConfigFile" => ConfigFile, + "RepositoryLinkId" => RepositoryLinkId, + "ResourceName" => ResourceName, + "RoleArn" => RoleArn, + "SyncType" => SyncType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_sync_configuration( + Branch, + ConfigFile, + RepositoryLinkId, + ResourceName, + RoleArn, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "CreateSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Branch" => Branch, + "ConfigFile" => ConfigFile, + "RepositoryLinkId" => RepositoryLinkId, + "ResourceName" => ResourceName, + "RoleArn" => RoleArn, + "SyncType" => SyncType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_connection(connection_arn) + delete_connection(connection_arn, params::Dict{String,<:Any}) + +The connection to be deleted. + +# Arguments +- `connection_arn`: The Amazon Resource Name (ARN) of the connection to be deleted. The + ARN is never reused if the connection is deleted. + +""" +function delete_connection(ConnectionArn; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "DeleteConnection", + Dict{String,Any}("ConnectionArn" => ConnectionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_connection( + ConnectionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "DeleteConnection", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectionArn" => ConnectionArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_host(host_arn) + delete_host(host_arn, params::Dict{String,<:Any}) + +The host to be deleted. Before you delete a host, all connections associated to the host +must be deleted. A host cannot be deleted if it is in the VPC_CONFIG_INITIALIZING or +VPC_CONFIG_DELETING state. + +# Arguments +- `host_arn`: The Amazon Resource Name (ARN) of the host to be deleted. + +""" +function delete_host(HostArn; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "DeleteHost", + Dict{String,Any}("HostArn" => HostArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_host( + HostArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "DeleteHost", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("HostArn" => HostArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_repository_link(repository_link_id) + delete_repository_link(repository_link_id, params::Dict{String,<:Any}) + +Deletes the association between your connection and a specified external Git repository. + +# Arguments +- `repository_link_id`: The ID of the repository link to be deleted. 
+ +""" +function delete_repository_link( + RepositoryLinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "DeleteRepositoryLink", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_repository_link( + RepositoryLinkId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "DeleteRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_sync_configuration(resource_name, sync_type) + delete_sync_configuration(resource_name, sync_type, params::Dict{String,<:Any}) + +Deletes the sync configuration for a specified repository and connection. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource associated with the sync + configuration to be deleted. +- `sync_type`: The type of sync configuration to be deleted. + +""" +function delete_sync_configuration( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "DeleteSyncConfiguration", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_sync_configuration( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "DeleteSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_connection(connection_arn) + get_connection(connection_arn, params::Dict{String,<:Any}) + +Returns the connection ARN and details such as status, owner, and provider type. + +# Arguments +- `connection_arn`: The Amazon Resource Name (ARN) of a connection. + +""" +function get_connection(ConnectionArn; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "GetConnection", + Dict{String,Any}("ConnectionArn" => ConnectionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_connection( + ConnectionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "GetConnection", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectionArn" => ConnectionArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_host(host_arn) + get_host(host_arn, params::Dict{String,<:Any}) + +Returns the host ARN and details such as status, provider type, endpoint, and, if +applicable, the VPC configuration. + +# Arguments +- `host_arn`: The Amazon Resource Name (ARN) of the requested host. 
+ +""" +function get_host(HostArn; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "GetHost", + Dict{String,Any}("HostArn" => HostArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_host( + HostArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "GetHost", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("HostArn" => HostArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_repository_link(repository_link_id) + get_repository_link(repository_link_id, params::Dict{String,<:Any}) + +Returns details about a repository link. A repository link allows Git sync to monitor and +sync changes from files in a specified Git repository. + +# Arguments +- `repository_link_id`: The ID of the repository link to get. + +""" +function get_repository_link( + RepositoryLinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "GetRepositoryLink", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_repository_link( + RepositoryLinkId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "GetRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_repository_sync_status(branch, repository_link_id, sync_type) + get_repository_sync_status(branch, repository_link_id, sync_type, params::Dict{String,<:Any}) + +Returns details about the sync status for a repository. A repository sync uses Git sync to +push and pull changes from your remote repository. + +# Arguments +- `branch`: The branch of the repository link for the requested repository sync status. +- `repository_link_id`: The repository link ID for the requested repository sync status. +- `sync_type`: The sync type of the requested sync status. + +""" +function get_repository_sync_status( + Branch, RepositoryLinkId, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "GetRepositorySyncStatus", + Dict{String,Any}( + "Branch" => Branch, + "RepositoryLinkId" => RepositoryLinkId, + "SyncType" => SyncType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_repository_sync_status( + Branch, + RepositoryLinkId, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "GetRepositorySyncStatus", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Branch" => Branch, + "RepositoryLinkId" => RepositoryLinkId, + "SyncType" => SyncType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_resource_sync_status(resource_name, sync_type) + get_resource_sync_status(resource_name, sync_type, params::Dict{String,<:Any}) + +Returns the status of the sync with the Git repository for a specific Amazon Web Services +resource. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource for the sync status with + the Git repository. +- `sync_type`: The sync type for the sync status with the Git repository. 
+ +""" +function get_resource_sync_status( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "GetResourceSyncStatus", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_resource_sync_status( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "GetResourceSyncStatus", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_sync_blocker_summary(resource_name, sync_type) + get_sync_blocker_summary(resource_name, sync_type, params::Dict{String,<:Any}) + +Returns a list of the most recent sync blockers. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource currently blocked from + automatically being synced from a Git repository. +- `sync_type`: The sync type for the sync blocker summary. + +""" +function get_sync_blocker_summary( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "GetSyncBlockerSummary", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_sync_blocker_summary( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "GetSyncBlockerSummary", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_sync_configuration(resource_name, sync_type) + get_sync_configuration(resource_name, sync_type, params::Dict{String,<:Any}) + +Returns details about a sync configuration, including the sync type and resource name. A +sync configuration allows the configuration to sync (push and pull) changes from the remote +repository for a specified branch in a Git repository. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource for the sync configuration + for which you want to retrieve information. +- `sync_type`: The sync type for the sync configuration for which you want to retrieve + information. + +""" +function get_sync_configuration( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "GetSyncConfiguration", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_sync_configuration( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "GetSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_connections() + list_connections(params::Dict{String,<:Any}) + +Lists the connections associated with your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"HostArnFilter"`: Filters the list of connections to those associated with a specified + host. +- `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the + remaining results, make another call with the returned nextToken value. +- `"NextToken"`: The token that was returned from the previous ListConnections call, which + can be used to return the next set of connections in the list. +- `"ProviderTypeFilter"`: Filters the list of connections to those associated with a + specified provider, such as Bitbucket. +""" +function list_connections(; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "ListConnections"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_connections( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "ListConnections", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_hosts() + list_hosts(params::Dict{String,<:Any}) + +Lists the hosts associated with your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the + remaining results, make another call with the returned nextToken value. +- `"NextToken"`: The token that was returned from the previous ListHosts call, which can be + used to return the next set of hosts in the list. +""" +function list_hosts(; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "ListHosts"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_hosts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "ListHosts", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_repository_links() + list_repository_links(params::Dict{String,<:Any}) + +Lists the repository links created for connections in your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: A non-zero, non-negative integer used to limit the number of returned + results. +- `"NextToken"`: An enumeration token that, when provided in a request, returns the next + batch of the results. +""" +function list_repository_links(; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "ListRepositoryLinks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_repository_links( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "ListRepositoryLinks", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_repository_sync_definitions(repository_link_id, sync_type) + list_repository_sync_definitions(repository_link_id, sync_type, params::Dict{String,<:Any}) + +Lists the repository sync definitions for repository links in your account. + +# Arguments +- `repository_link_id`: The ID of the repository link for the sync definition for which you + want to retrieve information. +- `sync_type`: The sync type of the repository link for the the sync definition for which + you want to retrieve information. 
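+
+# Example
+An illustrative sketch; the repository link ID is a placeholder and CFN_STACK_SYNC is used
+as the sync type:
+```julia
+using AWS: @service
+@service CodeConnections
+CodeConnections.list_repository_sync_definitions("REPOSITORY-LINK-ID", "CFN_STACK_SYNC")
+```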
+ +""" +function list_repository_sync_definitions( + RepositoryLinkId, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "ListRepositorySyncDefinitions", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_repository_sync_definitions( + RepositoryLinkId, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "ListRepositorySyncDefinitions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_sync_configurations(repository_link_id, sync_type) + list_sync_configurations(repository_link_id, sync_type, params::Dict{String,<:Any}) + +Returns a list of sync configurations for a specified repository. + +# Arguments +- `repository_link_id`: The ID of the repository link for the requested list of sync + configurations. +- `sync_type`: The sync type for the requested list of sync configurations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: A non-zero, non-negative integer used to limit the number of returned + results. +- `"NextToken"`: An enumeration token that allows the operation to batch the results of the + operation. +""" +function list_sync_configurations( + RepositoryLinkId, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "ListSyncConfigurations", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_sync_configurations( + RepositoryLinkId, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "ListSyncConfigurations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Gets the set of key-value pairs (metadata) that are used to manage the resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which you want to get + information about tags, if any. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "ListTagsForResource", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds to or modifies the tags of the given resource. Tags are metadata that can be used to +manage a resource. 
+ +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to which you want to add + or update tags. +- `tags`: The tags you want to modify or add to the resource. + +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "TagResource", + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags from an Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to remove tags from. +- `tag_keys`: The list of keys for the tags to be removed from the resource. + +""" +function untag_resource( + ResourceArn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "UntagResource", + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_host(host_arn) + update_host(host_arn, params::Dict{String,<:Any}) + +Updates a specified host with the provided configurations. + +# Arguments +- `host_arn`: The Amazon Resource Name (ARN) of the host to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ProviderEndpoint"`: The URL or endpoint of the host to be updated. +- `"VpcConfiguration"`: The VPC configuration of the host to be updated. A VPC must be + configured and the infrastructure to be represented by the host must already be connected + to the VPC. +""" +function update_host(HostArn; aws_config::AbstractAWSConfig=global_aws_config()) + return codeconnections( + "UpdateHost", + Dict{String,Any}("HostArn" => HostArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_host( + HostArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "UpdateHost", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("HostArn" => HostArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_repository_link(repository_link_id) + update_repository_link(repository_link_id, params::Dict{String,<:Any}) + +Updates the association between your connection and a specified external Git repository. A +repository link allows Git sync to monitor and sync changes to files in a specified Git +repository. + +# Arguments +- `repository_link_id`: The ID of the repository link to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ConnectionArn"`: The Amazon Resource Name (ARN) of the connection for the repository + link to be updated. The updated connection ARN must have the same providerType (such as + GitHub) as the original connection ARN for the repo link. +- `"EncryptionKeyArn"`: The Amazon Resource Name (ARN) of the encryption key for the + repository link to be updated. +""" +function update_repository_link( + RepositoryLinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "UpdateRepositoryLink", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_repository_link( + RepositoryLinkId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "UpdateRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_sync_blocker(id, resolved_reason, resource_name, sync_type) + update_sync_blocker(id, resolved_reason, resource_name, sync_type, params::Dict{String,<:Any}) + +Allows you to update the status of a sync blocker, resolving the blocker and allowing +syncing to continue. + +# Arguments +- `id`: The ID of the sync blocker to be updated. +- `resolved_reason`: The reason for resolving the sync blocker. +- `resource_name`: The name of the resource for the sync blocker to be updated. +- `sync_type`: The sync type of the sync blocker to be updated. + +""" +function update_sync_blocker( + Id, + ResolvedReason, + ResourceName, + SyncType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "UpdateSyncBlocker", + Dict{String,Any}( + "Id" => Id, + "ResolvedReason" => ResolvedReason, + "ResourceName" => ResourceName, + "SyncType" => SyncType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_sync_blocker( + Id, + ResolvedReason, + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "UpdateSyncBlocker", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Id" => Id, + "ResolvedReason" => ResolvedReason, + "ResourceName" => ResourceName, + "SyncType" => SyncType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_sync_configuration(resource_name, sync_type) + update_sync_configuration(resource_name, sync_type, params::Dict{String,<:Any}) + +Updates the sync configuration for your connection and a specified external Git repository. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource for the sync configuration + to be updated. +- `sync_type`: The sync type for the sync configuration to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Branch"`: The branch for the sync configuration to be updated. +- `"ConfigFile"`: The configuration file for the sync configuration to be updated. +- `"PublishDeploymentStatus"`: Whether to enable or disable publishing of deployment status + to source providers. +- `"RepositoryLinkId"`: The ID of the repository link for the sync configuration to be + updated. +- `"RoleArn"`: The ARN of the IAM role for the sync configuration to be updated. 
+- `"TriggerResourceUpdateOn"`: When to trigger Git sync to begin the stack update. +""" +function update_sync_configuration( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codeconnections( + "UpdateSyncConfiguration", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_sync_configuration( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codeconnections( + "UpdateSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/codedeploy.jl b/src/services/codedeploy.jl index add2daffe2..4e67b56fc8 100644 --- a/src/services/codedeploy.jl +++ b/src/services/codedeploy.jl @@ -140,7 +140,7 @@ Gets information about one or more deployment groups. # Arguments - `application_name`: The name of an CodeDeploy application associated with the applicable - IAM or Amazon Web Services account. + user or Amazon Web Services account. - `deployment_group_names`: The names of the deployment groups. """ @@ -228,8 +228,8 @@ function batch_get_deployment_instances( end """ - batch_get_deployment_targets() - batch_get_deployment_targets(params::Dict{String,<:Any}) + batch_get_deployment_targets(deployment_id, target_ids) + batch_get_deployment_targets(deployment_id, target_ids, params::Dict{String,<:Any}) Returns an array of one or more targets associated with a deployment. This method works with all compute types and should be used instead of the deprecated @@ -240,10 +240,9 @@ about Lambda functions targets. Amazon ECS: Information about Amazon ECS ser targets. CloudFormation: Information about targets of blue/green deployments initiated by a CloudFormation stack update. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"deploymentId"`: The unique ID of a deployment. -- `"targetIds"`: The unique IDs of the deployment targets. The compute platform of the +# Arguments +- `deployment_id`: The unique ID of a deployment. +- `target_ids`: The unique IDs of the deployment targets. The compute platform of the deployment determines the type of the targets and their formats. The maximum number of deployment target IDs you can specify is 25. For deployments that use the EC2/On-premises compute platform, the target IDs are Amazon EC2 or on-premises instances @@ -254,18 +253,33 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys <clustername>:<servicename>. Their target type is ecsTarget. For deployments that are deployed with CloudFormation, the target IDs are CloudFormation stack IDs. Their target type is cloudFormationTarget. 
+ """ -function batch_get_deployment_targets(; aws_config::AbstractAWSConfig=global_aws_config()) +function batch_get_deployment_targets( + deploymentId, targetIds; aws_config::AbstractAWSConfig=global_aws_config() +) return codedeploy( - "BatchGetDeploymentTargets"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "BatchGetDeploymentTargets", + Dict{String,Any}("deploymentId" => deploymentId, "targetIds" => targetIds); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function batch_get_deployment_targets( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + deploymentId, + targetIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return codedeploy( "BatchGetDeploymentTargets", - params; + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("deploymentId" => deploymentId, "targetIds" => targetIds), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -384,7 +398,7 @@ Creates an application. # Arguments - `application_name`: The name of the application. This name must be unique with the - applicable IAM or Amazon Web Services account. + applicable user or Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -428,16 +442,16 @@ end Deploys an application revision through the specified deployment group. # Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"autoRollbackConfiguration"`: Configuration information for an automatic rollback that is added when a deployment is created. -- `"deploymentConfigName"`: The name of a deployment configuration associated with the IAM - user or Amazon Web Services account. If not specified, the value configured in the - deployment group is used as the default. If the deployment group does not have a deployment +- `"deploymentConfigName"`: The name of a deployment configuration associated with the user + or Amazon Web Services account. If not specified, the value configured in the deployment + group is used as the default. If the deployment group does not have a deployment configuration associated with it, CodeDeployDefault.OneAtATime is used by default. - `"deploymentGroupName"`: The name of the deployment group. - `"description"`: A comment about the deployment. @@ -533,6 +547,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys FLEET_PERCENT and a value of 95. - `"trafficRoutingConfig"`: The configuration that specifies how the deployment traffic is routed. +- `"zonalConfig"`: Configure the ZonalConfig object if you want CodeDeploy to deploy your + application to one Availability Zone at a time, within an Amazon Web Services Region. For + more information about the zonal configuration feature, see zonal configuration in the + CodeDeploy User Guide. """ function create_deployment_config( deploymentConfigName; aws_config::AbstractAWSConfig=global_aws_config() @@ -570,7 +588,7 @@ end Creates a deployment group to which application revisions are deployed. 
# Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. - `deployment_group_name`: The name of a new deployment group for the specified application. - `service_role_arn`: A service role Amazon Resource Name (ARN) that allows CodeDeploy to @@ -621,6 +639,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tags"`: The metadata that you apply to CodeDeploy deployment groups to help you organize and categorize them. Each tag consists of a key and an optional value, both of which you define. +- `"terminationHookEnabled"`: This parameter only applies if you are using CodeDeploy with + Amazon EC2 Auto Scaling. For more information, see Integrating CodeDeploy with Amazon EC2 + Auto Scaling in the CodeDeploy User Guide. Set terminationHookEnabled to true to have + CodeDeploy install a termination hook into your Auto Scaling group when you create a + deployment group. When this hook is installed, CodeDeploy will perform termination + deployments. For information about termination deployments, see Enabling termination + deployments during Auto Scaling scale-in events in the CodeDeploy User Guide. For more + information about Auto Scaling scale-in events, see the Scale in topic in the Amazon EC2 + Auto Scaling User Guide. - `"triggerConfigurations"`: Information about triggers to create when the deployment group is created. For examples, see Create a Trigger for an CodeDeploy Event in the CodeDeploy User Guide. @@ -674,7 +701,7 @@ end Deletes an application. # Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. """ @@ -713,8 +740,8 @@ Deletes a deployment configuration. A deployment configuration cannot be delete currently in use. Predefined configurations cannot be deleted. # Arguments -- `deployment_config_name`: The name of a deployment configuration associated with the IAM - user or Amazon Web Services account. +- `deployment_config_name`: The name of a deployment configuration associated with the user + or Amazon Web Services account. """ function delete_deployment_config( @@ -753,7 +780,7 @@ end Deletes a deployment group. # Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. - `deployment_group_name`: The name of a deployment group for the specified application. @@ -824,7 +851,11 @@ end delete_resources_by_external_id() delete_resources_by_external_id(params::Dict{String,<:Any}) -Deletes resources linked to an external ID. +Deletes resources linked to an external ID. This action only applies if you have configured +blue/green deployments through CloudFormation. It is not necessary to call this action +directly. CloudFormation calls it on your behalf when it needs to delete stack resources. +This action is offered publicly in case you need to delete resources to comply with General +Data Protection Regulation (GDPR) requirements. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -893,7 +924,7 @@ end Gets information about an application. 
# Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. """ @@ -975,8 +1006,8 @@ the returned revision is always null. Use GetApplicationRevision and the sha256 the returned appSpecContent object to get the content of the deployment’s AppSpec file. # Arguments -- `deployment_id`: The unique ID of a deployment associated with the IAM user or Amazon - Web Services account. +- `deployment_id`: The unique ID of a deployment associated with the user or Amazon Web + Services account. """ function get_deployment(deploymentId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1009,8 +1040,8 @@ end Gets information about a deployment configuration. # Arguments -- `deployment_config_name`: The name of a deployment configuration associated with the IAM - user or Amazon Web Services account. +- `deployment_config_name`: The name of a deployment configuration associated with the user + or Amazon Web Services account. """ function get_deployment_config( @@ -1049,7 +1080,7 @@ end Gets information about a deployment group. # Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. - `deployment_group_name`: The name of a deployment group for the specified application. @@ -1134,27 +1165,41 @@ function get_deployment_instance( end """ - get_deployment_target() - get_deployment_target(params::Dict{String,<:Any}) + get_deployment_target(deployment_id, target_id) + get_deployment_target(deployment_id, target_id, params::Dict{String,<:Any}) Returns information about a deployment target. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"deploymentId"`: The unique ID of a deployment. -- `"targetId"`: The unique ID of a deployment target. +# Arguments +- `deployment_id`: The unique ID of a deployment. +- `target_id`: The unique ID of a deployment target. + """ -function get_deployment_target(; aws_config::AbstractAWSConfig=global_aws_config()) +function get_deployment_target( + deploymentId, targetId; aws_config::AbstractAWSConfig=global_aws_config() +) return codedeploy( - "GetDeploymentTarget"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "GetDeploymentTarget", + Dict{String,Any}("deploymentId" => deploymentId, "targetId" => targetId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function get_deployment_target( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + deploymentId, + targetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return codedeploy( "GetDeploymentTarget", - params; + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("deploymentId" => deploymentId, "targetId" => targetId), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1202,8 +1247,8 @@ end Lists information about revisions for an application. # Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user - or Amazon Web Services account. +- `application_name`: The name of an CodeDeploy application associated with the user or + Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1257,7 +1302,7 @@ end list_applications() list_applications(params::Dict{String,<:Any}) -Lists the applications registered with the IAM user or Amazon Web Services account. +Lists the applications registered with the user or Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1281,7 +1326,7 @@ end list_deployment_configs() list_deployment_configs(params::Dict{String,<:Any}) -Lists the deployment configurations with the IAM user or Amazon Web Services account. +Lists the deployment configurations with the user or Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1308,11 +1353,11 @@ end list_deployment_groups(application_name) list_deployment_groups(application_name, params::Dict{String,<:Any}) -Lists the deployment groups for an application registered with the IAM user or Amazon Web -Services account. +Lists the deployment groups for an application registered with the Amazon Web Services user +or Amazon Web Services account. # Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. # Optional Parameters @@ -1354,7 +1399,7 @@ end The newer BatchGetDeploymentTargets should be used instead because it works with all compute types. ListDeploymentInstances throws an exception if it is used with a compute platform other than EC2/On-premises or Lambda. Lists the instance for a deployment -associated with the IAM user or Amazon Web Services account. +associated with the user or Amazon Web Services account. # Arguments - `deployment_id`: The unique ID of a deployment. @@ -1399,14 +1444,16 @@ function list_deployment_instances( end """ - list_deployment_targets() - list_deployment_targets(params::Dict{String,<:Any}) + list_deployment_targets(deployment_id) + list_deployment_targets(deployment_id, params::Dict{String,<:Any}) Returns an array of target IDs that are associated a deployment. +# Arguments +- `deployment_id`: The unique ID of a deployment. + # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"deploymentId"`: The unique ID of a deployment. - `"nextToken"`: A token identifier returned from the previous ListDeploymentTargets call. It can be used to return the next set of deployment targets in the list. - `"targetFilters"`: A key used to filter the returned targets. The two valid values are: @@ -1414,17 +1461,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Skipped, Succeeded, or Unknown. ServerInstanceLabel - A ServerInstanceLabel filter string can be Blue or Green. 
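+
+# Example
+An illustrative sketch; the deployment ID is a placeholder, and the optional filter uses
+the TargetStatus key described above to return only failed targets:
+```julia
+using AWS: @service
+@service CodeDeploy
+CodeDeploy.list_deployment_targets(
+    "d-EXAMPLE11A", Dict("targetFilters" => Dict("TargetStatus" => ["Failed"]))
+)
+```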
""" -function list_deployment_targets(; aws_config::AbstractAWSConfig=global_aws_config()) +function list_deployment_targets( + deploymentId; aws_config::AbstractAWSConfig=global_aws_config() +) return codedeploy( - "ListDeploymentTargets"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "ListDeploymentTargets", + Dict{String,Any}("deploymentId" => deploymentId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function list_deployment_targets( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + deploymentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return codedeploy( "ListDeploymentTargets", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("deploymentId" => deploymentId), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1434,13 +1490,13 @@ end list_deployments() list_deployments(params::Dict{String,<:Any}) -Lists the deployments in a deployment group for an application registered with the IAM user -or Amazon Web Services account. +Lists the deployments in a deployment group for an application registered with the user or +Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"applicationName"`: The name of an CodeDeploy application associated with the IAM user - or Amazon Web Services account. If applicationName is specified, then deploymentGroupName +- `"applicationName"`: The name of an CodeDeploy application associated with the user or + Amazon Web Services account. If applicationName is specified, then deploymentGroupName must be specified. If it is not specified, then deploymentGroupName must not be specified. - `"createTimeRange"`: A time range (start and end) for returning a subset of the list of deployments. @@ -1627,7 +1683,7 @@ end Registers with CodeDeploy a revision for the specified application. # Arguments -- `application_name`: The name of an CodeDeploy application associated with the IAM user or +- `application_name`: The name of an CodeDeploy application associated with the user or Amazon Web Services account. - `revision`: Information about the application revision to register, including type and location. @@ -1681,7 +1737,7 @@ is supported in the request. You cannot use both. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"iamSessionArn"`: The ARN of the IAM session to associate with the on-premises instance. -- `"iamUserArn"`: The ARN of the IAM user to associate with the on-premises instance. +- `"iamUserArn"`: The ARN of the user to associate with the on-premises instance. """ function register_on_premises_instance( instanceName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1985,6 +2041,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys deployment to update the new Amazon EC2 instances. This may result in instances having different revisions. - `"serviceRoleArn"`: A replacement ARN for the service role, if you want to change it. +- `"terminationHookEnabled"`: This parameter only applies if you are using CodeDeploy with + Amazon EC2 Auto Scaling. For more information, see Integrating CodeDeploy with Amazon EC2 + Auto Scaling in the CodeDeploy User Guide. Set terminationHookEnabled to true to have + CodeDeploy install a termination hook into your Auto Scaling group when you update a + deployment group. 
When this hook is installed, CodeDeploy will perform termination + deployments. For information about termination deployments, see Enabling termination + deployments during Auto Scaling scale-in events in the CodeDeploy User Guide. For more + information about Auto Scaling scale-in events, see the Scale in topic in the Amazon EC2 + Auto Scaling User Guide. - `"triggerConfigurations"`: Information about triggers to change when the deployment group is updated. For examples, see Edit a Trigger in a CodeDeploy Deployment Group in the CodeDeploy User Guide. diff --git a/src/services/codeguru_security.jl b/src/services/codeguru_security.jl index b90dacb3b9..41225c26be 100644 --- a/src/services/codeguru_security.jl +++ b/src/services/codeguru_security.jl @@ -8,7 +8,7 @@ using AWS.UUIDs batch_get_findings(finding_identifiers) batch_get_findings(finding_identifiers, params::Dict{String,<:Any}) -Returns a list of all requested findings. +Returns a list of requested findings from standard scans. # Arguments - `finding_identifiers`: A list of finding identifiers. Each identifier consists of a @@ -48,18 +48,17 @@ end create_scan(resource_id, scan_name) create_scan(resource_id, scan_name, params::Dict{String,<:Any}) -Use to create a scan using code uploaded to an S3 bucket. +Use to create a scan using code uploaded to an Amazon S3 bucket. # Arguments -- `resource_id`: The identifier for an input resource used to create a scan. +- `resource_id`: The identifier for the resource object to be scanned. - `scan_name`: The unique name that CodeGuru Security uses to track revisions across - multiple scans of the same resource. Only allowed for a STANDARD scan type. If not - specified, it will be auto generated. + multiple scans of the same resource. Only allowed for a STANDARD scan type. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"analysisType"`: The type of analysis you want CodeGuru Security to perform in the scan, - either Security or All. The Secuirty type only generates findings related to security. The + either Security or All. The Security type only generates findings related to security. The All type generates both security findings and quality findings. Defaults to Security type if missing. - `"clientToken"`: The idempotency token for the request. Amazon CodeGuru Security uses @@ -119,8 +118,9 @@ end create_upload_url(scan_name) create_upload_url(scan_name, params::Dict{String,<:Any}) -Generates a pre-signed URL and request headers used to upload a code resource. You can -upload your code resource to the URL and add the request headers using any HTTP client. +Generates a pre-signed URL, request headers used to upload a code resource, and code +artifact identifier for the uploaded resource. You can upload your code resource to the URL +with the request headers using any HTTP client. # Arguments - `scan_name`: The name of the scan that will use the uploaded resource. CodeGuru Security @@ -157,7 +157,7 @@ end get_account_configuration() get_account_configuration(params::Dict{String,<:Any}) -Use to get account level configuration. +Use to get the encryption configuration for an account. """ function get_account_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -194,7 +194,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"maxResults"`: The maximum number of results to return in the response. Use this parameter when paginating results. 
If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent - request to retrieve additional results. + request to retrieve additional results. If not specified, returns 1000 results. - `"nextToken"`: A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after @@ -227,14 +227,13 @@ end get_metrics_summary(date) get_metrics_summary(date, params::Dict{String,<:Any}) -Returns top level metrics about an account from a specified date, including number of open +Returns a summary of metrics for an account from a specified date, including number of open findings, the categories with most findings, the scans with most open findings, and scans with most open critical findings. # Arguments - `date`: The date you want to retrieve summary metrics from, rounded to the nearest day. - The date must be within the past two years since metrics data is only stored for two years. - If a date outside of this range is passed, the response will be empty. + The date must be within the past two years. """ function get_metrics_summary(date; aws_config::AbstractAWSConfig=global_aws_config()) @@ -299,15 +298,17 @@ end Returns metrics about all findings in an account within a specified time range. # Arguments -- `end_date`: The end date of the interval which you want to retrieve metrics from. +- `end_date`: The end date of the interval which you want to retrieve metrics from. Round + to the nearest day. - `start_date`: The start date of the interval which you want to retrieve metrics from. + Rounds to the nearest day. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent - request to retrieve additional results. + request to retrieve additional results. If not specified, returns 1000 results. - `"nextToken"`: A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after @@ -349,14 +350,14 @@ end list_scans() list_scans(params::Dict{String,<:Any}) -Returns a list of all the scans in an account. +Returns a list of all scans in an account. Does not return EXPRESS scans. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent - request to retrieve additional results. + request to retrieve additional results. If not specified, returns 100 results. - `"nextToken"`: A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. 
For subsequent calls, use the nextToken value returned from the previous request to continue listing results after @@ -383,7 +384,7 @@ Returns a list of all tags associated with a scan. # Arguments - `resource_arn`: The ARN of the ScanName object. You can retrieve this ARN by calling - ListScans or GetScan. + CreateScan, ListScans, or GetScan. """ function list_tags_for_resource( @@ -418,7 +419,7 @@ Use to add one or more tags to an existing scan. # Arguments - `resource_arn`: The ARN of the ScanName object. You can retrieve this ARN by calling - ListScans or GetScan. + CreateScan, ListScans, or GetScan. - `tags`: An array of key-value pairs used to tag an existing scan. A tag is a custom attribute label with two parts: A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive. An optional tag value field. For example, @@ -458,7 +459,7 @@ Use to remove one or more tags from an existing scan. # Arguments - `resource_arn`: The ARN of the ScanName object. You can retrieve this ARN by calling - ListScans or GetScan. + CreateScan, ListScans, or GetScan. - `tag_keys`: A list of keys for each tag you want to remove from a scan. """ @@ -492,11 +493,13 @@ end update_account_configuration(encryption_config) update_account_configuration(encryption_config, params::Dict{String,<:Any}) -Use to update account-level configuration with an encryption key. +Use to update the encryption configuration for an account. # Arguments -- `encryption_config`: The KMS key ARN you want to use for encryption. Defaults to - service-side encryption if missing. +- `encryption_config`: The customer-managed KMS key ARN you want to use for encryption. If + not specified, CodeGuru Security will use an AWS-managed key for encryption. If you + previously specified a customer-managed KMS key and want CodeGuru Security to use an + AWS-managed key for encryption instead, pass nothing. """ function update_account_configuration( diff --git a/src/services/codepipeline.jl b/src/services/codepipeline.jl index cf292ae368..50afd54dac 100644 --- a/src/services/codepipeline.jl +++ b/src/services/codepipeline.jl @@ -742,8 +742,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"maxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Action execution history is retained for up to 12 months, based on action execution start times. Default - value is 100. Detailed execution history is available for executions run on or after - February 21, 2019. + value is 100. - `"nextToken"`: The token that was returned from the previous ListActionExecutions call, which can be used to return the next set of action executions in the list. """ @@ -803,7 +802,9 @@ end list_pipeline_executions(pipeline_name) list_pipeline_executions(pipeline_name, params::Dict{String,<:Any}) -Gets a summary of the most recent executions for a pipeline. +Gets a summary of the most recent executions for a pipeline. When applying the filter for +pipeline executions that have succeeded in the stage, the operation returns all executions +in the current pipeline version beginning on February 1, 2024. # Arguments - `pipeline_name`: The name of the pipeline for which you want to get execution summary @@ -811,6 +812,7 @@ Gets a summary of the most recent executions for a pipeline. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"filter"`: The pipeline execution to filter on. - `"maxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Pipeline history is limited to the most recent 12 months, based on pipeline execution start times. Default @@ -1425,17 +1427,20 @@ end retry_stage_execution(pipeline_execution_id, pipeline_name, retry_mode, stage_name) retry_stage_execution(pipeline_execution_id, pipeline_name, retry_mode, stage_name, params::Dict{String,<:Any}) -Resumes the pipeline execution by retrying the last failed actions in a stage. You can -retry a stage immediately if any of the actions in the stage fail. When you retry, all -actions that are still in progress continue working, and failed actions are triggered again. +You can retry a stage that has failed without having to run a pipeline again from the +beginning. You do this by either retrying the failed actions in a stage or by retrying all +actions in the stage starting from the first action in the stage. When you retry the failed +actions in a stage, all actions that are still in progress continue working, and failed +actions are triggered again. When you retry a failed stage from the first action in the +stage, the stage cannot have any actions in progress. Before a stage can be retried, it +must either have all actions failed or some actions failed and some succeeded. # Arguments - `pipeline_execution_id`: The ID of the pipeline execution in the failed stage to be retried. Use the GetPipelineState action to retrieve the current pipelineExecutionId of the failed stage - `pipeline_name`: The name of the pipeline that contains the failed stage. -- `retry_mode`: The scope of the retry attempt. Currently, the only supported value is - FAILED_ACTIONS. +- `retry_mode`: The scope of the retry attempt. - `stage_name`: The name of the failed stage to be retried. """ @@ -1485,6 +1490,61 @@ function retry_stage_execution( ) end +""" + rollback_stage(pipeline_name, stage_name, target_pipeline_execution_id) + rollback_stage(pipeline_name, stage_name, target_pipeline_execution_id, params::Dict{String,<:Any}) + +Rolls back a stage execution. + +# Arguments +- `pipeline_name`: The name of the pipeline for which the stage will be rolled back. +- `stage_name`: The name of the stage in the pipeline to be rolled back. +- `target_pipeline_execution_id`: The pipeline execution ID for the stage to be rolled back + to. 
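A brief sketch of calling the new RollbackStage operation through the generated wrapper, assuming the usual `@service` loading pattern; the pipeline name, stage name, and execution ID below are placeholders.

```julia
using AWS
@service CodePipeline

# All values are placeholders for illustration.
CodePipeline.rollback_stage(
    "MyPipeline",                               # pipelineName
    "Deploy",                                   # stageName
    "11111111-2222-3333-4444-555555555555",     # targetPipelineExecutionId
)
```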
+ +""" +function rollback_stage( + pipelineName, + stageName, + targetPipelineExecutionId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "RollbackStage", + Dict{String,Any}( + "pipelineName" => pipelineName, + "stageName" => stageName, + "targetPipelineExecutionId" => targetPipelineExecutionId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function rollback_stage( + pipelineName, + stageName, + targetPipelineExecutionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "RollbackStage", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "pipelineName" => pipelineName, + "stageName" => stageName, + "targetPipelineExecutionId" => targetPipelineExecutionId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_pipeline_execution(name) start_pipeline_execution(name, params::Dict{String,<:Any}) @@ -1499,6 +1559,12 @@ source location specified as part of the pipeline. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"clientRequestToken"`: The system-generated unique ID used to identify a unique execution request. +- `"sourceRevisions"`: A list that allows you to specify, or override, the source revision + for a pipeline execution that's being started. A source revision is the version with all + the changes to your application code, or source artifact, for the pipeline execution. +- `"variables"`: A list that overrides pipeline variables for a pipeline execution that's + being started. Variable names must match [A-Za-z0-9@-_]+, and the values can be anything + except an empty string. """ function start_pipeline_execution(name; aws_config::AbstractAWSConfig=global_aws_config()) return codepipeline( diff --git a/src/services/codestar_connections.jl b/src/services/codestar_connections.jl index 4ca2f80270..87f988b75c 100644 --- a/src/services/codestar_connections.jl +++ b/src/services/codestar_connections.jl @@ -8,13 +8,12 @@ using AWS.UUIDs create_connection(connection_name) create_connection(connection_name, params::Dict{String,<:Any}) -Creates a connection that can then be given to other AWS services like CodePipeline so that -it can access third-party code repositories. The connection is in pending status until the -third-party connection handshake is completed from the console. +Creates a connection that can then be given to other Amazon Web Services services like +CodePipeline so that it can access third-party code repositories. The connection is in +pending status until the third-party connection handshake is completed from the console. # Arguments -- `connection_name`: The name of the connection to be created. The name must be unique in - the calling AWS account. +- `connection_name`: The name of the connection to be created. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -60,8 +59,7 @@ that provider. A host created through the CLI or the SDK is in `PENDING` status default. You can make its status `AVAILABLE` by setting up the host in the console. # Arguments -- `name`: The name of the host to be created. The name must be unique in the calling AWS - account. +- `name`: The name of the host to be created. - `provider_endpoint`: The endpoint of the infrastructure to be represented by the host after it is created. 
- `provider_type`: The name of the installed provider to be associated with your @@ -70,7 +68,7 @@ default. You can make its status `AVAILABLE` by setting up the host in the conso # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Tags"`: +- `"Tags"`: Tags for the host to be created. - `"VpcConfiguration"`: The VPC configuration to be provisioned for the host. A VPC must be configured and the infrastructure to be represented by the host must already be connected to the VPC. @@ -114,6 +112,150 @@ function create_host( ) end +""" + create_repository_link(connection_arn, owner_id, repository_name) + create_repository_link(connection_arn, owner_id, repository_name, params::Dict{String,<:Any}) + +Creates a link to a specified external Git repository. A repository link allows Git sync to +monitor and sync changes to files in a specified Git repository. + +# Arguments +- `connection_arn`: The Amazon Resource Name (ARN) of the connection to be associated with + the repository link. +- `owner_id`: The owner ID for the repository associated with a specific sync + configuration, such as the owner ID in GitHub. +- `repository_name`: The name of the repository to be associated with the repository link. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EncryptionKeyArn"`: The Amazon Resource Name (ARN) encryption key for the repository to + be associated with the repository link. +- `"Tags"`: The tags for the repository to be associated with the repository link. +""" +function create_repository_link( + ConnectionArn, + OwnerId, + RepositoryName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "CreateRepositoryLink", + Dict{String,Any}( + "ConnectionArn" => ConnectionArn, + "OwnerId" => OwnerId, + "RepositoryName" => RepositoryName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_repository_link( + ConnectionArn, + OwnerId, + RepositoryName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "CreateRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConnectionArn" => ConnectionArn, + "OwnerId" => OwnerId, + "RepositoryName" => RepositoryName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_sync_configuration(branch, config_file, repository_link_id, resource_name, role_arn, sync_type) + create_sync_configuration(branch, config_file, repository_link_id, resource_name, role_arn, sync_type, params::Dict{String,<:Any}) + +Creates a sync configuration which allows Amazon Web Services to sync content from a Git +repository to update a specified Amazon Web Services resource. Parameters for the sync +configuration are determined by the sync type. + +# Arguments +- `branch`: The branch in the repository from which changes will be synced. +- `config_file`: The file name of the configuration file that manages syncing between the + connection and the repository. This configuration file is stored in the repository. +- `repository_link_id`: The ID of the repository link created for the connection. A + repository link allows Git sync to monitor and sync changes to files in a specified Git + repository. 
+- `resource_name`: The name of the Amazon Web Services resource (for example, a + CloudFormation stack in the case of CFN_STACK_SYNC) that will be synchronized from the + linked repository. +- `role_arn`: The ARN of the IAM role that grants permission for Amazon Web Services to use + Git sync to update a given Amazon Web Services resource on your behalf. +- `sync_type`: The type of sync configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"PublishDeploymentStatus"`: Whether to enable or disable publishing of deployment status + to source providers. +- `"TriggerResourceUpdateOn"`: When to trigger Git sync to begin the stack update. +""" +function create_sync_configuration( + Branch, + ConfigFile, + RepositoryLinkId, + ResourceName, + RoleArn, + SyncType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "CreateSyncConfiguration", + Dict{String,Any}( + "Branch" => Branch, + "ConfigFile" => ConfigFile, + "RepositoryLinkId" => RepositoryLinkId, + "ResourceName" => ResourceName, + "RoleArn" => RoleArn, + "SyncType" => SyncType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_sync_configuration( + Branch, + ConfigFile, + RepositoryLinkId, + ResourceName, + RoleArn, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "CreateSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Branch" => Branch, + "ConfigFile" => ConfigFile, + "RepositoryLinkId" => RepositoryLinkId, + "ResourceName" => ResourceName, + "RoleArn" => RoleArn, + "SyncType" => SyncType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_connection(connection_arn) delete_connection(connection_arn, params::Dict{String,<:Any}) @@ -179,6 +321,85 @@ function delete_host( ) end +""" + delete_repository_link(repository_link_id) + delete_repository_link(repository_link_id, params::Dict{String,<:Any}) + +Deletes the association between your connection and a specified external Git repository. + +# Arguments +- `repository_link_id`: The ID of the repository link to be deleted. + +""" +function delete_repository_link( + RepositoryLinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "DeleteRepositoryLink", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_repository_link( + RepositoryLinkId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "DeleteRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_sync_configuration(resource_name, sync_type) + delete_sync_configuration(resource_name, sync_type, params::Dict{String,<:Any}) + +Deletes the sync configuration for a specified repository and connection. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource associated with the sync + configuration to be deleted. +- `sync_type`: The type of sync configuration to be deleted. 
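To tie the new Git sync operations in this file together, a hedged end-to-end sketch. The connection ARN, owner ID, repository name, role ARN, repository link ID, and config file path are all placeholders; `CFN_STACK_SYNC` is the sync type named in the docstrings above.

```julia
using AWS
@service Codestar_Connections

# 1. Link an external repository to an existing connection (placeholder values).
link = Codestar_Connections.create_repository_link(
    "arn:aws:codestar-connections:us-east-1:111122223333:connection/EXAMPLE",
    "my-github-org",      # OwnerId
    "my-repo",            # RepositoryName
)

# 2. Create a sync configuration that keeps a CloudFormation stack in sync with a branch.
Codestar_Connections.create_sync_configuration(
    "main",                                          # Branch
    "deployment/stack-deployment.yaml",              # ConfigFile stored in the repository
    "repo-link-id",                                  # RepositoryLinkId (placeholder; in practice taken from step 1's response)
    "MyStack",                                       # ResourceName
    "arn:aws:iam::111122223333:role/GitSyncRole",    # RoleArn
    "CFN_STACK_SYNC",                                # SyncType
)

# 3. Remove the sync configuration when it is no longer needed.
Codestar_Connections.delete_sync_configuration("MyStack", "CFN_STACK_SYNC")
```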
+ +""" +function delete_sync_configuration( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "DeleteSyncConfiguration", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_sync_configuration( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "DeleteSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_connection(connection_arn) get_connection(connection_arn, params::Dict{String,<:Any}) @@ -242,6 +463,226 @@ function get_host( ) end +""" + get_repository_link(repository_link_id) + get_repository_link(repository_link_id, params::Dict{String,<:Any}) + +Returns details about a repository link. A repository link allows Git sync to monitor and +sync changes from files in a specified Git repository. + +# Arguments +- `repository_link_id`: The ID of the repository link to get. + +""" +function get_repository_link( + RepositoryLinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "GetRepositoryLink", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_repository_link( + RepositoryLinkId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "GetRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_repository_sync_status(branch, repository_link_id, sync_type) + get_repository_sync_status(branch, repository_link_id, sync_type, params::Dict{String,<:Any}) + +Returns details about the sync status for a repository. A repository sync uses Git sync to +push and pull changes from your remote repository. + +# Arguments +- `branch`: The branch of the repository link for the requested repository sync status. +- `repository_link_id`: The repository link ID for the requested repository sync status. +- `sync_type`: The sync type of the requested sync status. 
+ +""" +function get_repository_sync_status( + Branch, RepositoryLinkId, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "GetRepositorySyncStatus", + Dict{String,Any}( + "Branch" => Branch, + "RepositoryLinkId" => RepositoryLinkId, + "SyncType" => SyncType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_repository_sync_status( + Branch, + RepositoryLinkId, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "GetRepositorySyncStatus", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Branch" => Branch, + "RepositoryLinkId" => RepositoryLinkId, + "SyncType" => SyncType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_resource_sync_status(resource_name, sync_type) + get_resource_sync_status(resource_name, sync_type, params::Dict{String,<:Any}) + +Returns the status of the sync with the Git repository for a specific Amazon Web Services +resource. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource for the sync status with + the Git repository. +- `sync_type`: The sync type for the sync status with the Git repository. + +""" +function get_resource_sync_status( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "GetResourceSyncStatus", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_resource_sync_status( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "GetResourceSyncStatus", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_sync_blocker_summary(resource_name, sync_type) + get_sync_blocker_summary(resource_name, sync_type, params::Dict{String,<:Any}) + +Returns a list of the most recent sync blockers. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource currently blocked from + automatically being synced from a Git repository. +- `sync_type`: The sync type for the sync blocker summary. + +""" +function get_sync_blocker_summary( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "GetSyncBlockerSummary", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_sync_blocker_summary( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "GetSyncBlockerSummary", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_sync_configuration(resource_name, sync_type) + get_sync_configuration(resource_name, sync_type, params::Dict{String,<:Any}) + +Returns details about a sync configuration, including the sync type and resource name. 
A +sync configuration allows the configuration to sync (push and pull) changes from the remote +repository for a specified branch in a Git repository. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource for the sync configuration + for which you want to retrieve information. +- `sync_type`: The sync type for the sync configuration for which you want to retrieve + information. + +""" +function get_sync_configuration( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "GetSyncConfiguration", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_sync_configuration( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "GetSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_connections() list_connections(params::Dict{String,<:Any}) @@ -298,6 +739,130 @@ function list_hosts( ) end +""" + list_repository_links() + list_repository_links(params::Dict{String,<:Any}) + +Lists the repository links created for connections in your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: A non-zero, non-negative integer used to limit the number of returned + results. +- `"NextToken"`: An enumeration token that, when provided in a request, returns the next + batch of the results. +""" +function list_repository_links(; aws_config::AbstractAWSConfig=global_aws_config()) + return codestar_connections( + "ListRepositoryLinks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_repository_links( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "ListRepositoryLinks", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_repository_sync_definitions(repository_link_id, sync_type) + list_repository_sync_definitions(repository_link_id, sync_type, params::Dict{String,<:Any}) + +Lists the repository sync definitions for repository links in your account. + +# Arguments +- `repository_link_id`: The ID of the repository link for the sync definition for which you + want to retrieve information. +- `sync_type`: The sync type of the repository link for the the sync definition for which + you want to retrieve information. 
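The listing operations added in this hunk expose the familiar MaxResults/NextToken keys on ListRepositoryLinks. A hedged pagination sketch, assuming the call returns the parsed JSON body as a Dict carrying "RepositoryLinks" and "NextToken" keys as described in the service API reference.

```julia
using AWS
@service Codestar_Connections

# Collect every repository link across pages (response-key names are assumptions).
links = Any[]
params = Dict{String,Any}("MaxResults" => 50)
while true
    page = Codestar_Connections.list_repository_links(params)
    append!(links, get(page, "RepositoryLinks", Any[]))
    token = get(page, "NextToken", nothing)
    token === nothing && break
    params["NextToken"] = token
end
```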
+ +""" +function list_repository_sync_definitions( + RepositoryLinkId, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "ListRepositorySyncDefinitions", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_repository_sync_definitions( + RepositoryLinkId, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "ListRepositorySyncDefinitions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_sync_configurations(repository_link_id, sync_type) + list_sync_configurations(repository_link_id, sync_type, params::Dict{String,<:Any}) + +Returns a list of sync configurations for a specified repository. + +# Arguments +- `repository_link_id`: The ID of the repository link for the requested list of sync + configurations. +- `sync_type`: The sync type for the requested list of sync configurations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: A non-zero, non-negative integer used to limit the number of returned + results. +- `"NextToken"`: An enumeration token that allows the operation to batch the results of the + operation. +""" +function list_sync_configurations( + RepositoryLinkId, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "ListSyncConfigurations", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_sync_configurations( + RepositoryLinkId, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "ListSyncConfigurations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RepositoryLinkId" => RepositoryLinkId, "SyncType" => SyncType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -379,7 +944,7 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes tags from an AWS resource. +Removes tags from an Amazon Web Services resource. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource to remove tags from. @@ -450,3 +1015,161 @@ function update_host( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_repository_link(repository_link_id) + update_repository_link(repository_link_id, params::Dict{String,<:Any}) + +Updates the association between your connection and a specified external Git repository. A +repository link allows Git sync to monitor and sync changes to files in a specified Git +repository. + +# Arguments +- `repository_link_id`: The ID of the repository link to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ConnectionArn"`: The Amazon Resource Name (ARN) of the connection for the repository + link to be updated. 
The updated connection ARN must have the same providerType (such as + GitHub) as the original connection ARN for the repo link. +- `"EncryptionKeyArn"`: The Amazon Resource Name (ARN) of the encryption key for the + repository link to be updated. +""" +function update_repository_link( + RepositoryLinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "UpdateRepositoryLink", + Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_repository_link( + RepositoryLinkId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "UpdateRepositoryLink", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RepositoryLinkId" => RepositoryLinkId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_sync_blocker(id, resolved_reason, resource_name, sync_type) + update_sync_blocker(id, resolved_reason, resource_name, sync_type, params::Dict{String,<:Any}) + +Allows you to update the status of a sync blocker, resolving the blocker and allowing +syncing to continue. + +# Arguments +- `id`: The ID of the sync blocker to be updated. +- `resolved_reason`: The reason for resolving the sync blocker. +- `resource_name`: The name of the resource for the sync blocker to be updated. +- `sync_type`: The sync type of the sync blocker to be updated. + +""" +function update_sync_blocker( + Id, + ResolvedReason, + ResourceName, + SyncType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "UpdateSyncBlocker", + Dict{String,Any}( + "Id" => Id, + "ResolvedReason" => ResolvedReason, + "ResourceName" => ResourceName, + "SyncType" => SyncType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_sync_blocker( + Id, + ResolvedReason, + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "UpdateSyncBlocker", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Id" => Id, + "ResolvedReason" => ResolvedReason, + "ResourceName" => ResourceName, + "SyncType" => SyncType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_sync_configuration(resource_name, sync_type) + update_sync_configuration(resource_name, sync_type, params::Dict{String,<:Any}) + +Updates the sync configuration for your connection and a specified external Git repository. + +# Arguments +- `resource_name`: The name of the Amazon Web Services resource for the sync configuration + to be updated. +- `sync_type`: The sync type for the sync configuration to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Branch"`: The branch for the sync configuration to be updated. +- `"ConfigFile"`: The configuration file for the sync configuration to be updated. +- `"PublishDeploymentStatus"`: Whether to enable or disable publishing of deployment status + to source providers. +- `"RepositoryLinkId"`: The ID of the repository link for the sync configuration to be + updated. +- `"RoleArn"`: The ARN of the IAM role for the sync configuration to be updated. +- `"TriggerResourceUpdateOn"`: When to trigger Git sync to begin the stack update. 
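A small sketch of the calling convention for UpdateSyncConfiguration: required arguments are positional and optional keys travel in the trailing params dictionary. The values below are placeholders, and the PublishDeploymentStatus value is assumed to be an ENABLED/DISABLED toggle as in the service API reference.

```julia
using AWS
@service Codestar_Connections

# Placeholder values for illustration.
Codestar_Connections.update_sync_configuration(
    "MyStack",         # ResourceName
    "CFN_STACK_SYNC",  # SyncType
    Dict{String,Any}(
        "Branch" => "release",
        "PublishDeploymentStatus" => "ENABLED",  # assumed enum value
    ),
)
```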
+""" +function update_sync_configuration( + ResourceName, SyncType; aws_config::AbstractAWSConfig=global_aws_config() +) + return codestar_connections( + "UpdateSyncConfiguration", + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_sync_configuration( + ResourceName, + SyncType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codestar_connections( + "UpdateSyncConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceName" => ResourceName, "SyncType" => SyncType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/cognito_identity_provider.jl b/src/services/cognito_identity_provider.jl index 1521769c78..3a04a4a714 100644 --- a/src/services/cognito_identity_provider.jl +++ b/src/services/cognito_identity_provider.jl @@ -8,7 +8,11 @@ using AWS.UUIDs add_custom_attributes(custom_attributes, user_pool_id) add_custom_attributes(custom_attributes, user_pool_id, params::Dict{String,<:Any}) -Adds additional user attributes to the user pool schema. +Adds additional user attributes to the user pool schema. Amazon Cognito evaluates Identity +and Access Management (IAM) policies in requests for this API operation. For this +operation, you must use IAM credentials to authorize requests, and you must grant yourself +the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services +API Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `custom_attributes`: An array of custom attributes, such as Mutable and Name. @@ -54,13 +58,21 @@ end admin_add_user_to_group(group_name, user_pool_id, username) admin_add_user_to_group(group_name, user_pool_id, username, params::Dict{String,<:Any}) -Adds the specified user to the specified group. Calling this action requires developer -credentials. +Adds a user to a group. A user who is in a group can present a preferred-role claim to an +identity pool, and populates a cognito:groups claim to their access and identity tokens. +Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this +API operation. For this operation, you must use IAM credentials to authorize requests, and +you must grant yourself the corresponding IAM permission in a policy. Learn more +Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and +user pool endpoints # Arguments -- `group_name`: The group name. +- `group_name`: The name of the group that you want to add your user to. - `user_pool_id`: The user pool ID for the user pool. -- `username`: The username for the user. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_add_user_to_group( @@ -104,12 +116,26 @@ end admin_confirm_sign_up(user_pool_id, username) admin_confirm_sign_up(user_pool_id, username, params::Dict{String,<:Any}) -Confirms user registration as an admin without using a confirmation code. Works on any -user. Calling this action requires developer credentials. 
+This IAM-authenticated API operation provides a code that Amazon Cognito sent to your user +when they signed up in your user pool. After your user enters their code, they confirm +ownership of the email address or phone number that they provided, and their user account +becomes active. Depending on your user pool configuration, your users will receive their +confirmation code in an email or SMS message. Local users who signed up in your user pool +are the only type of user who can confirm sign-up with a code. Users who federate through +an external identity provider (IdP) have already been confirmed by their IdP. +Administrator-created users confirm their accounts when they respond to their invitation +email message and choose a password. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for which you want to confirm user registration. -- `username`: The user name for which you want to confirm user registration. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -180,13 +206,21 @@ create or update a user pool. This template includes your custom sign-up instruc placeholders for user name and temporary password. Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email. In either case, the user will be in the FORCE_CHANGE_PASSWORD state until -they sign in and change their password. AdminCreateUser requires developer credentials. +they sign in and change their password. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool where the user will be created. -- `username`: The username for the user. Must be unique within the user pool. Must be a - UTF-8 string between 1 and 128 characters. After the user is created, the username can't be - changed. +- `username`: The value that you want to set as the username sign-in attribute. The + following conditions apply to the username parameter. The username can't be a duplicate + of another username in the same user pool. You can't change the value of a username after + you create it. You can only provide a value if usernames are a valid sign-in attribute + for your user pool. If your user pool only supports phone numbers or email addresses as + sign-in attributes, Amazon Cognito automatically generates a username value. For more + information, see Customizing sign-in attributes. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -224,9 +258,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys temporary password in the sign-in page, along with a new password to be used in all future sign-ins. This parameter isn't required. If you don't specify a value, Amazon Cognito generates one for you. The temporary password can only be used until the user account - expiration limit that you specified when you created the user pool. To reset the account - after that time limit, you must call AdminCreateUser again, specifying \"RESEND\" for the - MessageAction parameter. + expiration limit that you set for your user pool. To reset the account after that time + limit, you must call AdminCreateUser again and specify RESEND for the MessageAction + parameter. - `"UserAttributes"`: An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than Username. However, any attributes that you specify as @@ -244,13 +278,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter. phone_number: The phone number of the user to whom the message that contains the code and username will be sent. Required if the phone_number_verified attribute is set to True, or if \"SMS\" is specified in the DesiredDeliveryMediums parameter. -- `"ValidationData"`: The user's validation data. This is an array of name-value pairs that - contain user attributes and attribute values that you can use for custom validation, such - as restricting the types of user accounts that can be registered. For example, you might - choose to allow or disallow user sign-up based on the user's domain. To configure custom - validation, you must create a Pre Sign-up Lambda trigger for the user pool as described in - the Amazon Cognito Developer Guide. The Lambda trigger receives the validation data and - uses it in the validation process. The user's validation data isn't persisted. +- `"ValidationData"`: Temporary user attributes that contribute to the outcomes of your pre + sign-up Lambda trigger. This set of key-value pairs are for custom validation of + information that you collect from your users but don't need to retain. Your Lambda function + can analyze this additional data and act on it. Your function might perform external API + operations like logging user attributes and validation data to Amazon CloudWatch Logs. + Validation data might also affect the response that your function returns to Amazon + Cognito, like automatically confirming the user if they sign up from within your network. + For more information about the pre sign-up Lambda trigger, see Pre sign-up Lambda trigger. """ function admin_create_user( UserPoolId, Username; aws_config::AbstractAWSConfig=global_aws_config() @@ -286,12 +321,18 @@ end admin_delete_user(user_pool_id, username) admin_delete_user(user_pool_id, username, params::Dict{String,<:Any}) -Deletes a user as an administrator. Works on any user. Calling this action requires -developer credentials. +Deletes a user as an administrator. Works on any user. Amazon Cognito evaluates Identity +and Access Management (IAM) policies in requests for this API operation. For this +operation, you must use IAM credentials to authorize requests, and you must grant yourself +the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services +API Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool where you want to delete the user. -- `username`: The user name of the user you want to delete. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_delete_user( @@ -328,8 +369,12 @@ end admin_delete_user_attributes(user_attribute_names, user_pool_id, username) admin_delete_user_attributes(user_attribute_names, user_pool_id, username, params::Dict{String,<:Any}) -Deletes the user attributes in a user pool as an administrator. Works on any user. Calling -this action requires developer credentials. +Deletes the user attributes in a user pool as an administrator. Works on any user. Amazon +Cognito evaluates Identity and Access Management (IAM) policies in requests for this API +operation. For this operation, you must use IAM credentials to authorize requests, and you +must grant yourself the corresponding IAM permission in a policy. Learn more Signing +Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool +endpoints # Arguments - `user_attribute_names`: An array of strings representing the user attribute names you @@ -337,7 +382,10 @@ this action requires developer credentials. name. - `user_pool_id`: The user pool ID for the user pool where you want to delete user attributes. -- `username`: The user name of the user from which you would like to delete attributes. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_delete_user_attributes( @@ -392,10 +440,9 @@ native username + password user, they can't use their password to sign in. If th deactivate is a linked external IdP user, any link between that user and an existing user is removed. When the external user signs in again, and the user is no longer attached to the previously linked DestinationUser, the user must create a new user account. See -AdminLinkProviderForUser. This action is enabled only for admin access and requires -developer credentials. The ProviderName must match the value specified when creating an IdP -for the pool. To deactivate a native username + password user, the ProviderName value must -be Cognito and the ProviderAttributeName must be Cognito_Subject. The +AdminLinkProviderForUser. The ProviderName must match the value specified when creating an +IdP for the pool. To deactivate a native username + password user, the ProviderName value +must be Cognito and the ProviderAttributeName must be Cognito_Subject. The ProviderAttributeValue must be the name that is used in the user pool for the user. The ProviderAttributeName must always be Cognito_Subject for social IdPs. The ProviderAttributeValue must always be the exact subject that was used when the user was @@ -406,6 +453,11 @@ the SourceUser when the identities were originally linked using AdminLinkProvid call. 
(If the linking was done with ProviderAttributeName set to Cognito_Subject, the same applies here). However, if the user has already signed in, the ProviderAttributeName must be Cognito_Subject and ProviderAttributeValue must be the subject of the SAML assertion. +Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this +API operation. For this operation, you must use IAM credentials to authorize requests, and +you must grant yourself the corresponding IAM permission in a policy. Learn more +Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and +user pool endpoints # Arguments - `user`: The user to be disabled. @@ -445,13 +497,19 @@ end admin_disable_user(user_pool_id, username, params::Dict{String,<:Any}) Deactivates a user and revokes all access tokens for the user. A deactivated user can't -sign in, but still appears in the responses to GetUser and ListUsers API requests. You must -make this API request with Amazon Web Services credentials that have -cognito-idp:AdminDisableUser permissions. +sign in, but still appears in the responses to GetUser and ListUsers API requests. Amazon +Cognito evaluates Identity and Access Management (IAM) policies in requests for this API +operation. For this operation, you must use IAM credentials to authorize requests, and you +must grant yourself the corresponding IAM permission in a policy. Learn more Signing +Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool +endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool where you want to disable the user. -- `username`: The user name of the user you want to disable. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_disable_user( @@ -488,12 +546,19 @@ end admin_enable_user(user_pool_id, username) admin_enable_user(user_pool_id, username, params::Dict{String,<:Any}) -Enables the specified user as an administrator. Works on any user. Calling this action -requires developer credentials. +Enables the specified user as an administrator. Works on any user. Amazon Cognito +evaluates Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you must use IAM credentials to authorize requests, and you must grant +yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web +Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + # Arguments - `user_pool_id`: The user pool ID for the user pool where you want to enable the user. -- `username`: The user name of the user you want to enable. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_enable_user( @@ -530,12 +595,19 @@ end admin_forget_device(device_key, user_pool_id, username) admin_forget_device(device_key, user_pool_id, username, params::Dict{String,<:Any}) -Forgets the device, as an administrator. 
Calling this action requires developer credentials. +Forgets the device, as an administrator. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `device_key`: The device key. - `user_pool_id`: The user pool ID. -- `username`: The user name. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_forget_device( @@ -579,12 +651,19 @@ end admin_get_device(device_key, user_pool_id, username) admin_get_device(device_key, user_pool_id, username, params::Dict{String,<:Any}) -Gets the device, as an administrator. Calling this action requires developer credentials. +Gets the device, as an administrator. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `device_key`: The device key. - `user_pool_id`: The user pool ID. -- `username`: The user name. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_get_device( @@ -629,12 +708,19 @@ end admin_get_user(user_pool_id, username, params::Dict{String,<:Any}) Gets the specified user by user name in a user pool as an administrator. Works on any user. -Calling this action requires developer credentials. + Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for +this API operation. For this operation, you must use IAM credentials to authorize requests, +and you must grant yourself the corresponding IAM permission in a policy. Learn more +Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and +user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool where you want to get information about the user. -- `username`: The user name of the user you want to retrieve. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_get_user( @@ -682,7 +768,11 @@ Web Service, Amazon Simple Notification Service might place your account in the sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. 
For more information, see SMS message settings for Amazon Cognito user pools -in the Amazon Cognito Developer Guide. Calling this action requires developer credentials. +in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `auth_flow`: The authentication flow for this call to run. The API action will depend on @@ -708,13 +798,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AuthParameters"`: The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow: For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app - client is configured with a client secret), DEVICE_KEY. For - REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the - app client is configured with a client secret), DEVICE_KEY. For ADMIN_NO_SRP_AUTH: - USERNAME (required), SECRET_HASH (if app client is configured with client secret), PASSWORD - (required), DEVICE_KEY. For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client - is configured with client secret), DEVICE_KEY. To start the authentication flow with - password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). + client is configured with a client secret), DEVICE_KEY. For ADMIN_USER_PASSWORD_AUTH: + USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is + configured with a client secret), DEVICE_KEY. For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: + REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a + client secret), DEVICE_KEY. For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app + client is configured with client secret), DEVICE_KEY. To start the authentication flow with + password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). For + more information about SECRET_HASH, see Computing secret hash values. For information about + DEVICE_KEY, see Working with user devices in your user pool. - `"ClientMetadata"`: A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminInitiateAuth API @@ -728,14 +820,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys validationData value to enhance your workflow for your specific needs. When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input: Post authentication - Custom message Pre token generation Create auth challenge Define auth challenge - Verify auth challenge For more information, see Customizing user pool Workflows with - Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata - parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata - value. This data is available only to Lambda triggers that are assigned to a user pool to - support custom workflows. 
If your user pool configuration doesn't include triggers, the - ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt - the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + Custom message Pre token generation Create auth challenge Define auth challenge For + more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon + Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon + Cognito won't do the following: Store the ClientMetadata value. This data is available + only to Lambda triggers that are assigned to a user pool to support custom workflows. If + your user pool configuration doesn't include triggers, the ClientMetadata parameter serves + no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't + use Amazon Cognito to provide sensitive information. - `"ContextData"`: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon @@ -792,12 +884,16 @@ identity. When the user signs in with a federated user identity, they sign in as existing user account. The maximum number of federated identities linked to a user is five. Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external IdPs and -provider attributes that have been trusted by the application owner. This action is -administrative and requires developer credentials. +provider attributes that have been trusted by the application owner. Amazon Cognito +evaluates Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you must use IAM credentials to authorize requests, and you must grant +yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web +Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + # Arguments - `destination_user`: The existing user in the user pool that you want to assign to the - external IdP user account. This user can be a native (Username + Password) Amazon Cognito + external IdP user account. This user can be a local (Username + Password) Amazon Cognito user pools user or a federated user (for example, a SAML or Facebook user). If the user doesn't exist, Amazon Cognito generates an exception. Amazon Cognito returns this user when the new user (with the linked IdP attribute) signs in. For a native username + password @@ -814,13 +910,17 @@ administrative and requires developer credentials. IdPs, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Amazon Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value - as the id, sub, or user_id value found in the social IdP token. For SAML, the - ProviderAttributeName can be any value that matches a claim in the SAML assertion. If you - want to link SAML users based on the subject of the SAML assertion, you should map the - subject to a claim through the SAML IdP and submit that claim name as the - ProviderAttributeName. 
If you set ProviderAttributeName to Cognito_Subject, Amazon Cognito - will automatically parse the default unique identifier found in the subject from the SAML - token. + as the id, sub, or user_id value found in the social IdP token. For OIDC, the + ProviderAttributeName can be any value that matches a claim in the ID token, or that your + app retrieves from the userInfo endpoint. You must map the claim to a user pool attribute + in your IdP configuration, and set the user pool attribute name as the value of + ProviderAttributeName in your AdminLinkProviderForUser request. For SAML, the + ProviderAttributeName can be any value that matches a claim in the SAML assertion. To link + SAML users based on the subject of the SAML assertion, map the subject to a claim through + the SAML IdP and set that claim name as the value of ProviderAttributeName in your + AdminLinkProviderForUser request. For both OIDC and SAML users, when you set + ProviderAttributeName to Cognito_Subject, Amazon Cognito will automatically parse the + default unique identifier found in the subject from the IdP token. - `user_pool_id`: The user pool ID for the user pool. """ @@ -870,16 +970,27 @@ end admin_list_devices(user_pool_id, username) admin_list_devices(user_pool_id, username, params::Dict{String,<:Any}) -Lists devices, as an administrator. Calling this action requires developer credentials. +Lists devices, as an administrator. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID. -- `username`: The user name. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Limit"`: The limit of the devices request. -- `"PaginationToken"`: The pagination token. +- `"PaginationToken"`: This API operation returns a limited number of results. The + pagination token is an identifier that you can present in an additional API request with + the same parameters. When you include the pagination token, Amazon Cognito returns the next + set of items after the current list. Subsequent requests return a new pagination token. By + use of this token, you can paginate through the full list of items. """ function admin_list_devices( UserPoolId, Username; aws_config::AbstractAWSConfig=global_aws_config() @@ -915,12 +1026,18 @@ end admin_list_groups_for_user(user_pool_id, username) admin_list_groups_for_user(user_pool_id, username, params::Dict{String,<:Any}) -Lists the groups that the user belongs to. Calling this action requires developer -credentials. +Lists the groups that a user belongs to. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool. -- `username`: The username for the user. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -963,11 +1080,18 @@ end admin_list_user_auth_events(user_pool_id, username, params::Dict{String,<:Any}) A history of user activity and any risks detected as part of Amazon Cognito advanced -security. +security. Amazon Cognito evaluates Identity and Access Management (IAM) policies in +requests for this API operation. For this operation, you must use IAM credentials to +authorize requests, and you must grant yourself the corresponding IAM permission in a +policy. Learn more Signing Amazon Web Services API Requests Using the Amazon +Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID. -- `username`: The user pool username or an alias. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1009,13 +1133,19 @@ end admin_remove_user_from_group(group_name, user_pool_id, username) admin_remove_user_from_group(group_name, user_pool_id, username, params::Dict{String,<:Any}) -Removes the specified user from the specified group. Calling this action requires developer -credentials. +Removes the specified user from the specified group. Amazon Cognito evaluates Identity and +Access Management (IAM) policies in requests for this API operation. For this operation, +you must use IAM credentials to authorize requests, and you must grant yourself the +corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API +Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `group_name`: The group name. - `user_pool_id`: The user pool ID for the user pool. -- `username`: The username for the user. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_remove_user_from_group( @@ -1060,30 +1190,38 @@ end admin_reset_user_password(user_pool_id, username, params::Dict{String,<:Any}) Resets the specified user's password in a user pool as an administrator. Works on any user. -When a developer calls this API, the current password is invalidated, so it must be -changed. 
If a user tries to sign in after the API is called, the app will get a -PasswordResetRequiredException exception back and should direct the user down the flow to -reset the password, which is the same as the forgot password flow. In addition, if the user -pool has phone verification selected and a verified phone number exists for the user, or if -email verification is selected and a verified email exists for the user, calling this API -will also result in sending a message to the end user with the code to change their -password. This action might generate an SMS text message. Starting June 1, 2021, US -telecom carriers require you to register an origination phone number before you can send -SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must -register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number -automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be -able to sign up, activate their accounts, or sign in. If you have never used SMS text -messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification -Service might place your account in the SMS sandbox. In sandbox mode , you can send -messages only to verified phone numbers. After you test your app while in the sandbox -environment, you can move out of the sandbox and into production. For more information, see - SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. -Calling this action requires developer credentials. +To use this API operation, your user pool must have self-service account recovery +configured. Use AdminSetUserPassword if you manage passwords as an administrator. This +action might generate an SMS text message. Starting June 1, 2021, US telecom carriers +require you to register an origination phone number before you can send SMS messages to US +phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone +number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. +Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, +activate their accounts, or sign in. If you have never used SMS text messages with Amazon +Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place +your account in the SMS sandbox. In sandbox mode , you can send messages only to verified +phone numbers. After you test your app while in the sandbox environment, you can move out +of the sandbox and into production. For more information, see SMS message settings for +Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates a user's +password, requiring them to change it. If a user tries to sign in after the API is called, +Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then +perform the actions that reset your user's password: the forgot-password flow. In addition, +if the user pool has phone verification selected and a verified phone number exists for the +user, or if email verification is selected and a verified email exists for the user, +calling this API will also result in sending a message to the end user with the code to +change their password. Amazon Cognito evaluates Identity and Access Management (IAM) +policies in requests for this API operation. For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool where you want to reset the user's password. -- `username`: The user name of the user whose password you want to reset. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1137,18 +1275,27 @@ end admin_respond_to_auth_challenge(challenge_name, client_id, user_pool_id) admin_respond_to_auth_challenge(challenge_name, client_id, user_pool_id, params::Dict{String,<:Any}) -Responds to an authentication challenge, as an administrator. This action might generate -an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an -origination phone number before you can send SMS messages to US phone numbers. If you use -SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. -Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users -who must receive SMS messages might not be able to sign up, activate their accounts, or -sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon -Web Service, Amazon Simple Notification Service might place your account in the SMS -sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you -test your app while in the sandbox environment, you can move out of the sandbox and into -production. For more information, see SMS message settings for Amazon Cognito user pools -in the Amazon Cognito Developer Guide. Calling this action requires developer credentials. +Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for +device authentication that bypasses MFA, or for a custom authentication challenge. An +AdminRespondToAuthChallenge API request provides the answer to that challenge, like a code +or a secure remote password (SRP). The parameters of a response to an authentication +challenge vary with the type of challenge. For more information about custom authentication +challenges, see Custom authentication challenge Lambda triggers. This action might +generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to +register an origination phone number before you can send SMS messages to US phone numbers. +If you use SMS text messages in Amazon Cognito, you must register a phone number with +Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon +Cognito users who must receive SMS messages might not be able to sign up, activate their +accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any +other Amazon Web Service, Amazon Simple Notification Service might place your account in +the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. +After you test your app while in the sandbox environment, you can move out of the sandbox +and into production. For more information, see SMS message settings for Amazon Cognito +user pools in the Amazon Cognito Developer Guide. 
Amazon Cognito evaluates Identity and +Access Management (IAM) policies in requests for this API operation. For this operation, +you must use IAM credentials to authorize requests, and you must grant yourself the +corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API +Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `challenge_name`: The challenge name. For more information, see AdminInitiateAuth. @@ -1159,26 +1306,41 @@ in the Amazon Cognito Developer Guide. Calling this action requires developer c Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AnalyticsMetadata"`: The analytics metadata for collecting Amazon Pinpoint metrics for AdminRespondToAuthChallenge calls. -- `"ChallengeResponses"`: The challenge responses. These are inputs corresponding to the - value of ChallengeName, for example: SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH (if - app client is configured with client secret). PASSWORD_VERIFIER: - PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME, SECRET_HASH (if - app client is configured with client secret). PASSWORD_VERIFIER requires DEVICE_KEY when - signing in with a remembered device. ADMIN_NO_SRP_AUTH: PASSWORD, USERNAME, SECRET_HASH - (if app client is configured with client secret). NEW_PASSWORD_REQUIRED: NEW_PASSWORD, - USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required - attributes that Amazon Cognito returned as requiredAttributes in the AdminInitiateAuth - response, add a userAttributes.attributename parameter. This parameter can also set values - for writable attributes that aren't required by your user pool. In a NEW_PASSWORD_REQUIRED - challenge response, you can't modify a required attribute that already has a value. In - AdminRespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the - requiredAttributes parameter, then use the AdminUpdateUserAttributes API operation to - modify the value of any additional attributes. MFA_SETUP requires USERNAME, plus you - must use the session value returned by VerifySoftwareToken in the Session parameter. The - value of the USERNAME attribute must be the user's actual username, not an alias (such as - an email address or phone number). To make this simpler, the AdminInitiateAuth response - includes the actual username value in the USERNAMEUSER_ID_FOR_SRP attribute. This happens - even if you specified an alias in your call to AdminInitiateAuth. +- `"ChallengeResponses"`: The responses to the challenge that you received in the previous + request. Each challenge has its own required response parameters. The following examples + are partial JSON request bodies that highlight challenge-response parameters. You must + provide a SECRET_HASH parameter in all challenge responses to an app client that has a + client secret. SMS_MFA \"ChallengeName\": \"SMS_MFA\", \"ChallengeResponses\": + {\"SMS_MFA_CODE\": \"[SMS_code]\", \"USERNAME\": \"[username]\"} PASSWORD_VERIFIER + \"ChallengeName\": \"PASSWORD_VERIFIER\", \"ChallengeResponses\": + {\"PASSWORD_CLAIM_SIGNATURE\": \"[claim_signature]\", \"PASSWORD_CLAIM_SECRET_BLOCK\": + \"[secret_block]\", \"TIMESTAMP\": [timestamp], \"USERNAME\": \"[username]\"} Add + \"DEVICE_KEY\" when you sign in with a remembered device. 
CUSTOM_CHALLENGE + \"ChallengeName\": \"CUSTOM_CHALLENGE\", \"ChallengeResponses\": {\"USERNAME\": + \"[username]\", \"ANSWER\": \"[challenge_answer]\"} Add \"DEVICE_KEY\" when you sign in + with a remembered device. NEW_PASSWORD_REQUIRED \"ChallengeName\": + \"NEW_PASSWORD_REQUIRED\", \"ChallengeResponses\": {\"NEW_PASSWORD\": \"[new_password]\", + \"USERNAME\": \"[username]\"} To set any required attributes that InitiateAuth returned in + an requiredAttributes parameter, add \"userAttributes.[attribute_name]\": + \"[attribute_value]\". This parameter can also set values for writable attributes that + aren't required by your user pool. In a NEW_PASSWORD_REQUIRED challenge response, you + can't modify a required attribute that already has a value. In RespondToAuthChallenge, set + a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then + use the UpdateUserAttributes API operation to modify the value of any additional + attributes. SOFTWARE_TOKEN_MFA \"ChallengeName\": \"SOFTWARE_TOKEN_MFA\", + \"ChallengeResponses\": {\"USERNAME\": \"[username]\", \"SOFTWARE_TOKEN_MFA_CODE\": + [authenticator_code]} DEVICE_SRP_AUTH \"ChallengeName\": \"DEVICE_SRP_AUTH\", + \"ChallengeResponses\": {\"USERNAME\": \"[username]\", \"DEVICE_KEY\": \"[device_key]\", + \"SRP_A\": \"[srp_a]\"} DEVICE_PASSWORD_VERIFIER \"ChallengeName\": + \"DEVICE_PASSWORD_VERIFIER\", \"ChallengeResponses\": {\"DEVICE_KEY\": \"[device_key]\", + \"PASSWORD_CLAIM_SIGNATURE\": \"[claim_signature]\", \"PASSWORD_CLAIM_SECRET_BLOCK\": + \"[secret_block]\", \"TIMESTAMP\": [timestamp], \"USERNAME\": \"[username]\"} MFA_SETUP + \"ChallengeName\": \"MFA_SETUP\", \"ChallengeResponses\": {\"USERNAME\": \"[username]\"}, + \"SESSION\": \"[Session ID from VerifySoftwareToken]\" SELECT_MFA_TYPE + \"ChallengeName\": \"SELECT_MFA_TYPE\", \"ChallengeResponses\": {\"USERNAME\": + \"[username]\", \"ANSWER\": \"[SMS_MFA or SOFTWARE_TOKEN_MFA]\"} For more information + about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see + Working with user devices in your user pool. - `"ClientMetadata"`: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, @@ -1253,11 +1415,18 @@ The user's multi-factor authentication (MFA) preference, including which MFA opt activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA -option will be returned during sign-in. +option will be returned during sign-in. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID. -- `username`: The user pool username or alias. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. 
+ If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1305,13 +1474,29 @@ FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password. Once the user has set a new password, or the -password is permanent, the user status is set to Confirmed. +password is permanent, the user status is set to Confirmed. AdminSetUserPassword can set a +password for the user profile that Amazon Cognito creates for third-party federated users. +When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to +CONFIRMED. A user in this state can sign in as a federated user, and initiate +authentication flows in the API like a linked native user. They can also modify their +password and attributes in token-authenticated API requests like ChangePassword and +UpdateUserAttributes. As a best security practice and to keep users in sync with your +external IdP, don't set passwords on federated user profiles. To set up a federated user +for native sign-in with a linked native user, refer to Linking federated users to an +existing user profile. Amazon Cognito evaluates Identity and Access Management (IAM) +policies in requests for this API operation. For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `password`: The password for the user. - `user_pool_id`: The user pool ID for the user pool where you want to set the user's password. -- `username`: The user name of the user whose password you want to set. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1360,14 +1545,21 @@ end This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure -either type of MFA, use AdminSetUserMFAPreference instead. +either type of MFA, use AdminSetUserMFAPreference instead. Amazon Cognito evaluates +Identity and Access Management (IAM) policies in requests for this API operation. For this +operation, you must use IAM credentials to authorize requests, and you must grant yourself +the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services +API Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `mfaoptions`: You can use this parameter only to set an SMS configuration that uses SMS for delivery. - `user_pool_id`: The ID of the user pool that contains the user whose options you're setting. -- `username`: The user name of the user whose options you're setting. 
+- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_set_user_settings( @@ -1413,13 +1605,24 @@ end Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of -Amazon Cognito advanced security. +Amazon Cognito advanced security. Amazon Cognito evaluates Identity and Access Management +(IAM) policies in requests for this API operation. For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `event_id`: The authentication event ID. -- `feedback_value`: The authentication event feedback value. +- `feedback_value`: The authentication event feedback value. When you provide a + FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where + Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of + invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe + that Amazon Cognito evaluated a high-enough risk level. - `user_pool_id`: The user pool ID. -- `username`: The user pool username. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_update_auth_event_feedback( @@ -1472,13 +1675,19 @@ end admin_update_device_status(device_key, user_pool_id, username) admin_update_device_status(device_key, user_pool_id, username, params::Dict{String,<:Any}) -Updates the device status as an administrator. Calling this action requires developer -credentials. +Updates the device status as an administrator. Amazon Cognito evaluates Identity and +Access Management (IAM) policies in requests for this API operation. For this operation, +you must use IAM credentials to authorize requests, and you must grant yourself the +corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API +Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `device_key`: The device key. - `user_pool_id`: The user pool ID. -- `username`: The user name. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1526,21 +1735,26 @@ end admin_update_user_attributes(user_attributes, user_pool_id, username) admin_update_user_attributes(user_attributes, user_pool_id, username, params::Dict{String,<:Any}) -Updates the specified user's attributes, including developer attributes, as an -administrator. Works on any user. For custom attributes, you must prepend the custom: -prefix to the attribute name. In addition to updating user attributes, this API can also be -used to mark phone and email as verified. This action might generate an SMS text message. -Starting June 1, 2021, US telecom carriers require you to register an origination phone -number before you can send SMS messages to US phone numbers. If you use SMS text messages -in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito -uses the registered number automatically. Otherwise, Amazon Cognito users who must receive -SMS messages might not be able to sign up, activate their accounts, or sign in. If you have -never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon -Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , -you can send messages only to verified phone numbers. After you test your app while in the -sandbox environment, you can move out of the sandbox and into production. For more -information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito -Developer Guide. Calling this action requires developer credentials. + This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers +require you to register an origination phone number before you can send SMS messages to US +phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone +number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. +Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, +activate their accounts, or sign in. If you have never used SMS text messages with Amazon +Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place +your account in the SMS sandbox. In sandbox mode , you can send messages only to verified +phone numbers. After you test your app while in the sandbox environment, you can move out +of the sandbox and into production. For more information, see SMS message settings for +Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified +user's attributes, including developer attributes, as an administrator. Works on any user. +To delete an attribute from your user, submit the attribute in your API request with a +blank value. For custom attributes, you must prepend the custom: prefix to the attribute +name. In addition to updating user attributes, this API can also be used to mark phone and +email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies +in requests for this API operation. For this operation, you must use IAM credentials to +authorize requests, and you must grant yourself the corresponding IAM permission in a +policy. Learn more Signing Amazon Web Services API Requests Using the Amazon +Cognito user pools API and user pool endpoints # Arguments - `user_attributes`: An array of name-value pairs representing user attributes. For custom @@ -1556,7 +1770,10 @@ Developer Guide. Calling this action requires developer credentials. 
verification to true, Amazon Cognito doesn’t send a verification message to your user. - `user_pool_id`: The user pool ID for the user pool where you want to update user attributes. -- `username`: The user name of the user for whom you want to update user attributes. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1620,16 +1837,29 @@ end admin_user_global_sign_out(user_pool_id, username) admin_user_global_sign_out(user_pool_id, username, params::Dict{String,<:Any}) -Signs out a user from all devices. You must sign AdminUserGlobalSignOut requests with -Amazon Web Services credentials. It also invalidates all refresh tokens that Amazon Cognito -has issued to a user. The user's current access and ID tokens remain valid until they -expire. By default, access and ID tokens expire one hour after they're issued. A user can -still use a hosted UI cookie to retrieve new tokens for the duration of the cookie validity -period of 1 hour. Calling this action requires developer credentials. +Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. +Call this operation with your administrative credentials when your user signs out of your +app. This results in the following behavior. Amazon Cognito no longer accepts +token-authorized user operations that you authorize with a signed-out user's access tokens. +For more information, see Using the Amazon Cognito user pools API and user pool endpoints. +Amazon Cognito returns an Access Token has been revoked error when your app attempts to +authorize a user pools API request with a revoked access token that contains the scope +aws.cognito.signin.user.admin. Amazon Cognito no longer accepts a signed-out user's ID +token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its +user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts +a signed-out user's refresh tokens in refresh requests. Other requests might be valid +until your user's token expires. Amazon Cognito evaluates Identity and Access Management +(IAM) policies in requests for this API operation. For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID. -- `username`: The user name. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function admin_user_global_sign_out( @@ -1677,7 +1907,11 @@ alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETU SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. 
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. -Respond to this challenge with your user's TOTP. +Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity +and Access Management (IAM) policies in requests for this API operation. For this +operation, you can't use IAM credentials to authorize requests, and you can't grant IAM +permissions in policies. For more information about authorization models in Amazon Cognito, +see Using the Amazon Cognito user pools API and user pool endpoints. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1706,7 +1940,13 @@ end change_password(access_token, previous_password, proposed_password) change_password(access_token, previous_password, proposed_password, params::Dict{String,<:Any}) -Changes the password for a specified user in a user pool. +Changes the password for a specified user in a user pool. Authorize this action with a +signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. +Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests +for this API operation. For this operation, you can't use IAM credentials to authorize +requests, and you can't grant IAM permissions in policies. For more information about +authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and +user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose @@ -1761,7 +2001,14 @@ end confirm_device(access_token, device_key) confirm_device(access_token, device_key, params::Dict{String,<:Any}) -Confirms tracking of the device. This API call is the call that begins device tracking. +Confirms tracking of the device. This API call is the call that begins device tracking. For +more information about device authentication, see Working with user devices in your user +pool. Authorize this action with a signed-in user's access token. It must include the scope +aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you can't +use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. +For more information about authorization models in Amazon Cognito, see Using the Amazon +Cognito user pools API and user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose device @@ -1807,15 +2054,21 @@ end confirm_forgot_password(client_id, confirmation_code, password, username) confirm_forgot_password(client_id, confirmation_code, password, username, params::Dict{String,<:Any}) -Allows a user to enter a confirmation code to reset a forgotten password. +Allows a user to enter a confirmation code to reset a forgotten password. Amazon Cognito +doesn't evaluate Identity and Access Management (IAM) policies in requests for this API +operation. For this operation, you can't use IAM credentials to authorize requests, and you +can't grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `client_id`: The app client ID of the app associated with the user pool. - `confirmation_code`: The confirmation code from your user's request to reset their password. 
For more information, see ForgotPassword. - `password`: The new password that your user wants to set. -- `username`: The user name of the user for whom you want to enter a code to retrieve a - forgotten password. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1837,7 +2090,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. - `"SecretHash"`: A keyed-hash message authentication code (HMAC) calculated using the - secret key of a user pool client and username plus the client ID in the message. + secret key of a user pool client and username plus the client ID in the message. For more + information about SecretHash, see Computing secret hash values. - `"UserContextData"`: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to @@ -1893,13 +2147,30 @@ end confirm_sign_up(client_id, confirmation_code, username) confirm_sign_up(client_id, confirmation_code, username, params::Dict{String,<:Any}) -Confirms registration of a new user. +This public API operation provides a code that Amazon Cognito sent to your user when they +signed up in your user pool via the SignUp API operation. After your user enters their +code, they confirm ownership of the email address or phone number that they provided, and +their user account becomes active. Depending on your user pool configuration, your users +will receive their confirmation code in an email or SMS message. Local users who signed up +in your user pool are the only type of user who can confirm sign-up with a code. Users who +federate through an external identity provider (IdP) have already been confirmed by their +IdP. Administrator-created users, users created with the AdminCreateUser API operation, +confirm their accounts when they respond to their invitation email message and choose a +password. They do not receive a confirmation code. Instead, they receive a temporary +password. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in +requests for this API operation. For this operation, you can't use IAM credentials to +authorize requests, and you can't grant IAM permissions in policies. For more information +about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API +and user pool endpoints. # Arguments - `client_id`: The ID of the app client associated with the user pool. - `confirmation_code`: The confirmation code sent by a user's request to confirm registration. -- `username`: The user name of the user whose registration you want to confirm. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. 
+ If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1975,8 +2246,11 @@ end create_group(group_name, user_pool_id) create_group(group_name, user_pool_id, params::Dict{String,<:Any}) -Creates a new group in the specified user pool. Calling this action requires developer -credentials. +Creates a new group in the specified user pool. Amazon Cognito evaluates Identity and +Access Management (IAM) policies in requests for this API operation. For this operation, +you must use IAM credentials to authorize requests, and you must grant yourself the +corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API +Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `group_name`: The name of the group. Must be unique. @@ -2032,19 +2306,82 @@ end create_identity_provider(provider_details, provider_name, provider_type, user_pool_id) create_identity_provider(provider_details, provider_name, provider_type, user_pool_id, params::Dict{String,<:Any}) -Creates an IdP for a user pool. +Adds a configuration and trust relationship between a third-party identity provider (IdP) +and a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in +requests for this API operation. For this operation, you must use IAM credentials to +authorize requests, and you must grant yourself the corresponding IAM permission in a +policy. Learn more Signing Amazon Web Services API Requests Using the Amazon +Cognito user pools API and user pool endpoints # Arguments -- `provider_details`: The IdP details. The following list describes the provider detail - keys for each IdP type. For Google and Login with Amazon: client_id client_secret - authorize_scopes For Facebook: client_id client_secret authorize_scopes - api_version For Sign in with Apple: client_id team_id key_id private_key - authorize_scopes For OpenID Connect (OIDC) providers: client_id client_secret - attributes_request_method oidc_issuer authorize_scopes The following keys are only - present if Amazon Cognito didn't discover them at the oidc_issuer URL. authorize_url - token_url attributes_url jwks_uri Amazon Cognito sets the value of the following - keys automatically. They are read-only. attributes_url_add_attributes For SAML - providers: MetadataFile or MetadataURL IDPSignout optional +- `provider_details`: The scopes, URLs, and identifiers for your external identity + provider. The following examples describe the provider detail keys for each IdP type. These + values and their schema are subject to change. Social IdP authorize_scopes values must + match the values listed here. OpenID Connect (OIDC) Amazon Cognito accepts the following + elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, + authorize_url, jwks_uri, token_url. 
Create or update request: \"ProviderDetails\": { + \"attributes_request_method\": \"GET\", \"attributes_url\": + \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", + \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": + \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": + \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": + \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" } Describe + response: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", + \"attributes_url\": \"https://auth.example.com/userInfo\", + \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile + email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": + \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": + \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": + \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" } SAML Create + or update request with Metadata URL: \"ProviderDetails\": { \"IDPInit\": \"true\", + \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": + \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" + } Create or update request with Metadata file: \"ProviderDetails\": { \"IDPInit\": + \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": + \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" } The value of + MetadataFile must be the plaintext metadata document with all quote (\") characters escaped + by backslashes. Describe response: \"ProviderDetails\": { \"IDPInit\": \"true\", + \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", + \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": + \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": + \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", + \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" } LoginWithAmazon + Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"profile + postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", + \"client_secret\": \"provider-app-client-secret\" Describe response: \"ProviderDetails\": + { \"attributes_url\": \"https://api.amazon.com/user/profile\", + \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile + postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": + \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": + \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": + \"https://api.amazon.com/auth/o2/token\" } Google Create or update request: + \"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": + \"1example23456789.apps.googleusercontent.com\", \"client_secret\": + \"provider-app-client-secret\" } Describe response: \"ProviderDetails\": { + \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", + \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile + openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", + \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": + \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", + 
\"token_request_method\": \"POST\", \"token_url\": + \"https://www.googleapis.com/oauth2/v4/token\" } SignInWithApple Create or update + request: \"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": + \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", + \"team_id\": \"3EXAMPLE\" } Describe response: \"ProviderDetails\": { + \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", + \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": + \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": + \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": + \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" } Facebook Create or + update request: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": + \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": + \"provider-app-client-secret\" } Describe response: \"ProviderDetails\": { + \"api_version\": \"v17.0\", \"attributes_url\": + \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": + \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": + \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", + \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", + \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" } - `provider_name`: The IdP name. - `provider_type`: The IdP type. - `user_pool_id`: The user pool ID. @@ -2105,12 +2442,19 @@ end create_resource_server(identifier, name, user_pool_id) create_resource_server(identifier, name, user_pool_id, params::Dict{String,<:Any}) -Creates a new OAuth2.0 resource server and defines custom scopes within it. +Creates a new OAuth2.0 resource server and defines custom scopes within it. Amazon Cognito +evaluates Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you must use IAM credentials to authorize requests, and you must grant +yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web +Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + # Arguments -- `identifier`: A unique resource server identifier for the resource server. This could be - an HTTPS endpoint where the resource server is located, such as - https://my-weather-api.example.com. +- `identifier`: A unique resource server identifier for the resource server. The identifier + can be an API friendly name like solar-system-data. You can also set an API URL like + https://solar-system-data-api.example.com as your identifier. Amazon Cognito represents + scopes in the access token in the format resource-server-identifier/scope. Longer + scope-identifier strings increase the size of your access tokens. - `name`: A friendly name for the resource server. - `user_pool_id`: The user pool ID for the user pool. @@ -2158,7 +2502,11 @@ end create_user_import_job(cloud_watch_logs_role_arn, job_name, user_pool_id) create_user_import_job(cloud_watch_logs_role_arn, job_name, user_pool_id, params::Dict{String,<:Any}) -Creates the user import job. +Creates a user import job. Amazon Cognito evaluates Identity and Access Management (IAM) +policies in requests for this API operation. 
For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `cloud_watch_logs_role_arn`: The role ARN for the Amazon CloudWatch Logs Logging role for @@ -2213,8 +2561,7 @@ end create_user_pool(pool_name) create_user_pool(pool_name, params::Dict{String,<:Any}) -Creates a new Amazon Cognito user pool and sets the password policy for the pool. This -action might generate an SMS text message. Starting June 1, 2021, US telecom carriers + This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. @@ -2224,7 +2571,13 @@ Cognito or any other Amazon Web Service, Amazon Simple Notification Service migh your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for -Amazon Cognito user pools in the Amazon Cognito Developer Guide. +Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon +Cognito user pool and sets the password policy for the pool. If you don't provide a value +for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates +Identity and Access Management (IAM) policies in requests for this API operation. For this +operation, you must use IAM credentials to authorize requests, and you must grant yourself +the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services +API Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `pool_name`: A string used to name the user pool. @@ -2281,17 +2634,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys setting that tells Amazon Cognito how to handle changes to the value of your users' email address and phone number attributes. For more information, see Verifying updates to email addresses and phone numbers. -- `"UserPoolAddOns"`: Enables advanced security risk detection. Set the key - AdvancedSecurityMode to the value \"AUDIT\". +- `"UserPoolAddOns"`: User pool add-ons. Contains settings for activation of advanced + security features. To log user security information but take no action, set to AUDIT. To + configure automatic security responses to risky traffic to your user pool, set to ENFORCED. + For more information, see Adding advanced security to a user pool. - `"UserPoolTags"`: The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria. - `"UsernameAttributes"`: Specifies whether a user can use an email address or phone number as a username when they sign up. - `"UsernameConfiguration"`: Case sensitivity on the username input for the selected - sign-in option. For example, when case sensitivity is set to False, users can sign in using - either \"username\" or \"Username\". This configuration is immutable once it has been set. 
- For more information, see UsernameConfigurationType. + sign-in option. When case sensitivity is set to False (case insensitive), users can sign in + with any combination of capital and lowercase letters. For example, username, USERNAME, or + UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set + case sensitivity to False (case insensitive) as a best practice. When usernames and email + addresses are case insensitive, Amazon Cognito treats any variation in case as the same + user, and prevents a case variation from being assigned to the same attribute for a + different user. This configuration is immutable after you set it. For more information, see + UsernameConfigurationType. - `"VerificationMessageTemplate"`: The template for the verification message that the user sees when the app requests permission to access the user's information. """ @@ -2323,7 +2683,13 @@ end create_user_pool_client(client_name, user_pool_id, params::Dict{String,<:Any}) Creates the user pool client. When you create a new user pool client, token revocation is -automatically activated. For more information about revoking tokens, see RevokeToken. +automatically activated. For more information about revoking tokens, see RevokeToken. If +you don't provide a value for an attribute, Amazon Cognito sets it to its default value. +Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this +API operation. For this operation, you must use IAM credentials to authorize requests, and +you must grant yourself the corresponding IAM permission in a policy. Learn more +Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and +user pool endpoints # Arguments - `client_name`: The client name for the user pool client you would like to create. @@ -2340,14 +2706,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys AccessTokenValidity in an API request is hours. Valid range is displayed below in seconds. If you don't specify otherwise in the configuration of your app client, your access tokens are valid for one hour. -- `"AllowedOAuthFlows"`: The allowed OAuth flows. code Use a code grant flow, which +- `"AllowedOAuthFlows"`: The OAuth grant types that you want your app client to generate. + To create an app client that generates client credentials grants, you must add + client_credentials as the only allowed OAuth flow. code Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token endpoint. implicit Issue the access token (and, optionally, ID token, based on scopes) directly to your user. client_credentials Issue the access token from the /oauth2/token endpoint directly to a non-person user using a combination of the client ID and client secret. -- `"AllowedOAuthFlowsUserPoolClient"`: Set to true if the client is allowed to follow the - OAuth protocol when interacting with Amazon Cognito user pools. +- `"AllowedOAuthFlowsUserPoolClient"`: Set to true to use OAuth 2.0 features in your user + pool app client. AllowedOAuthFlowsUserPoolClient must be true before you can configure the + following features in your app client. CallBackURLs: Callback URLs. LogoutURLs: + Sign-out redirect URLs. AllowedOAuthScopes: OAuth 2.0 scopes. AllowedOAuthFlows: + Support for authorization code, implicit, and client credentials OAuth 2.0 grants. 
To use + OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set + AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient + API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with + the CLI or SDKs, it defaults to false. - `"AllowedOAuthScopes"`: The allowed OAuth scopes. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported. @@ -2366,10 +2741,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys fragment component. See OAuth 2.0 - Redirection Endpoint. Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. -- `"DefaultRedirectURI"`: The default redirect URI. Must be in the CallbackURLs list. A +- `"DefaultRedirectURI"`: The default redirect URI. In app clients with one assigned IdP, + replaces redirect_uri in authentication requests. Must be in the CallbackURLs list. A redirect URI must: Be an absolute URI. Be registered with the authorization server. - Not include a fragment component. See OAuth 2.0 - Redirection Endpoint. Amazon Cognito - requires HTTPS over HTTP except for http://localhost for testing purposes only. App + Not include a fragment component. For more information, see Default redirect URI. Amazon + Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. - `"EnablePropagateAdditionalUserContextData"`: Activates the propagation of additional user context data. For more information about propagation of user context data, see Adding @@ -2404,8 +2780,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request. For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their - session with their ID token for 10 hours. The default time unit for AccessTokenValidity in - an API request is hours. Valid range is displayed below in seconds. If you don't specify + session with their ID token for 10 hours. The default time unit for IdTokenValidity in an + API request is hours. Valid range is displayed below in seconds. If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour. - `"LogoutURLs"`: A list of allowed logout URLs for the IdPs. - `"PreventUserExistenceErrors"`: Errors and responses that you want Amazon Cognito APIs to @@ -2417,7 +2793,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. -- `"ReadAttributes"`: The read attributes. +- `"ReadAttributes"`: The list of user attributes that you want your app client to have + read-only access to. After your user authenticates in your app, their access token + authorizes them to read their own attribute value for any attribute in this list. 
An + example of this kind of activity is when your user selects a link to view their profile + information. Your app makes a GetUser API request to retrieve and display your user's + profile data. When you don't specify the ReadAttributes for your app client, your app can + read the values of email_verified, phone_number_verified, and the Standard attributes of + your user pool. When your user pool has read access to these default attributes, + ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes + in the API response if you have specified your own custom set of read attributes. - `"RefreshTokenValidity"`: The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for RefreshTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request. For @@ -2433,12 +2818,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. - `"TokenValidityUnits"`: The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours. -- `"WriteAttributes"`: The user pool attributes that the app client can write to. If your - app client allows users to sign in through an IdP, this array must include all attributes - that you have mapped to IdP attributes. Amazon Cognito updates mapped attributes when users - sign in to your application through an IdP. If your app client does not have write access - to a mapped attribute, Amazon Cognito throws an error when it tries to update the - attribute. For more information, see Specifying IdP Attribute Mappings for Your user pool. +- `"WriteAttributes"`: The list of user attributes that you want your app client to have + write access to. After your user authenticates in your app, their access token authorizes + them to set or modify their own attribute value for any attribute in this list. An example + of this kind of activity is when you present your user with a form to update their profile + information and they change their last name. Your app then makes an UpdateUserAttributes + API request and sets family_name to the new value. When you don't specify the + WriteAttributes for your app client, your app can write the values of the Standard + attributes of your user pool. When your user pool has write access to these default + attributes, WriteAttributes doesn't return any information. Amazon Cognito only populates + WriteAttributes in the API response if you have specified your own custom set of write + attributes. If your app client allows users to sign in through an IdP, this array must + include all attributes that you have mapped to IdP attributes. Amazon Cognito updates + mapped attributes when users sign in to your application through an IdP. If your app client + does not have write access to a mapped attribute, Amazon Cognito throws an error when it + tries to update the attribute. For more information, see Specifying IdP Attribute Mappings + for Your user pool. """ function create_user_pool_client( ClientName, UserPoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -2474,7 +2869,11 @@ end create_user_pool_domain(domain, user_pool_id) create_user_pool_domain(domain, user_pool_id, params::Dict{String,<:Any}) -Creates a new domain for a user pool. +Creates a new domain for a user pool. 
Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `domain`: The domain string. For custom domains, this is the fully-qualified domain name, @@ -2649,7 +3048,13 @@ end delete_user(access_token) delete_user(access_token, params::Dict{String,<:Any}) -Allows a user to delete himself or herself. +Allows a user to delete their own user profile. Authorize this action with a signed-in +user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon +Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this +API operation. For this operation, you can't use IAM credentials to authorize requests, and +you can't grant IAM permissions in policies. For more information about authorization +models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool +endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose user @@ -2683,7 +3088,12 @@ end delete_user_attributes(access_token, user_attribute_names) delete_user_attributes(access_token, user_attribute_names, params::Dict{String,<:Any}) -Deletes the attributes for a user. +Deletes the attributes for a user. Authorize this action with a signed-in user's access +token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't +evaluate Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you can't use IAM credentials to authorize requests, and you can't +grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose @@ -2894,7 +3304,11 @@ end Describes a resource server. # Arguments -- `identifier`: The identifier for the resource server +- `identifier`: A unique resource server identifier for the resource server. The identifier + can be an API friendly name like solar-system-data. You can also set an API URL like + https://solar-system-data-api.example.com as your identifier. Amazon Cognito represents + scopes in the access token in the format resource-server-identifier/scope. Longer + scope-identifier strings increase the size of your access tokens. - `user_pool_id`: The user pool ID for the user pool that hosts the resource server. """ @@ -3011,7 +3425,12 @@ end describe_user_pool(user_pool_id) describe_user_pool(user_pool_id, params::Dict{String,<:Any}) -Returns the configuration information and metadata of the specified user pool. +Returns the configuration information and metadata of the specified user pool. Amazon +Cognito evaluates Identity and Access Management (IAM) policies in requests for this API +operation. For this operation, you must use IAM credentials to authorize requests, and you +must grant yourself the corresponding IAM permission in a policy. Learn more Signing +Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool +endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool you want to describe. 
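
A minimal usage sketch for the IAM-authorized DescribeUserPool wrapper above, assuming the usual AWS.jl @service loading pattern, ambient IAM credentials that carry the matching cognito-idp permission, and a placeholder user pool ID:

using AWS
@service Cognito_Identity_Provider   # assumes the standard AWS.jl service-loading macro

# Placeholder pool ID; the request is signed with the IAM credentials from
# global_aws_config(), the default aws_config for every wrapper in this file.
pool = Cognito_Identity_Provider.describe_user_pool("us-east-1_EXAMPLE")

# The parsed response is a Dict-like object keyed by the API's field names.
println(pool["UserPool"]["Name"])
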
@@ -3045,7 +3464,11 @@ end describe_user_pool_client(client_id, user_pool_id, params::Dict{String,<:Any}) Client method for returning the configuration information and metadata of the specified -user pool app client. +user pool app client. Amazon Cognito evaluates Identity and Access Management (IAM) +policies in requests for this API operation. For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `client_id`: The app client ID of the app associated with the user pool. @@ -3119,7 +3542,13 @@ end forget_device(device_key) forget_device(device_key, params::Dict{String,<:Any}) -Forgets the specified device. +Forgets the specified device. For more information about device authentication, see Working +with user devices in your user pool. Authorize this action with a signed-in user's access +token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't +evaluate Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you can't use IAM credentials to authorize requests, and you can't +grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `device_key`: The device key. @@ -3160,9 +3589,17 @@ Calling this API causes a message to be sent to the end user with a confirmation is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in -the Amazon Cognito Developer Guide. If neither a verified phone number nor a verified email -exists, an InvalidParameterException is thrown. To use the confirmation code for resetting -the password, call ConfirmForgotPassword. This action might generate an SMS text message. +the Amazon Cognito Developer Guide. To use the confirmation code for resetting the +password, call ConfirmForgotPassword. If neither a verified phone number nor a verified +email exists, this API returns InvalidParameterException. If your app client has a client +secret and you don't provide a SECRET_HASH parameter, this API returns +NotAuthorizedException. To use this API operation, your user pool must have self-service +account recovery configured. Use AdminSetUserPassword if you manage passwords as an +administrator. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) +policies in requests for this API operation. For this operation, you can't use IAM +credentials to authorize requests, and you can't grant IAM permissions in policies. For +more information about authorization models in Amazon Cognito, see Using the Amazon Cognito +user pools API and user pool endpoints. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito @@ -3177,8 +3614,10 @@ Developer Guide. # Arguments - `client_id`: The ID of the client associated with the user pool. 
-- `username`: The user name of the user for whom you want to enter a code to reset a - forgotten password. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3275,7 +3714,13 @@ end get_device(device_key) get_device(device_key, params::Dict{String,<:Any}) -Gets the device. +Gets the device. For more information about device authentication, see Working with user +devices in your user pool. Authorize this action with a signed-in user's access token. It +must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate +Identity and Access Management (IAM) policies in requests for this API operation. For this +operation, you can't use IAM credentials to authorize requests, and you can't grant IAM +permissions in policies. For more information about authorization models in Amazon Cognito, +see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `device_key`: The device key. @@ -3390,6 +3835,42 @@ function get_identity_provider_by_identifier( ) end +""" + get_log_delivery_configuration(user_pool_id) + get_log_delivery_configuration(user_pool_id, params::Dict{String,<:Any}) + +Gets the detailed activity logging configuration for a user pool. + +# Arguments +- `user_pool_id`: The ID of the user pool where you want to view detailed activity logging + configuration. + +""" +function get_log_delivery_configuration( + UserPoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return cognito_identity_provider( + "GetLogDeliveryConfiguration", + Dict{String,Any}("UserPoolId" => UserPoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_log_delivery_configuration( + UserPoolId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cognito_identity_provider( + "GetLogDeliveryConfiguration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("UserPoolId" => UserPoolId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_signing_certificate(user_pool_id) get_signing_certificate(user_pool_id, params::Dict{String,<:Any}) @@ -3471,7 +3952,13 @@ end get_user(access_token) get_user(access_token, params::Dict{String,<:Any}) -Gets the user attributes and metadata for a user. +Gets the user attributes and metadata for a user. Authorize this action with a signed-in +user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon +Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this +API operation. For this operation, you can't use IAM credentials to authorize requests, and +you can't grant IAM permissions in policies. For more information about authorization +models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool +endpoints. # Arguments - `access_token`: A non-expired access token for the user whose information you want to @@ -3506,18 +3993,24 @@ end get_user_attribute_verification_code(access_token, attribute_name, params::Dict{String,<:Any}) Generates a user attribute verification code for the specified attribute name. 
Sends a -message to a user with a code that they must return in a VerifyUserAttribute request. This -action might generate an SMS text message. Starting June 1, 2021, US telecom carriers -require you to register an origination phone number before you can send SMS messages to US -phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone -number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. -Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, -activate their accounts, or sign in. If you have never used SMS text messages with Amazon -Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place -your account in the SMS sandbox. In sandbox mode , you can send messages only to verified -phone numbers. After you test your app while in the sandbox environment, you can move out -of the sandbox and into production. For more information, see SMS message settings for -Amazon Cognito user pools in the Amazon Cognito Developer Guide. +message to a user with a code that they must return in a VerifyUserAttribute request. +Authorize this action with a signed-in user's access token. It must include the scope +aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you can't +use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. +For more information about authorization models in Amazon Cognito, see Using the Amazon +Cognito user pools API and user pool endpoints. This action might generate an SMS text +message. Starting June 1, 2021, US telecom carriers require you to register an origination +phone number before you can send SMS messages to US phone numbers. If you use SMS text +messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon +Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must +receive SMS messages might not be able to sign up, activate their accounts, or sign in. If +you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, +Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox +mode , you can send messages only to verified phone numbers. After you test your app while +in the sandbox environment, you can move out of the sandbox and into production. For more +information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito +Developer Guide. # Arguments - `access_token`: A non-expired access token for the user whose attribute verification code @@ -3615,9 +4108,23 @@ end global_sign_out(access_token) global_sign_out(access_token, params::Dict{String,<:Any}) -Signs out users from all devices. It also invalidates all refresh tokens that Amazon -Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new -tokens for the duration of the 1-hour cookie validity period. +Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. +Call this operation when your user signs out of your app. This results in the following +behavior. Amazon Cognito no longer accepts token-authorized user operations that you +authorize with a signed-out user's access tokens. For more information, see Using the +Amazon Cognito user pools API and user pool endpoints. 
Amazon Cognito returns an Access +Token has been revoked error when your app attempts to authorize a user pools API request +with a revoked access token that contains the scope aws.cognito.signin.user.admin. Amazon +Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity +pool with ServerSideTokenCheck enabled for its user pool IdP configuration in +CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh +tokens in refresh requests. Other requests might be valid until your user's token +expires. Authorize this action with a signed-in user's access token. It must include the +scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you can't +use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. +For more information about authorization models in Amazon Cognito, see Using the Amazon +Cognito user pools API and user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user who you want @@ -3653,15 +4160,19 @@ end Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in -through a third party. This action might generate an SMS text message. Starting June 1, -2021, US telecom carriers require you to register an origination phone number before you -can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, -you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered -number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might -not be able to sign up, activate their accounts, or sign in. If you have never used SMS -text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple -Notification Service might place your account in the SMS sandbox. In sandbox mode , you -can send messages only to verified phone numbers. After you test your app while in the +through a third party. Amazon Cognito doesn't evaluate Identity and Access Management +(IAM) policies in requests for this API operation. For this operation, you can't use IAM +credentials to authorize requests, and you can't grant IAM permissions in policies. For +more information about authorization models in Amazon Cognito, see Using the Amazon Cognito +user pools API and user pool endpoints. This action might generate an SMS text message. +Starting June 1, 2021, US telecom carriers require you to register an origination phone +number before you can send SMS messages to US phone numbers. If you use SMS text messages +in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito +uses the registered number automatically. Otherwise, Amazon Cognito users who must receive +SMS messages might not be able to sign up, activate their accounts, or sign in. If you have +never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon +Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , +you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. 
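
A minimal sign-in and sign-out sketch that ties InitiateAuth to GlobalSignOut, assuming the initiate_auth wrapper takes auth_flow and client_id positionally like the other wrappers in this file, and using placeholder client and user values; the AuthParameters keys are documented below:

using AWS
@service Cognito_Identity_Provider   # assumes the standard AWS.jl service-loading macro

# Placeholder app client and user values. USER_PASSWORD_AUTH must be enabled on
# the app client, and AuthParameters needs a SECRET_HASH entry when the client
# has a secret (see the AuthParameters notes below).
resp = Cognito_Identity_Provider.initiate_auth(
    "USER_PASSWORD_AUTH",                  # auth_flow
    "1example23456789",                    # client_id (placeholder)
    Dict{String,Any}(
        "AuthParameters" => Dict(
            "USERNAME" => "jane",
            "PASSWORD" => "ExamplePassword1!",
        ),
    ),
)

# When no further challenge is returned, the tokens arrive in AuthenticationResult.
access_token = resp["AuthenticationResult"]["AccessToken"]

# Invalidate the user's identity, access, and refresh tokens everywhere,
# as described for GlobalSignOut above.
Cognito_Identity_Provider.global_sign_out(access_token)
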
@@ -3688,12 +4199,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AuthParameters"`: The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow: For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app - client is configured with a client secret), DEVICE_KEY. For - REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the - app client is configured with a client secret), DEVICE_KEY. For CUSTOM_AUTH: USERNAME - (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To - start the authentication flow with password verification, include ChallengeName: SRP_A and - SRP_A: (The SRP_A Value). + client is configured with a client secret), DEVICE_KEY. For USER_PASSWORD_AUTH: USERNAME + (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with + a client secret), DEVICE_KEY. For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN + (required), SECRET_HASH (required if the app client is configured with a client secret), + DEVICE_KEY. For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is + configured with client secret), DEVICE_KEY. To start the authentication flow with password + verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). For more + information about SECRET_HASH, see Computing secret hash values. For information about + DEVICE_KEY, see Working with user devices in your user pool. - `"ClientMetadata"`: A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the InitiateAuth API action, @@ -3707,14 +4221,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specific needs. When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input: Post authentication Custom message Pre token generation Create auth - challenge Define auth challenge Verify auth challenge For more information, see - Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. - When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the - following: Store the ClientMetadata value. This data is available only to Lambda triggers - that are assigned to a user pool to support custom workflows. If your user pool - configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. - Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon - Cognito to provide sensitive information. + challenge Define auth challenge For more information, see Customizing user pool + Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the + ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the + ClientMetadata value. This data is available only to Lambda triggers that are assigned to a + user pool to support custom workflows. If your user pool configuration doesn't include + triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata + value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive + information. 
- `"UserContextData"`: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to @@ -3754,7 +4268,14 @@ end list_devices(access_token) list_devices(access_token, params::Dict{String,<:Any}) -Lists the sign-in devices that Amazon Cognito has registered to the current user. +Lists the sign-in devices that Amazon Cognito has registered to the current user. For more +information about device authentication, see Working with user devices in your user pool. +Authorize this action with a signed-in user's access token. It must include the scope +aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you can't +use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. +For more information about authorization models in Amazon Cognito, see Using the Amazon +Cognito user pools API and user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose list of @@ -3763,7 +4284,11 @@ Lists the sign-in devices that Amazon Cognito has registered to the current user # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Limit"`: The limit of the device request. -- `"PaginationToken"`: The pagination token for the list request. +- `"PaginationToken"`: This API operation returns a limited number of results. The + pagination token is an identifier that you can present in an additional API request with + the same parameters. When you include the pagination token, Amazon Cognito returns the next + set of items after the current list. Subsequent requests return a new pagination token. By + use of this token, you can paginate through the full list of items. """ function list_devices(AccessToken; aws_config::AbstractAWSConfig=global_aws_config()) return cognito_identity_provider( @@ -3792,8 +4317,11 @@ end list_groups(user_pool_id) list_groups(user_pool_id, params::Dict{String,<:Any}) -Lists the groups associated with a user pool. Calling this action requires developer -credentials. +Lists the groups associated with a user pool. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool. @@ -3831,7 +4359,11 @@ end list_identity_providers(user_pool_id) list_identity_providers(user_pool_id, params::Dict{String,<:Any}) -Lists information about all IdPs for a user pool. +Lists information about all IdPs for a user pool. Amazon Cognito evaluates Identity and +Access Management (IAM) policies in requests for this API operation. For this operation, +you must use IAM credentials to authorize requests, and you must grant yourself the +corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API +Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID. 
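
A small pagination sketch for the PaginationToken contract described above, assuming a placeholder access token and the Dict-like responses these wrappers return:

using AWS
@service Cognito_Identity_Provider   # assumes the standard AWS.jl service-loading macro

# Collect all of a signed-in user's remembered devices by following
# PaginationToken until Amazon Cognito stops returning one.
function all_devices(access_token)
    devices = Any[]
    params = Dict{String,Any}("Limit" => 10)
    while true
        page = Cognito_Identity_Provider.list_devices(access_token, params)
        append!(devices, get(page, "Devices", []))
        token = get(page, "PaginationToken", nothing)
        token === nothing && break
        params["PaginationToken"] = token
    end
    return devices
end

all_devices("<access-token>")   # placeholder access token
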
@@ -3870,7 +4402,11 @@ end list_resource_servers(user_pool_id) list_resource_servers(user_pool_id, params::Dict{String,<:Any}) -Lists the resource servers for a user pool. +Lists the resource servers for a user pool. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool. @@ -3948,7 +4484,11 @@ end list_user_import_jobs(max_results, user_pool_id) list_user_import_jobs(max_results, user_pool_id, params::Dict{String,<:Any}) -Lists the user import jobs. +Lists user import jobs for a user pool. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `max_results`: The maximum number of import jobs you want the request to return. @@ -3956,8 +4496,11 @@ Lists the user import jobs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"PaginationToken"`: An identifier that was returned from the previous call to - ListUserImportJobs, which can be used to return the next set of import jobs in the list. +- `"PaginationToken"`: This API operation returns a limited number of results. The + pagination token is an identifier that you can present in an additional API request with + the same parameters. When you include the pagination token, Amazon Cognito returns the next + set of items after the current list. Subsequent requests return a new pagination token. By + use of this token, you can paginate through the full list of items. """ function list_user_import_jobs( MaxResults, UserPoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -3993,7 +4536,12 @@ end list_user_pool_clients(user_pool_id) list_user_pool_clients(user_pool_id, params::Dict{String,<:Any}) -Lists the clients that have been created for the specified user pool. +Lists the clients that have been created for the specified user pool. Amazon Cognito +evaluates Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you must use IAM credentials to authorize requests, and you must grant +yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web +Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + # Arguments - `user_pool_id`: The user pool ID for the user pool where you want to list user pool @@ -4035,7 +4583,12 @@ end list_user_pools(max_results) list_user_pools(max_results, params::Dict{String,<:Any}) -Lists the user pools associated with an Amazon Web Services account. +Lists the user pools associated with an Amazon Web Services account. Amazon Cognito +evaluates Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you must use IAM credentials to authorize requests, and you must grant +yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web +Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + # Arguments - `max_results`: The maximum number of results you want the request to return when listing @@ -4073,7 +4626,11 @@ end list_users(user_pool_id) list_users(user_pool_id, params::Dict{String,<:Any}) -Lists the users in the Amazon Cognito user pool. +Lists users and their basic details in a user pool. Amazon Cognito evaluates Identity and +Access Management (IAM) policies in requests for this API operation. For this operation, +you must use IAM credentials to authorize requests, and you must grant yourself the +corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API +Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool on which the search should be @@ -4081,9 +4638,14 @@ Lists the users in the Amazon Cognito user pool. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AttributesToGet"`: An array of strings, where each string is the name of a user - attribute to be returned for each user in the search results. If the array is null, all - attributes are returned. +- `"AttributesToGet"`: A JSON array of user attribute names, for example given_name, that + you want Amazon Cognito to include in the response for each user. When you don't provide an + AttributesToGet parameter, Amazon Cognito returns all attributes for each user. Use + AttributesToGet with required attributes in your user pool, or in conjunction with Filter. + Amazon Cognito returns an error if not all users in the results have set a value for the + attribute you request. Attributes that you can't filter on, including custom attributes, + must have a value set in every user profile before an AttributesToGet parameter returns + results. - `"Filter"`: A filter string of the form \"AttributeName Filter-Type \"AttributeValue\"\". Quotation marks within the filter string must be escaped using the backslash () character. For example, \"family_name = \"Reddy\"\". AttributeName: The name of the attribute to @@ -4106,8 +4668,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Searching for Users Using the ListUsers API and Examples of Using the ListUsers API in the Amazon Cognito Developer Guide. - `"Limit"`: Maximum number of users to be returned. -- `"PaginationToken"`: An identifier that was returned from the previous call to this - operation, which can be used to return the next set of items in the list. +- `"PaginationToken"`: This API operation returns a limited number of results. The + pagination token is an identifier that you can present in an additional API request with + the same parameters. When you include the pagination token, Amazon Cognito returns the next + set of items after the current list. Subsequent requests return a new pagination token. By + use of this token, you can paginate through the full list of items. """ function list_users(UserPoolId; aws_config::AbstractAWSConfig=global_aws_config()) return cognito_identity_provider( @@ -4136,7 +4701,11 @@ end list_users_in_group(group_name, user_pool_id) list_users_in_group(group_name, user_pool_id, params::Dict{String,<:Any}) -Lists the users in the specified group. Calling this action requires developer credentials. +Lists the users in the specified group. 
Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `group_name`: The name of the group. @@ -4144,7 +4713,7 @@ Lists the users in the specified group. Calling this action requires developer c # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Limit"`: The limit of the request to list users. +- `"Limit"`: The maximum number of users that you want to retrieve before pagination. - `"NextToken"`: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. """ @@ -4183,22 +4752,29 @@ end resend_confirmation_code(client_id, username, params::Dict{String,<:Any}) Resends the confirmation (for confirmation of registration) to a specific user in the user -pool. This action might generate an SMS text message. Starting June 1, 2021, US telecom -carriers require you to register an origination phone number before you can send SMS -messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must -register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number -automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be -able to sign up, activate their accounts, or sign in. If you have never used SMS text -messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification -Service might place your account in the SMS sandbox. In sandbox mode , you can send -messages only to verified phone numbers. After you test your app while in the sandbox -environment, you can move out of the sandbox and into production. For more information, see - SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. +pool. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in +requests for this API operation. For this operation, you can't use IAM credentials to +authorize requests, and you can't grant IAM permissions in policies. For more information +about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API +and user pool endpoints. This action might generate an SMS text message. Starting June 1, +2021, US telecom carriers require you to register an origination phone number before you +can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, +you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered +number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might +not be able to sign up, activate their accounts, or sign in. If you have never used SMS +text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple +Notification Service might place your account in the SMS sandbox. In sandbox mode , you +can send messages only to verified phone numbers. After you test your app while in the +sandbox environment, you can move out of the sandbox and into production. For more +information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito +Developer Guide. # Arguments - `client_id`: The ID of the client associated with the user pool. 
-- `username`: The username attribute of the user to whom you want to resend a confirmation - code. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4260,18 +4836,27 @@ end respond_to_auth_challenge(challenge_name, client_id) respond_to_auth_challenge(challenge_name, client_id, params::Dict{String,<:Any}) -Responds to the authentication challenge. This action might generate an SMS text message. -Starting June 1, 2021, US telecom carriers require you to register an origination phone -number before you can send SMS messages to US phone numbers. If you use SMS text messages -in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito -uses the registered number automatically. Otherwise, Amazon Cognito users who must receive -SMS messages might not be able to sign up, activate their accounts, or sign in. If you have -never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon -Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , -you can send messages only to verified phone numbers. After you test your app while in the -sandbox environment, you can move out of the sandbox and into production. For more -information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito -Developer Guide. +Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for +device authentication that bypasses MFA, or for a custom authentication challenge. A +RespondToAuthChallenge API request provides the answer to that challenge, like a code or a +secure remote password (SRP). The parameters of a response to an authentication challenge +vary with the type of challenge. For more information about custom authentication +challenges, see Custom authentication challenge Lambda triggers. Amazon Cognito doesn't +evaluate Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you can't use IAM credentials to authorize requests, and you can't +grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. This +action might generate an SMS text message. Starting June 1, 2021, US telecom carriers +require you to register an origination phone number before you can send SMS messages to US +phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone +number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. +Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, +activate their accounts, or sign in. If you have never used SMS text messages with Amazon +Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place +your account in the SMS sandbox. In sandbox mode , you can send messages only to verified +phone numbers. After you test your app while in the sandbox environment, you can move out +of the sandbox and into production. 
For more information, see SMS message settings for +Amazon Cognito user pools in the Amazon Cognito Developer Guide. # Arguments - `challenge_name`: The challenge name. For more information, see InitiateAuth. @@ -4282,24 +4867,41 @@ Developer Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AnalyticsMetadata"`: The Amazon Pinpoint analytics metadata that contributes to your metrics for RespondToAuthChallenge calls. -- `"ChallengeResponses"`: The challenge responses. These are inputs corresponding to the - value of ChallengeName, for example: SECRET_HASH (if app client is configured with client - secret) applies to all of the inputs that follow (including SOFTWARE_TOKEN_MFA). - SMS_MFA: SMS_MFA_CODE, USERNAME. PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, - PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME. PASSWORD_VERIFIER requires DEVICE_KEY - when you sign in with a remembered device. NEW_PASSWORD_REQUIRED: NEW_PASSWORD, - USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required - attributes that Amazon Cognito returned as requiredAttributes in the InitiateAuth response, - add a userAttributes.attributename parameter. This parameter can also set values for - writable attributes that aren't required by your user pool. In a NEW_PASSWORD_REQUIRED - challenge response, you can't modify a required attribute that already has a value. In - RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the - requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the - value of any additional attributes. SOFTWARE_TOKEN_MFA: USERNAME and - SOFTWARE_TOKEN_MFA_CODE are required attributes. DEVICE_SRP_AUTH requires USERNAME, - DEVICE_KEY, SRP_A (and SECRET_HASH). DEVICE_PASSWORD_VERIFIER requires everything that - PASSWORD_VERIFIER requires, plus DEVICE_KEY. MFA_SETUP requires USERNAME, plus you must - use the session value returned by VerifySoftwareToken in the Session parameter. +- `"ChallengeResponses"`: The responses to the challenge that you received in the previous + request. Each challenge has its own required response parameters. The following examples + are partial JSON request bodies that highlight challenge-response parameters. You must + provide a SECRET_HASH parameter in all challenge responses to an app client that has a + client secret. SMS_MFA \"ChallengeName\": \"SMS_MFA\", \"ChallengeResponses\": + {\"SMS_MFA_CODE\": \"[SMS_code]\", \"USERNAME\": \"[username]\"} PASSWORD_VERIFIER + \"ChallengeName\": \"PASSWORD_VERIFIER\", \"ChallengeResponses\": + {\"PASSWORD_CLAIM_SIGNATURE\": \"[claim_signature]\", \"PASSWORD_CLAIM_SECRET_BLOCK\": + \"[secret_block]\", \"TIMESTAMP\": [timestamp], \"USERNAME\": \"[username]\"} Add + \"DEVICE_KEY\" when you sign in with a remembered device. CUSTOM_CHALLENGE + \"ChallengeName\": \"CUSTOM_CHALLENGE\", \"ChallengeResponses\": {\"USERNAME\": + \"[username]\", \"ANSWER\": \"[challenge_answer]\"} Add \"DEVICE_KEY\" when you sign in + with a remembered device. NEW_PASSWORD_REQUIRED \"ChallengeName\": + \"NEW_PASSWORD_REQUIRED\", \"ChallengeResponses\": {\"NEW_PASSWORD\": \"[new_password]\", + \"USERNAME\": \"[username]\"} To set any required attributes that InitiateAuth returned in + an requiredAttributes parameter, add \"userAttributes.[attribute_name]\": + \"[attribute_value]\". This parameter can also set values for writable attributes that + aren't required by your user pool. 
In a NEW_PASSWORD_REQUIRED challenge response, you + can't modify a required attribute that already has a value. In RespondToAuthChallenge, set + a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then + use the UpdateUserAttributes API operation to modify the value of any additional + attributes. SOFTWARE_TOKEN_MFA \"ChallengeName\": \"SOFTWARE_TOKEN_MFA\", + \"ChallengeResponses\": {\"USERNAME\": \"[username]\", \"SOFTWARE_TOKEN_MFA_CODE\": + [authenticator_code]} DEVICE_SRP_AUTH \"ChallengeName\": \"DEVICE_SRP_AUTH\", + \"ChallengeResponses\": {\"USERNAME\": \"[username]\", \"DEVICE_KEY\": \"[device_key]\", + \"SRP_A\": \"[srp_a]\"} DEVICE_PASSWORD_VERIFIER \"ChallengeName\": + \"DEVICE_PASSWORD_VERIFIER\", \"ChallengeResponses\": {\"DEVICE_KEY\": \"[device_key]\", + \"PASSWORD_CLAIM_SIGNATURE\": \"[claim_signature]\", \"PASSWORD_CLAIM_SECRET_BLOCK\": + \"[secret_block]\", \"TIMESTAMP\": [timestamp], \"USERNAME\": \"[username]\"} MFA_SETUP + \"ChallengeName\": \"MFA_SETUP\", \"ChallengeResponses\": {\"USERNAME\": \"[username]\"}, + \"SESSION\": \"[Session ID from VerifySoftwareToken]\" SELECT_MFA_TYPE + \"ChallengeName\": \"SELECT_MFA_TYPE\", \"ChallengeResponses\": {\"USERNAME\": + \"[username]\", \"ANSWER\": \"[SMS_MFA or SOFTWARE_TOKEN_MFA]\"} For more information + about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see + Working with user devices in your user pool. - `"ClientMetadata"`: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon @@ -4362,7 +4964,11 @@ end Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon -Cognito user APIs, or to authorize access to your resource server. +Cognito user APIs, or to authorize access to your resource server. Amazon Cognito doesn't +evaluate Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you can't use IAM credentials to authorize requests, and you can't +grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `client_id`: The client ID for the token that you want to revoke. @@ -4399,6 +5005,53 @@ function revoke_token( ) end +""" + set_log_delivery_configuration(log_configurations, user_pool_id) + set_log_delivery_configuration(log_configurations, user_pool_id, params::Dict{String,<:Any}) + +Sets up or modifies the detailed activity logging configuration of a user pool. + +# Arguments +- `log_configurations`: A collection of all of the detailed activity logging configurations + for a user pool. +- `user_pool_id`: The ID of the user pool where you want to configure detailed activity + logging . 
+ +""" +function set_log_delivery_configuration( + LogConfigurations, UserPoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return cognito_identity_provider( + "SetLogDeliveryConfiguration", + Dict{String,Any}( + "LogConfigurations" => LogConfigurations, "UserPoolId" => UserPoolId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function set_log_delivery_configuration( + LogConfigurations, + UserPoolId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cognito_identity_provider( + "SetLogDeliveryConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "LogConfigurations" => LogConfigurations, "UserPoolId" => UserPoolId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ set_risk_configuration(user_pool_id) set_risk_configuration(user_pool_id, params::Dict{String,<:Any}) @@ -4505,7 +5158,12 @@ choose an MFA option will be returned during sign-in. If an MFA type is activate user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on -Adaptive Authentication for the user pool. +Adaptive Authentication for the user pool. Authorize this action with a signed-in user's +access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito +doesn't evaluate Identity and Access Management (IAM) policies in requests for this API +operation. For this operation, you can't use IAM credentials to authorize requests, and you +can't grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose MFA @@ -4603,7 +5261,13 @@ end This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure -either type of MFA, use SetUserMFAPreference instead. +either type of MFA, use SetUserMFAPreference instead. Authorize this action with a +signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. +Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests +for this API operation. For this operation, you can't use IAM credentials to authorize +requests, and you can't grant IAM permissions in policies. For more information about +authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and +user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose user @@ -4647,22 +5311,28 @@ end sign_up(client_id, password, username, params::Dict{String,<:Any}) Registers the user in the specified user pool and creates a user name, password, and user -attributes. This action might generate an SMS text message. Starting June 1, 2021, US -telecom carriers require you to register an origination phone number before you can send -SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must -register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number -automatically. 
Otherwise, Amazon Cognito users who must receive SMS messages might not be
-able to sign up, activate their accounts, or sign in. If you have never used SMS text
-messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification
-Service might place your account in the SMS sandbox. In sandbox mode , you can send
-messages only to verified phone numbers. After you test your app while in the sandbox
-environment, you can move out of the sandbox and into production. For more information, see
- SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
+attributes. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies
+in requests for this API operation. For this operation, you can't use IAM credentials to
+authorize requests, and you can't grant IAM permissions in policies. For more information
+about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API
+and user pool endpoints. This action might generate an SMS text message. Starting June 1,
+2021, US telecom carriers require you to register an origination phone number before you
+can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito,
+you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered
+number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might
+not be able to sign up, activate their accounts, or sign in. If you have never used SMS
+text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple
+Notification Service might place your account in the SMS sandbox. In sandbox mode, you
+can send messages only to verified phone numbers. After you test your app while in the
+sandbox environment, you can move out of the sandbox and into production. For more
+information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito
+Developer Guide.

# Arguments
- `client_id`: The ID of the client associated with the user pool.
- `password`: The password of the user you want to register.
-- `username`: The user name of the user you want to register.
+- `username`: The username of the user that you want to sign up. The value of this
+  parameter is typically a username, but can be any alias attribute in your user pool.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -4692,7 +5362,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk
  of an authentication event based on the context that your app generates and passes to
  Amazon Cognito when it makes API requests.
-- `"ValidationData"`: The validation data in the request to register a user.
+- `"ValidationData"`: Temporary user attributes that contribute to the outcomes of your pre
+  sign-up Lambda trigger. This set of key-value pairs is for custom validation of
+  information that you collect from your users but don't need to retain. Your Lambda function
+  can analyze this additional data and act on it. Your function might perform external API
+  operations like logging user attributes and validation data to Amazon CloudWatch Logs.
+  Validation data might also affect the response that your function returns to Amazon
+  Cognito, like automatically confirming the user if they sign up from within your network.
+ For more information about the pre sign-up Lambda trigger, see Pre sign-up Lambda trigger. """ function sign_up( ClientId, Password, Username; aws_config::AbstractAWSConfig=global_aws_config() @@ -4910,14 +5587,25 @@ end Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part -of Amazon Cognito advanced security. +of Amazon Cognito advanced security. Amazon Cognito doesn't evaluate Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you can't +use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. +For more information about authorization models in Amazon Cognito, see Using the Amazon +Cognito user pools API and user pool endpoints. # Arguments - `event_id`: The event ID. - `feedback_token`: The feedback token. -- `feedback_value`: The authentication event feedback value. +- `feedback_value`: The authentication event feedback value. When you provide a + FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where + Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of + invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe + that Amazon Cognito evaluated a high-enough risk level. - `user_pool_id`: The user pool ID. -- `username`: The user pool username. +- `username`: The username of the user that you want to query or modify. The value of this + parameter is typically your user's username, but it can be any of their alias attributes. + If username isn't an alias attribute in your user pool, this value must be the sub of a + local user or the username of a user from a third-party IdP. """ function update_auth_event_feedback( @@ -4974,7 +5662,13 @@ end update_device_status(access_token, device_key) update_device_status(access_token, device_key, params::Dict{String,<:Any}) -Updates the device status. +Updates the device status. For more information about device authentication, see Working +with user devices in your user pool. Authorize this action with a signed-in user's access +token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't +evaluate Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you can't use IAM credentials to authorize requests, and you can't +grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose device @@ -5019,8 +5713,11 @@ end update_group(group_name, user_pool_id) update_group(group_name, user_pool_id, params::Dict{String,<:Any}) -Updates the specified group with the specified attributes. Calling this action requires -developer credentials. +Updates the specified group with the specified attributes. Amazon Cognito evaluates +Identity and Access Management (IAM) policies in requests for this API operation. For this +operation, you must use IAM credentials to authorize requests, and you must grant yourself +the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services +API Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `group_name`: The name of the group. 
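# A minimal sketch of the SignUp call documented above, assuming AWS.jl's high-level
# `@service` interface and credentials from `global_aws_config()`. The client ID, username,
# password, and the UserAttributes/ValidationData entries are placeholders; if the app
# client has a client secret, a "SecretHash" entry would also be required in the params.
using AWS
@service Cognito_Identity_Provider

Cognito_Identity_Provider.sign_up(
    "1example23456789",     # ClientId (placeholder app client ID)
    "ExamplePassword123!",  # Password
    "alice",                # Username, or any alias attribute configured for the pool
    Dict{String,Any}(
        "UserAttributes" => [Dict("Name" => "email", "Value" => "alice@example.com")],
        "ValidationData" => [Dict("Name" => "signup_source", "Value" => "internal-network")],
    ),
)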
@@ -5068,7 +5765,11 @@ end update_identity_provider(provider_name, user_pool_id) update_identity_provider(provider_name, user_pool_id, params::Dict{String,<:Any}) -Updates IdP information for a user pool. +Updates IdP information for a user pool. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `provider_name`: The IdP name. @@ -5078,7 +5779,74 @@ Updates IdP information for a user pool. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AttributeMapping"`: The IdP attribute mapping to be changed. - `"IdpIdentifiers"`: A list of IdP identifiers. -- `"ProviderDetails"`: The IdP details to be updated, such as MetadataURL and MetadataFile. +- `"ProviderDetails"`: The scopes, URLs, and identifiers for your external identity + provider. The following examples describe the provider detail keys for each IdP type. These + values and their schema are subject to change. Social IdP authorize_scopes values must + match the values listed here. OpenID Connect (OIDC) Amazon Cognito accepts the following + elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, + authorize_url, jwks_uri, token_url. Create or update request: \"ProviderDetails\": { + \"attributes_request_method\": \"GET\", \"attributes_url\": + \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", + \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": + \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": + \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": + \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" } Describe + response: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", + \"attributes_url\": \"https://auth.example.com/userInfo\", + \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile + email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": + \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": + \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": + \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" } SAML Create + or update request with Metadata URL: \"ProviderDetails\": { \"IDPInit\": \"true\", + \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": + \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" + } Create or update request with Metadata file: \"ProviderDetails\": { \"IDPInit\": + \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": + \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" } The value of + MetadataFile must be the plaintext metadata document with all quote (\") characters escaped + by backslashes. 
Describe response: \"ProviderDetails\": { \"IDPInit\": \"true\", + \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", + \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": + \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": + \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", + \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" } LoginWithAmazon + Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"profile + postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", + \"client_secret\": \"provider-app-client-secret\" Describe response: \"ProviderDetails\": + { \"attributes_url\": \"https://api.amazon.com/user/profile\", + \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile + postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": + \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": + \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": + \"https://api.amazon.com/auth/o2/token\" } Google Create or update request: + \"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": + \"1example23456789.apps.googleusercontent.com\", \"client_secret\": + \"provider-app-client-secret\" } Describe response: \"ProviderDetails\": { + \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", + \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile + openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", + \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": + \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", + \"token_request_method\": \"POST\", \"token_url\": + \"https://www.googleapis.com/oauth2/v4/token\" } SignInWithApple Create or update + request: \"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": + \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", + \"team_id\": \"3EXAMPLE\" } Describe response: \"ProviderDetails\": { + \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", + \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": + \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": + \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": + \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" } Facebook Create or + update request: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": + \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": + \"provider-app-client-secret\" } Describe response: \"ProviderDetails\": { + \"api_version\": \"v17.0\", \"attributes_url\": + \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": + \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": + \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", + \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", + \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" } """ function update_identity_provider( ProviderName, UserPoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -5117,10 +5885,19 @@ end update_resource_server(identifier, name, user_pool_id, 
params::Dict{String,<:Any}) Updates the name and scopes of resource server. All other fields are read-only. If you -don't provide a value for an attribute, it is set to the default value. +don't provide a value for an attribute, it is set to the default value. Amazon Cognito +evaluates Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you must use IAM credentials to authorize requests, and you must grant +yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web +Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + # Arguments -- `identifier`: The identifier for the resource server. +- `identifier`: A unique resource server identifier for the resource server. The identifier + can be an API friendly name like solar-system-data. You can also set an API URL like + https://solar-system-data-api.example.com as your identifier. Amazon Cognito represents + scopes in the access token in the format resource-server-identifier/scope. Longer + scope-identifier strings increase the size of your access tokens. - `name`: The name of the resource server. - `user_pool_id`: The user pool ID for the user pool. @@ -5167,18 +5944,27 @@ end update_user_attributes(access_token, user_attributes) update_user_attributes(access_token, user_attributes, params::Dict{String,<:Any}) -Allows a user to update a specific attribute (one at a time). This action might generate -an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an -origination phone number before you can send SMS messages to US phone numbers. If you use -SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. -Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users -who must receive SMS messages might not be able to sign up, activate their accounts, or -sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon -Web Service, Amazon Simple Notification Service might place your account in the SMS -sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you -test your app while in the sandbox environment, you can move out of the sandbox and into -production. For more information, see SMS message settings for Amazon Cognito user pools -in the Amazon Cognito Developer Guide. +With this operation, your users can update one or more of their attributes with their own +credentials. You authorize this API request with the user's access token. To delete an +attribute from your user, submit the attribute in your API request with a blank value. +Custom attribute values in this request must include the custom: prefix. Authorize this +action with a signed-in user's access token. It must include the scope +aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access +Management (IAM) policies in requests for this API operation. For this operation, you can't +use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. +For more information about authorization models in Amazon Cognito, see Using the Amazon +Cognito user pools API and user pool endpoints. This action might generate an SMS text +message. Starting June 1, 2021, US telecom carriers require you to register an origination +phone number before you can send SMS messages to US phone numbers. 
If you use SMS text +messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon +Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must +receive SMS messages might not be able to sign up, activate their accounts, or sign in. If +you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, +Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox +mode , you can send messages only to verified phone numbers. After you test your app while +in the sandbox environment, you can move out of the sandbox and into production. For more +information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito +Developer Guide. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose user @@ -5245,20 +6031,24 @@ end update_user_pool(user_pool_id) update_user_pool(user_pool_id, params::Dict{String,<:Any}) -Updates the specified user pool with the specified attributes. You can get a list of the -current user pool settings using DescribeUserPool. If you don't provide a value for an -attribute, it will be set to the default value. This action might generate an SMS text -message. Starting June 1, 2021, US telecom carriers require you to register an origination -phone number before you can send SMS messages to US phone numbers. If you use SMS text -messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon -Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must -receive SMS messages might not be able to sign up, activate their accounts, or sign in. If -you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, -Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox -mode , you can send messages only to verified phone numbers. After you test your app while -in the sandbox environment, you can move out of the sandbox and into production. For more -information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito -Developer Guide. + This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers +require you to register an origination phone number before you can send SMS messages to US +phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone +number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. +Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, +activate their accounts, or sign in. If you have never used SMS text messages with Amazon +Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place +your account in the SMS sandbox. In sandbox mode , you can send messages only to verified +phone numbers. After you test your app while in the sandbox environment, you can move out +of the sandbox and into production. For more information, see SMS message settings for +Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified +user pool with the specified attributes. You can get a list of the current user pool +settings using DescribeUserPool. If you don't provide a value for an attribute, Amazon +Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access +Management (IAM) policies in requests for this API operation. 
For this operation, you must +use IAM credentials to authorize requests, and you must grant yourself the corresponding +IAM permission in a policy. Learn more Signing Amazon Web Services API Requests +Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for the user pool you want to update. @@ -5312,8 +6102,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys setting that tells Amazon Cognito how to handle changes to the value of your users' email address and phone number attributes. For more information, see Verifying updates to email addresses and phone numbers. -- `"UserPoolAddOns"`: Enables advanced security risk detection. Set the key - AdvancedSecurityMode to the value \"AUDIT\". +- `"UserPoolAddOns"`: User pool add-ons. Contains settings for activation of advanced + security features. To log user security information but take no action, set to AUDIT. To + configure automatic security responses to risky traffic to your user pool, set to ENFORCED. + For more information, see Adding advanced security to a user pool. - `"UserPoolTags"`: The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria. @@ -5348,9 +6140,13 @@ end Updates the specified user pool app client with the specified attributes. You can get a list of the current user pool app client settings using DescribeUserPoolClient. If you -don't provide a value for an attribute, it will be set to the default value. You can also -use this operation to enable token revocation for user pool clients. For more information -about revoking tokens, see RevokeToken. +don't provide a value for an attribute, Amazon Cognito sets it to its default value. You +can also use this operation to enable token revocation for user pool clients. For more +information about revoking tokens, see RevokeToken. Amazon Cognito evaluates Identity and +Access Management (IAM) policies in requests for this API operation. For this operation, +you must use IAM credentials to authorize requests, and you must grant yourself the +corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API +Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `client_id`: The ID of the client associated with the user pool. @@ -5373,8 +6169,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys ID token, based on scopes) directly to your user. client_credentials Issue the access token from the /oauth2/token endpoint directly to a non-person user using a combination of the client ID and client secret. -- `"AllowedOAuthFlowsUserPoolClient"`: Set to true if the client is allowed to follow the - OAuth protocol when interacting with Amazon Cognito user pools. +- `"AllowedOAuthFlowsUserPoolClient"`: Set to true to use OAuth 2.0 features in your user + pool app client. AllowedOAuthFlowsUserPoolClient must be true before you can configure the + following features in your app client. CallBackURLs: Callback URLs. LogoutURLs: + Sign-out redirect URLs. AllowedOAuthScopes: OAuth 2.0 scopes. AllowedOAuthFlows: + Support for authorization code, implicit, and client credentials OAuth 2.0 grants. 
To use + OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set + AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient + API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with + the CLI or SDKs, it defaults to false. - `"AllowedOAuthScopes"`: The allowed OAuth scopes. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported. @@ -5428,8 +6231,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request. For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their - session with their ID token for 10 hours. The default time unit for AccessTokenValidity in - an API request is hours. Valid range is displayed below in seconds. If you don't specify + session with their ID token for 10 hours. The default time unit for IdTokenValidity in an + API request is hours. Valid range is displayed below in seconds. If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour. - `"LogoutURLs"`: A list of allowed logout URLs for the IdPs. - `"PreventUserExistenceErrors"`: Errors and responses that you want Amazon Cognito APIs to @@ -5441,7 +6244,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. -- `"ReadAttributes"`: The read-only attributes of the user pool. +- `"ReadAttributes"`: The list of user attributes that you want your app client to have + read-only access to. After your user authenticates in your app, their access token + authorizes them to read their own attribute value for any attribute in this list. An + example of this kind of activity is when your user selects a link to view their profile + information. Your app makes a GetUser API request to retrieve and display your user's + profile data. When you don't specify the ReadAttributes for your app client, your app can + read the values of email_verified, phone_number_verified, and the Standard attributes of + your user pool. When your user pool has read access to these default attributes, + ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes + in the API response if you have specified your own custom set of read attributes. - `"RefreshTokenValidity"`: The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for RefreshTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request. For @@ -5454,9 +6266,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SupportedIdentityProviders"`: A list of provider names for the IdPs that this client supports. The following are supported: COGNITO, Facebook, Google, SignInWithApple, LoginWithAmazon, and the names of your own SAML and OIDC providers. 
-- `"TokenValidityUnits"`: The units in which the validity times are represented. The - default unit for RefreshToken is days, and the default for ID and access tokens is hours. -- `"WriteAttributes"`: The writeable attributes of the user pool. +- `"TokenValidityUnits"`: The time units you use when you set the duration of ID, access, + and refresh tokens. The default unit for RefreshToken is days, and the default for ID and + access tokens is hours. +- `"WriteAttributes"`: The list of user attributes that you want your app client to have + write access to. After your user authenticates in your app, their access token authorizes + them to set or modify their own attribute value for any attribute in this list. An example + of this kind of activity is when you present your user with a form to update their profile + information and they change their last name. Your app then makes an UpdateUserAttributes + API request and sets family_name to the new value. When you don't specify the + WriteAttributes for your app client, your app can write the values of the Standard + attributes of your user pool. When your user pool has write access to these default + attributes, WriteAttributes doesn't return any information. Amazon Cognito only populates + WriteAttributes in the API response if you have specified your own custom set of write + attributes. If your app client allows users to sign in through an IdP, this array must + include all attributes that you have mapped to IdP attributes. Amazon Cognito updates + mapped attributes when users sign in to your application through an IdP. If your app client + does not have write access to a mapped attribute, Amazon Cognito throws an error when it + tries to update the attribute. For more information, see Specifying IdP Attribute Mappings + for Your user pool. """ function update_user_pool_client( ClientId, UserPoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -5507,7 +6335,12 @@ certificate to your custom domain, you must provide this ARN to Amazon Cognito. add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region. After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain. For more information about adding a -custom domain to your user pool, see Using Your Own Domain for the Hosted UI. +custom domain to your user pool, see Using Your Own Domain for the Hosted UI. Amazon +Cognito evaluates Identity and Access Management (IAM) policies in requests for this API +operation. For this operation, you must use IAM credentials to authorize requests, and you +must grant yourself the corresponding IAM permission in a policy. Learn more Signing +Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool +endpoints # Arguments - `custom_domain_config`: The configuration for a custom domain that hosts the sign-up and @@ -5569,7 +6402,11 @@ end Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an -access token or a session string, but not both. +access token or a session string, but not both. Amazon Cognito doesn't evaluate Identity +and Access Management (IAM) policies in requests for this API operation. For this +operation, you can't use IAM credentials to authorize requests, and you can't grant IAM +permissions in policies. 
For more information about authorization models in Amazon Cognito, +see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `user_code`: The one- time password computed using the secret code returned by @@ -5613,7 +6450,12 @@ end Verifies the specified user attributes in the user pool. If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see -UserAttributeUpdateSettingsType. +UserAttributeUpdateSettingsType. Authorize this action with a signed-in user's access +token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't +evaluate Identity and Access Management (IAM) policies in requests for this API operation. +For this operation, you can't use IAM credentials to authorize requests, and you can't +grant IAM permissions in policies. For more information about authorization models in +Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose user diff --git a/src/services/comprehend.jl b/src/services/comprehend.jl index b13f5ac2da..0d092d2392 100644 --- a/src/services/comprehend.jl +++ b/src/services/comprehend.jl @@ -226,7 +226,8 @@ end batch_detect_targeted_sentiment(language_code, text_list, params::Dict{String,<:Any}) Inspects a batch of documents and returns a sentiment analysis for each entity identified -in the documents. For more information about targeted sentiment, see Targeted sentiment. +in the documents. For more information about targeted sentiment, see Targeted sentiment in +the Amazon Comprehend Developer Guide. # Arguments - `language_code`: The language of the input documents. Currently, English is the only @@ -269,28 +270,39 @@ end classify_document(endpoint_arn) classify_document(endpoint_arn, params::Dict{String,<:Any}) -Creates a new document classification request to analyze a single document in real-time, -using a previously created and trained custom model and an endpoint. You can input plain -text or you can upload a single-page input document (text, PDF, Word, or image). If the -system detects errors while processing a page in the input document, the API response -includes an entry in Errors that describes the errors. If the system detects a -document-level error in your input document, the API returns an InvalidRequestException -error response. For details about this exception, see Errors in semi-structured documents -in the Comprehend Developer Guide. +Creates a classification request to analyze a single document in real-time. +ClassifyDocument supports the following model types: Custom classifier - a custom model +that you have created and trained. For input, you can provide plain text, a single-page +document (PDF, Word, or image), or Amazon Textract API output. For more information, see +Custom classification in the Amazon Comprehend Developer Guide. Prompt safety classifier +- Amazon Comprehend provides a pre-trained model for classifying input prompts for +generative AI applications. For input, you provide English plain text input. For prompt +safety classification, the response includes only the Classes field. For more information +about prompt safety classifiers, see Prompt safety classification in the Amazon Comprehend +Developer Guide. 
If the system detects errors while processing a page in the input
+document, the API response includes an Errors field that describes the errors. If the
+system detects a document-level error in your input document, the API returns an
+InvalidRequestException error response. For details about this exception, see Errors in
+semi-structured documents in the Comprehend Developer Guide.

# Arguments
-- `endpoint_arn`: The Amazon Resource Number (ARN) of the endpoint. For information about
-  endpoints, see Managing endpoints.
+- `endpoint_arn`: The Amazon Resource Number (ARN) of the endpoint. For prompt safety
+  classification, Amazon Comprehend provides the endpoint ARN. For more information about
+  prompt safety classifiers, see Prompt safety classification in the Amazon Comprehend
+  Developer Guide. For custom classification, you create an endpoint for your custom model.
+  For more information, see Using Amazon Comprehend endpoints.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"Bytes"`: Use the Bytes parameter to input a text, PDF, Word or image file. You can also
-  use the Bytes parameter to input an Amazon Textract DetectDocumentText or AnalyzeDocument
-  output file. Provide the input document as a sequence of base64-encoded bytes. If your code
-  uses an Amazon Web Services SDK to classify documents, the SDK may encode the document file
-  bytes for you. The maximum length of this field depends on the input document type. For
-  details, see Inputs for real-time custom analysis in the Comprehend Developer Guide. If
-  you use the Bytes parameter, do not use the Text parameter.
+- `"Bytes"`: Use the Bytes parameter to input a text, PDF, Word or image file. When you
+  classify a document using a custom model, you can also use the Bytes parameter to input an
+  Amazon Textract DetectDocumentText or AnalyzeDocument output file. To classify a document
+  using the prompt safety classifier, use the Text parameter for input. Provide the input
+  document as a sequence of base64-encoded bytes. If your code uses an Amazon Web Services
+  SDK to classify documents, the SDK may encode the document file bytes for you. The maximum
+  length of this field depends on the input document type. For details, see Inputs for
+  real-time custom analysis in the Comprehend Developer Guide. If you use the Bytes
+  parameter, do not use the Text parameter.
- `"DocumentReaderConfig"`: Provides configuration parameters to override the default
  actions for extracting text from PDF documents and image files.
- `"Text"`: The document text to be analyzed. If you enter text using this parameter, do
@@ -328,8 +340,7 @@ returns the labels of identified PII entity types such as name, address, bank ac
count number, or phone number.

# Arguments
-- `language_code`: The language of the input documents. Currently, English is the only
-  valid language.
+- `language_code`: The language of the input documents.
- `text`: A UTF-8 text string. The maximum string size is 100 KB.

"""
@@ -454,10 +465,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client
  request token, Amazon Comprehend generates one.
- `"Mode"`: Indicates the mode in which the classifier will be trained. The classifier can
-  be trained in multi-class mode, which identifies one and only one class for each document,
-  or multi-label mode, which identifies one or more labels for each document.
In multi-label - mode, multiple labels for an individual document are separated by a delimiter. The default - delimiter between labels is a pipe (|). + be trained in multi-class (single-label) mode or multi-label mode. Multi-class mode + identifies a single class label for each document and multi-label mode identifies one or + more class labels for each document. Multiple labels for an individual document are + separated by a delimiter. The default delimiter between labels is a pipe (|). - `"ModelKmsKeyId"`: ID for the KMS key that Amazon Comprehend uses to encrypt trained custom models. The ModelKmsKeyId can be either of the following formats: KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\" Amazon Resource Name (ARN) of a KMS Key: @@ -472,7 +483,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys and double quotes to enclose the JSON names and values: '{\"attribute\": \"value\", \"attribute\": [\"value\"]}' - `"OutputDataConfig"`: Specifies the location for the output files from a custom - classifier job. This parameter is required for a request that creates a native classifier + classifier job. This parameter is required for a request that creates a native document model. - `"Tags"`: Tags to associate with the document classifier. A tag is a key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with @@ -739,13 +750,16 @@ the Amazon Comprehend Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ActiveModelArn"`: To associate an existing model with the flywheel, specify the Amazon - Resource Number (ARN) of the model version. + Resource Number (ARN) of the model version. Do not set TaskConfig or ModelType if you + specify an ActiveModelArn. - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one. - `"DataSecurityConfig"`: Data security configurations. -- `"ModelType"`: The model type. +- `"ModelType"`: The model type. You need to set ModelType if you are creating a flywheel + for a new model. - `"Tags"`: The tags to associate with this flywheel. -- `"TaskConfig"`: Configuration about the custom classifier associated with the flywheel. +- `"TaskConfig"`: Configuration about the model associated with the flywheel. You need to + set TaskConfig if you are creating a flywheel for a new model. """ function create_flywheel( DataAccessRoleArn, @@ -1688,8 +1702,8 @@ Inspects the input text for entities that contain personally identifiable inform and returns information about them. # Arguments -- `language_code`: The language of the input documents. Currently, English is the only - valid language. +- `language_code`: The language of the input text. Enter the language code for English (en) + or Spanish (es). - `text`: A UTF-8 text string. The maximum string size is 100 KB. """ @@ -1815,7 +1829,8 @@ end detect_targeted_sentiment(language_code, text, params::Dict{String,<:Any}) Inspects the input text and returns a sentiment analysis for each entity identified in the -text. For more information about targeted sentiment, see Targeted sentiment. +text. For more information about targeted sentiment, see Targeted sentiment in the Amazon +Comprehend Developer Guide. # Arguments - `language_code`: The language of the input documents. 
Currently, English is the only @@ -1853,6 +1868,54 @@ function detect_targeted_sentiment( ) end +""" + detect_toxic_content(language_code, text_segments) + detect_toxic_content(language_code, text_segments, params::Dict{String,<:Any}) + +Performs toxicity analysis on the list of text strings that you provide as input. The API +response contains a results list that matches the size of the input list. For more +information about toxicity detection, see Toxicity detection in the Amazon Comprehend +Developer Guide. + +# Arguments +- `language_code`: The language of the input text. Currently, English is the only supported + language. +- `text_segments`: A list of up to 10 text strings. Each string has a maximum size of 1 KB, + and the maximum size of the list is 10 KB. + +""" +function detect_toxic_content( + LanguageCode, TextSegments; aws_config::AbstractAWSConfig=global_aws_config() +) + return comprehend( + "DetectToxicContent", + Dict{String,Any}("LanguageCode" => LanguageCode, "TextSegments" => TextSegments); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function detect_toxic_content( + LanguageCode, + TextSegments, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return comprehend( + "DetectToxicContent", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "LanguageCode" => LanguageCode, "TextSegments" => TextSegments + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ import_model(source_model_arn) import_model(source_model_arn, params::Dict{String,<:Any}) @@ -2545,8 +2608,8 @@ end start_document_classification_job(data_access_role_arn, input_data_config, output_data_config) start_document_classification_job(data_access_role_arn, input_data_config, output_data_config, params::Dict{String,<:Any}) -Starts an asynchronous document classification job. Use the -DescribeDocumentClassificationJob operation to track the progress of the job. +Starts an asynchronous document classification job using a custom classification model. Use +the DescribeDocumentClassificationJob operation to track the progress of the job. # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the IAM role that grants Amazon @@ -2997,8 +3060,8 @@ Starts an asynchronous PII entity detection job for a collection of documents. - `data_access_role_arn`: The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. - `input_data_config`: The input properties for a PII entities detection job. -- `language_code`: The language of the input documents. Currently, English is the only - valid language. +- `language_code`: The language of the input documents. Enter the language code for English + (en) or Spanish (es). - `mode`: Specifies whether the output provides the locations (offsets) of PII entities or a file in which PII entities are redacted. - `output_data_config`: Provides configuration parameters for the output of PII entity diff --git a/src/services/comprehendmedical.jl b/src/services/comprehendmedical.jl index e914442548..0ee800160b 100644 --- a/src/services/comprehendmedical.jl +++ b/src/services/comprehendmedical.jl @@ -12,7 +12,7 @@ Gets the properties associated with a medical entities detection job. Use this o get the status of a detection job. # Arguments -- `job_id`: The identifier that Comprehend Medical; generated for the job. The +- `job_id`: The identifier that Amazon Comprehend Medical generated for the job. 
The StartEntitiesDetectionV2Job operation returns this identifier in its response. """ @@ -78,7 +78,7 @@ Gets the properties associated with a protected health information (PHI) detecti this operation to get the status of a detection job. # Arguments -- `job_id`: The identifier that Comprehend Medical; generated for the job. The +- `job_id`: The identifier that Amazon Comprehend Medical generated for the job. The StartPHIDetectionJob operation returns this identifier in its response. """ @@ -172,13 +172,12 @@ end detect_entities(text, params::Dict{String,<:Any}) The DetectEntities operation is deprecated. You should use the DetectEntitiesV2 operation -instead. Inspects the clinical text for a variety of medical entities and returns specific +instead. Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that -information . +information. # Arguments - `text`: A UTF-8 text string containing the clinical content being examined for entities. - Each string must contain fewer than 20,000 bytes of characters. """ function detect_entities(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -214,8 +213,7 @@ operation in all new applications. The DetectEntitiesV2 operation returns the Ac Direction entities as attributes instead of types. # Arguments -- `text`: A UTF-8 string containing the clinical content being examined for entities. Each - string must contain fewer than 20,000 bytes of characters. +- `text`: A UTF-8 string containing the clinical content being examined for entities. """ function detect_entities_v2(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -241,13 +239,13 @@ end detect_phi(text) detect_phi(text, params::Dict{String,<:Any}) - Inspects the clinical text for protected health information (PHI) entities and returns the +Inspects the clinical text for protected health information (PHI) entities and returns the entity category, location, and confidence score for each entity. Amazon Comprehend Medical only detects entities in English language texts. # Arguments -- `text`: A UTF-8 text string containing the clinical content being examined for PHI - entities. Each string must contain fewer than 20,000 bytes of characters. +- `text`: A UTF-8 text string containing the clinical content being examined for PHI + entities. """ function detect_phi(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -279,8 +277,7 @@ Centers for Disease Control. Amazon Comprehend Medical only detects medical enti English language texts. # Arguments -- `text`: The input text used for analysis. The input for InferICD10CM is a string from 1 - to 10000 characters. +- `text`: The input text used for analysis. """ function infer_icd10_cm(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -312,8 +309,7 @@ Medicine. Amazon Comprehend Medical only detects medical entities in English lan texts. # Arguments -- `text`: The input text used for analysis. The input for InferRxNorm is a string from 1 to - 10000 characters. +- `text`: The input text used for analysis. """ function infer_rx_norm(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -343,8 +339,7 @@ end the Systematized Nomenclature of Medicine, Clinical Terms (SNOMED-CT) ontology # Arguments -- `text`: The input text to be analyzed using InferSNOMEDCT. The text should be a string - with 1 to 10000 characters. +- `text`: The input text to be analyzed using InferSNOMEDCT. 
""" function infer_snomedct(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -434,7 +429,7 @@ end list_phidetection_jobs() list_phidetection_jobs(params::Dict{String,<:Any}) -Gets a list of protected health information (PHI) detection jobs that you have submitted. +Gets a list of protected health information (PHI) detection jobs you have submitted. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -528,19 +523,19 @@ DescribeEntitiesDetectionV2Job operation to track the status of a job. # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: The input configuration that specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. All documents must be in the same - language. Comprehend Medical; processes files in US English (en). + language. Amazon Comprehend Medical processes files in US English (en). - `output_data_config`: The output configuration that specifies where to send the output files. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one for you. + request token, Amazon Comprehend Medical generates one for you. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. @@ -602,8 +597,8 @@ ontology. Use the DescribeICD10CMInferenceJob operation to track the status of a # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: Specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. All documents must be in the same language. @@ -612,7 +607,7 @@ ontology. Use the DescribeICD10CMInferenceJob operation to track the status of a # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one. + request token, Amazon Comprehend Medical generates one. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. @@ -674,8 +669,8 @@ DescribePHIDetectionJob operation to track the status of a job. 
# Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: Specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. All documents must be in the same language. @@ -684,7 +679,7 @@ DescribePHIDetectionJob operation to track the status of a job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one. + request token, Amazon Comprehend Medical generates one. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. @@ -746,8 +741,8 @@ ontology. Use the DescribeRxNormInferenceJob operation to track the status of a # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: Specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. All documents must be in the same language. @@ -756,7 +751,7 @@ ontology. Use the DescribeRxNormInferenceJob operation to track the status of a # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one. + request token, Amazon Comprehend Medical generates one. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. diff --git a/src/services/compute_optimizer.jl b/src/services/compute_optimizer.jl index 7787781e10..fa7d9b491b 100644 --- a/src/services/compute_optimizer.jl +++ b/src/services/compute_optimizer.jl @@ -17,8 +17,7 @@ Guide. - `resource_type`: The target resource type of the recommendation preference to delete. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an - Auto Scaling group. The valid values for this parameter are Ec2Instance and - AutoScalingGroup. + Auto Scaling group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -474,6 +473,142 @@ function export_lambda_function_recommendations( ) end +""" + export_license_recommendations(s3_destination_config) + export_license_recommendations(s3_destination_config, params::Dict{String,<:Any}) + + Export optimization recommendations for your licenses. Recommendations are exported in a +comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) +file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For +more information, see Exporting Recommendations in the Compute Optimizer User Guide. You +can have only one license export job in progress per Amazon Web Services Region. + +# Arguments +- `s3_destination_config`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountIds"`: The IDs of the Amazon Web Services accounts for which to export license + recommendations. If your account is the management account of an organization, use this + parameter to specify the member account for which you want to export recommendations. This + parameter can't be specified together with the include member accounts parameter. The + parameters are mutually exclusive. If this parameter is omitted, recommendations for member + accounts aren't included in the export. You can specify multiple account IDs per request. +- `"fieldsToExport"`: The recommendations data to include in the export file. For more + information about the fields that can be exported, see Exported files in the Compute + Optimizer User Guide. +- `"fileFormat"`: The format of the export file. A CSV file is the only export format + currently supported. +- `"filters"`: An array of objects to specify a filter that exports a more specific set of + license recommendations. +- `"includeMemberAccounts"`: Indicates whether to include recommendations for resources in + all member accounts of the organization if your account is the management account of an + organization. The member accounts must also be opted in to Compute Optimizer, and trusted + access for Compute Optimizer must be enabled in the organization account. For more + information, see Compute Optimizer and Amazon Web Services Organizations trusted access in + the Compute Optimizer User Guide. If this parameter is omitted, recommendations for member + accounts of the organization aren't included in the export file . This parameter cannot be + specified together with the account IDs parameter. The parameters are mutually exclusive. +""" +function export_license_recommendations( + s3DestinationConfig; aws_config::AbstractAWSConfig=global_aws_config() +) + return compute_optimizer( + "ExportLicenseRecommendations", + Dict{String,Any}("s3DestinationConfig" => s3DestinationConfig); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function export_license_recommendations( + s3DestinationConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return compute_optimizer( + "ExportLicenseRecommendations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("s3DestinationConfig" => s3DestinationConfig), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + export_rdsdatabase_recommendations(s3_destination_config) + export_rdsdatabase_recommendations(s3_destination_config, params::Dict{String,<:Any}) + + Export optimization recommendations for your Amazon Relational Database Service (Amazon +RDS). 
Recommendations are exported in a comma-separated values (CSV) file, and its +metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage +Service (Amazon S3) bucket that you specify. For more information, see Exporting +Recommendations in the Compute Optimizer User Guide. You can have only one Amazon RDS +export job in progress per Amazon Web Services Region. + +# Arguments +- `s3_destination_config`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountIds"`: The Amazon Web Services account IDs for the export Amazon RDS + recommendations. If your account is the management account or the delegated administrator + of an organization, use this parameter to specify the member account you want to export + recommendations to. This parameter can't be specified together with the include member + accounts parameter. The parameters are mutually exclusive. If this parameter or the include + member accounts parameter is omitted, the recommendations for member accounts aren't + included in the export. You can specify multiple account IDs per request. +- `"fieldsToExport"`: The recommendations data to include in the export file. For more + information about the fields that can be exported, see Exported files in the Compute + Optimizer User Guide. +- `"fileFormat"`: The format of the export file. The CSV file is the only export file + format currently supported. +- `"filters"`: An array of objects to specify a filter that exports a more specific set of + Amazon RDS recommendations. +- `"includeMemberAccounts"`: If your account is the management account or the delegated + administrator of an organization, this parameter indicates whether to include + recommendations for resources in all member accounts of the organization. The member + accounts must also be opted in to Compute Optimizer, and trusted access for Compute + Optimizer must be enabled in the organization account. For more information, see Compute + Optimizer and Amazon Web Services Organizations trusted access in the Compute Optimizer + User Guide. If this parameter is omitted, recommendations for member accounts of the + organization aren't included in the export file. If this parameter or the account ID + parameter is omitted, recommendations for member accounts aren't included in the export. +- `"recommendationPreferences"`: +""" +function export_rdsdatabase_recommendations( + s3DestinationConfig; aws_config::AbstractAWSConfig=global_aws_config() +) + return compute_optimizer( + "ExportRDSDatabaseRecommendations", + Dict{String,Any}("s3DestinationConfig" => s3DestinationConfig); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function export_rdsdatabase_recommendations( + s3DestinationConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return compute_optimizer( + "ExportRDSDatabaseRecommendations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("s3DestinationConfig" => s3DestinationConfig), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_auto_scaling_group_recommendations() get_auto_scaling_group_recommendations(params::Dict{String,<:Any}) @@ -944,6 +1079,160 @@ function get_lambda_function_recommendations( ) end +""" + get_license_recommendations() + get_license_recommendations(params::Dict{String,<:Any}) + +Returns license recommendations for Amazon EC2 instances that run on a specific license. 
+Compute Optimizer generates recommendations for licenses that meet a specific set of +requirements. For more information, see the Supported resources and requirements in the +Compute Optimizer User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountIds"`: The ID of the Amazon Web Services account for which to return license + recommendations. If your account is the management account of an organization, use this + parameter to specify the member account for which you want to return license + recommendations. Only one account ID can be specified per request. +- `"filters"`: An array of objects to specify a filter that returns a more specific list + of license recommendations. +- `"maxResults"`: The maximum number of license recommendations to return with a single + request. To retrieve the remaining results, make another request with the returned + nextToken value. +- `"nextToken"`: The token to advance to the next page of license recommendations. +- `"resourceArns"`: The ARN that identifies the Amazon EC2 instance. The following is + the format of the ARN: arn:aws:ec2:region:aws_account_id:instance/instance-id +""" +function get_license_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return compute_optimizer( + "GetLicenseRecommendations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_license_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return compute_optimizer( + "GetLicenseRecommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_rdsdatabase_recommendation_projected_metrics(end_time, period, resource_arn, start_time, stat) + get_rdsdatabase_recommendation_projected_metrics(end_time, period, resource_arn, start_time, stat, params::Dict{String,<:Any}) + + Returns the projected metrics of Amazon RDS recommendations. + +# Arguments +- `end_time`: The timestamp of the last projected metrics data point to return. +- `period`: The granularity, in seconds, of the projected metrics data points. +- `resource_arn`: The ARN that identifies the Amazon RDS. The following is the format of + the ARN: arn:aws:rds:{region}:{accountId}:db:{resourceName} +- `start_time`: The timestamp of the first projected metrics data point to return. +- `stat`: The statistic of the projected metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"recommendationPreferences"`: +""" +function get_rdsdatabase_recommendation_projected_metrics( + endTime, + period, + resourceArn, + startTime, + stat; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return compute_optimizer( + "GetRDSDatabaseRecommendationProjectedMetrics", + Dict{String,Any}( + "endTime" => endTime, + "period" => period, + "resourceArn" => resourceArn, + "startTime" => startTime, + "stat" => stat, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_rdsdatabase_recommendation_projected_metrics( + endTime, + period, + resourceArn, + startTime, + stat, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return compute_optimizer( + "GetRDSDatabaseRecommendationProjectedMetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endTime" => endTime, + "period" => period, + "resourceArn" => resourceArn, + "startTime" => startTime, + "stat" => stat, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_rdsdatabase_recommendations() + get_rdsdatabase_recommendations(params::Dict{String,<:Any}) + + Returns Amazon RDS recommendations. Compute Optimizer generates recommendations for +Amazon RDS that meet a specific set of requirements. For more information, see the +Supported resources and requirements in the Compute Optimizer User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountIds"`: Return the Amazon RDS recommendations to the specified Amazon Web + Services account IDs. If your account is the management account or the delegated + administrator of an organization, use this parameter to return the Amazon RDS + recommendations to specific member accounts. You can only specify one account ID per + request. +- `"filters"`: An array of objects to specify a filter that returns a more specific list + of Amazon RDS recommendations. +- `"maxResults"`: The maximum number of Amazon RDS recommendations to return with a single + request. To retrieve the remaining results, make another request with the returned + nextToken value. +- `"nextToken"`: The token to advance to the next page of Amazon RDS recommendations. +- `"recommendationPreferences"`: +- `"resourceArns"`: The ARN that identifies the Amazon RDS. The following is the format + of the ARN: arn:aws:rds:{region}:{accountId}:db:{resourceName} The following is the + format of a DB Cluster ARN: arn:aws:rds:{region}:{accountId}:cluster:{resourceName} +""" +function get_rdsdatabase_recommendations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return compute_optimizer( + "GetRDSDatabaseRecommendations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_rdsdatabase_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return compute_optimizer( + "GetRDSDatabaseRecommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_recommendation_preferences(resource_type) get_recommendation_preferences(resource_type, params::Dict{String,<:Any}) @@ -958,8 +1247,7 @@ infrastructure metrics in the Compute Optimizer User Guide. - `resource_type`: The target resource type of the recommendation preference for which to return preferences. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. 
The AutoScalingGroup option encompasses only - instances that are part of an Auto Scaling group. The valid values for this parameter are - Ec2Instance and AutoScalingGroup. + instances that are part of an Auto Scaling group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1048,8 +1336,7 @@ infrastructure metrics in the Compute Optimizer User Guide. - `resource_type`: The target resource type of the recommendation preference to create. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an - Auto Scaling group. The valid values for this parameter are Ec2Instance and - AutoScalingGroup. + Auto Scaling group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1067,6 +1354,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys To deactivate it, create a recommendation preference. Specify the Inactive status to deactivate the feature, or specify Active to activate it. For more information, see Inferred workload types in the Compute Optimizer User Guide. +- `"lookBackPeriod"`: The preference to control the number of days the utilization metrics + of the Amazon Web Services resource are analyzed. When this preference isn't specified, we + use the default value DAYS_14. You can only set this preference for the Amazon EC2 + instance and Auto Scaling group resource types. +- `"preferredResources"`: The preference to control which resource type values are + considered when generating rightsizing recommendations. You can specify this preference as + a combination of include and exclude lists. You must specify either an includeList or + excludeList. If the preference is an empty set of resource type values, an error occurs. + You can only set this preference for the Amazon EC2 instance and Auto Scaling group + resource types. +- `"savingsEstimationMode"`: The status of the savings estimation mode preference to + create or update. Specify the AfterDiscounts status to activate the preference, or specify + BeforeDiscounts to deactivate the preference. Only the account manager or delegated + administrator of your organization can activate this preference. For more information, see + Savings estimation mode in the Compute Optimizer User Guide. - `"scope"`: An object that describes the scope of the recommendation preference to create. You can create recommendation preferences at the organization level (for management accounts of an organization only), account level, and resource level. For more information, @@ -1079,6 +1381,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys recommendation preferences at the resource level for instances that are part of an Auto Scaling group. You can create recommendation preferences at the resource level only for standalone instances. +- `"utilizationPreferences"`: The preference to control the resource’s CPU utilization + threshold, CPU utilization headroom, and memory utilization headroom. When this preference + isn't specified, we use the following default values. CPU utilization: P99_5 for + threshold PERCENT_20 for headroom Memory utilization: PERCENT_20 for headroom + You can only set CPU and memory utilization preferences for the Amazon EC2 instance + resource type. The threshold setting isn’t available for memory utilization. 
""" function put_recommendation_preferences( resourceType; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/config_service.jl b/src/services/config_service.jl index bd6f4c5f6a..5ec9080d78 100644 --- a/src/services/config_service.jl +++ b/src/services/config_service.jl @@ -1047,7 +1047,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information. If you do not specify any names, Config returns status information for all Config managed rules that you use. - `"Limit"`: The number of rule evaluation results that you want returned. This parameter - is required if the rule limit for your account is more than the default of 150 rules. For + is required if the rule limit for your account is more than the default of 1000 rules. For information about requesting a rule limit increase, see Config Limits in the Amazon Web Services General Reference Guide. - `"NextToken"`: The nextToken string returned on a previous page that you use to get the @@ -1485,7 +1485,7 @@ end Returns a list of organization Config rules. When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization Config rule names. It is only applicable, when you request all the -organization Config rules. For accounts within an organzation If you deploy an +organization Config rules. For accounts within an organization If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or @@ -1574,13 +1574,13 @@ end Returns a list of organization conformance packs. When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization conformance packs names. They are only applicable, when you request -all the organization conformance packs. For accounts within an organzation If you deploy -an organizational rule or conformance pack in an organization administrator account, and -then establish a delegated administrator and deploy an organizational rule or conformance -pack in the delegated administrator account, you won't be able to see the organizational -rule or conformance pack in the organization administrator account from the delegated -administrator account or see the organizational rule or conformance pack in the delegated -administrator account from organization administrator account. The +all the organization conformance packs. For accounts within an organization If you +deploy an organizational rule or conformance pack in an organization administrator account, +and then establish a delegated administrator and deploy an organizational rule or +conformance pack in the delegated administrator account, you won't be able to see the +organizational rule or conformance pack in the organization administrator account from the +delegated administrator account or see the organizational rule or conformance pack in the +delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs. 
@@ -2576,17 +2576,19 @@ end get_resource_config_history(resource_id, resource_type) get_resource_config_history(resource_id, resource_type, params::Dict{String,<:Any}) -Returns a list of ConfigurationItems for the specified resource. The list contains details -about each state of the resource during the specified time interval. If you specified a -retention period to retain your ConfigurationItems between a minimum of 30 days and a -maximum of 7 years (2557 days), Config returns the ConfigurationItems for the specified -retention period. The response is paginated. By default, Config returns a limit of 10 -configuration items per page. You can customize this number with the limit parameter. The -response includes a nextToken string. To get the next page of results, run the request -again and specify the string for the nextToken parameter. Each call to the API is limited -to span a duration of seven days. It is likely that the number of records returned is -smaller than the specified limit. In such cases, you can make another call, using the -nextToken. + For accurate reporting on the compliance status, you must record the +AWS::Config::ResourceCompliance resource type. For more information, see Selecting Which +Resources Config Records. Returns a list of ConfigurationItems for the specified resource. +The list contains details about each state of the resource during the specified time +interval. If you specified a retention period to retain your ConfigurationItems between a +minimum of 30 days and a maximum of 7 years (2557 days), Config returns the +ConfigurationItems for the specified retention period. The response is paginated. By +default, Config returns a limit of 10 configuration items per page. You can customize this +number with the limit parameter. The response includes a nextToken string. To get the next +page of results, run the request again and specify the string for the nextToken parameter. +Each call to the API is limited to span a duration of seven days. It is likely that the +number of records returned is smaller than the specified limit. In such cases, you can make +another call, using the nextToken. # Arguments - `resource_id`: The ID of the resource (for example., sg-xxxxxx). @@ -2596,11 +2598,11 @@ nextToken. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"chronologicalOrder"`: The chronological order for configuration items listed. By default, the results are listed in reverse chronological order. -- `"earlierTime"`: The time stamp that indicates an earlier time. If not specified, the - action returns paginated results that contain configuration items that start when the first - configuration item was recorded. -- `"laterTime"`: The time stamp that indicates a later time. If not specified, current time - is taken. +- `"earlierTime"`: The chronologically earliest time in the time range for which the + history is requested. If not specified, the action returns paginated results that contain + configuration items that start when the first configuration item was recorded. +- `"laterTime"`: The chronologically latest time in the time range for which the history + is requested. If not specified, current time is taken. - `"limit"`: The maximum number of configuration items returned on each page. The default is 10. You cannot specify a number greater than 100. If you specify 0, Config uses the default. @@ -3246,7 +3248,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`.
Valid keys - `"TemplateS3Uri"`: The location of the file containing the template body (s3://bucketname/prefix). The uri must point to a conformance pack template (max size: 300 KB) that is located in an Amazon S3 bucket in the same Region as the conformance pack. - You must have access to read Amazon S3 bucket. + You must have access to read Amazon S3 bucket. In addition, in order to ensure a successful + deployment, the template object must not be in an archived storage class if this parameter + is passed. - `"TemplateSSMDocumentDetails"`: An object of type TemplateSSMDocumentDetails, which contains the name or the Amazon Resource Name (ARN) of the Amazon Web Services Systems Manager document (SSM document) and the version of the SSM document that is used to create @@ -3285,14 +3289,15 @@ end put_delivery_channel(delivery_channel) put_delivery_channel(delivery_channel, params::Dict{String,<:Any}) -Creates a delivery channel object to deliver configuration information to an Amazon S3 -bucket and Amazon SNS topic. Before you can create a delivery channel, you must create a -configuration recorder. You can use this action to change the Amazon S3 bucket or an Amazon -SNS topic of the existing delivery channel. To change the Amazon S3 bucket or an Amazon SNS -topic, call this action and specify the changed values for the S3 bucket and the SNS topic. -If you specify a different value for either the S3 bucket or the SNS topic, this action -will keep the existing value for the parameter that is not changed. You can have only one -delivery channel per region in your account. +Creates a delivery channel object to deliver configuration information and other compliance +information to an Amazon S3 bucket and Amazon SNS topic. For more information, see +Notifications that Config Sends to an Amazon SNS topic. Before you can create a delivery +channel, you must create a configuration recorder. You can use this action to change the +Amazon S3 bucket or an Amazon SNS topic of the existing delivery channel. To change the +Amazon S3 bucket or an Amazon SNS topic, call this action and specify the changed values +for the S3 bucket and the SNS topic. If you specify a different value for either the S3 +bucket or the SNS topic, this action will keep the existing value for the parameter that is +not changed. You can have only one delivery channel per region in your account. # Arguments - `delivery_channel`: The configuration delivery channel object that delivers the @@ -3553,7 +3558,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys bytes. - `"TemplateS3Uri"`: Location of file containing the template body. The uri must point to the conformance pack template (max size: 300 KB). You must have access to read Amazon S3 - bucket. + bucket. In addition, in order to ensure a successful deployment, the template object must + not be in an archived storage class if this parameter is passed. """ function put_organization_conformance_pack( OrganizationConformancePackName; aws_config::AbstractAWSConfig=global_aws_config() @@ -3595,16 +3601,26 @@ end Adds or updates the remediation configuration with a specific Config rule with the selected target or action. The API creates the RemediationConfiguration object for the Config rule. The Config rule must already exist for you to add a remediation configuration. The target -(SSM document) must exist and have permissions to use the target. 
If you make backward -incompatible changes to the SSM document, you must call this again to ensure the -remediations can run. This API does not support adding remediation configurations for -service-linked Config Rules such as Organization Config rules, the rules deployed by -conformance packs, and rules deployed by Amazon Web Services Security Hub. For manual -remediation configuration, you need to provide a value for automationAssumeRole or use a -value in the assumeRolefield to remediate your resources. The SSM automation document can -use either as long as it maps to a valid parameter. However, for automatic remediation -configuration, the only valid assumeRole field value is AutomationAssumeRole and you need -to provide a value for AutomationAssumeRole to remediate your resources. +(SSM document) must exist and have permissions to use the target. Be aware of backward +incompatible changes If you make backward incompatible changes to the SSM document, you +must call this again to ensure the remediations can run. This API does not support adding +remediation configurations for service-linked Config Rules such as Organization Config +rules, the rules deployed by conformance packs, and rules deployed by Amazon Web Services +Security Hub. Required fields For manual remediation configuration, you need to provide +a value for automationAssumeRole or use a value in the assumeRolefield to remediate your +resources. The SSM automation document can use either as long as it maps to a valid +parameter. However, for automatic remediation configuration, the only valid assumeRole +field value is AutomationAssumeRole and you need to provide a value for +AutomationAssumeRole to remediate your resources. Auto remediation can be initiated even +for compliant resources If you enable auto remediation for a specific Config rule using +the PutRemediationConfigurations API or the Config console, it initiates the remediation +process for all non-compliant resources for that specific rule. The auto remediation +process relies on the compliance data snapshot which is captured on a periodic basis. Any +non-compliant resource that is updated between the snapshot schedule will continue to be +remediated based on the last known compliance data snapshot. This means that in some cases +auto remediation can be initiated even for compliant resources, since the bootstrap +processor uses a database that can have stale evaluation results based on the last known +compliance data snapshot. # Arguments - `remediation_configurations`: A list of remediation configuration objects. @@ -3645,19 +3661,30 @@ end A remediation exception is when a specified resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a -specified resource with a specified Config rule. Config generates a remediation exception -when a problem occurs running a remediation action for a specified resource. Remediation -exceptions blocks auto-remediation until the exception is cleared. When placing an -exception on an Amazon Web Services resource, it is recommended that remediation is set as -manual remediation until the given Config rule for the specified resource evaluates the -resource as NON_COMPLIANT. Once the resource has been evaluated as NON_COMPLIANT, you can -add remediation exceptions and change the remediation type back from Manual to Auto if you -want to use auto-remediation. 
Otherwise, using auto-remediation before a NON_COMPLIANT -evaluation result can delete resources before the exception is applied. Placing an +specified resource with a specified Config rule. Exceptions block auto remediation +Config generates a remediation exception when a problem occurs running a remediation action +for a specified resource. Remediation exceptions blocks auto-remediation until the +exception is cleared. Manual remediation is recommended when placing an exception When +placing an exception on an Amazon Web Services resource, it is recommended that remediation +is set as manual remediation until the given Config rule for the specified resource +evaluates the resource as NON_COMPLIANT. Once the resource has been evaluated as +NON_COMPLIANT, you can add remediation exceptions and change the remediation type back from +Manual to Auto if you want to use auto-remediation. Otherwise, using auto-remediation +before a NON_COMPLIANT evaluation result can delete resources before the exception is +applied. Exceptions can only be performed on non-compliant resources Placing an exception can only be performed on resources that are NON_COMPLIANT. If you use this API for COMPLIANT resources or resources that are NOT_APPLICABLE, a remediation exception will not be generated. For more information on the conditions that initiate the possible Config -evaluation results, see Concepts | Config Rules in the Config Developer Guide. +evaluation results, see Concepts | Config Rules in the Config Developer Guide. Auto +remediation can be initiated even for compliant resources If you enable auto remediation +for a specific Config rule using the PutRemediationConfigurations API or the Config +console, it initiates the remediation process for all non-compliant resources for that +specific rule. The auto remediation process relies on the compliance data snapshot which is +captured on a periodic basis. Any non-compliant resource that is updated between the +snapshot schedule will continue to be remediated based on the last known compliance data +snapshot. This means that in some cases auto remediation can be initiated even for +compliant resources, since the bootstrap processor uses a database that can have stale +evaluation results based on the last known compliance data snapshot. # Arguments - `config_rule_name`: The name of the Config rule for which you want to create remediation diff --git a/src/services/connect.jl b/src/services/connect.jl index 1af51b9c73..23a305e19b 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -55,6 +55,54 @@ function activate_evaluation_form( ) end +""" + associate_analytics_data_set(data_set_id, instance_id) + associate_analytics_data_set(data_set_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. Associates the +specified dataset for a Amazon Connect instance with the target account. You can associate +only one dataset in a single call. + +# Arguments +- `data_set_id`: The identifier of the dataset to associate with the target account. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"TargetAccountId"`: The identifier of the target account. Use to associate a dataset to + a different account than the one containing the Amazon Connect instance. 
If not specified, + by default this value is the Amazon Web Services account that has the Amazon Connect + instance. +""" +function associate_analytics_data_set( + DataSetId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/analytics-data/instance/$(InstanceId)/association", + Dict{String,Any}("DataSetId" => DataSetId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_analytics_data_set( + DataSetId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/analytics-data/instance/$(InstanceId)/association", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("DataSetId" => DataSetId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_approved_origin(instance_id, origin) associate_approved_origin(instance_id, origin, params::Dict{String,<:Any}) @@ -175,6 +223,64 @@ function associate_default_vocabulary( ) end +""" + associate_flow(flow_id, instance_id, resource_id, resource_type) + associate_flow(flow_id, instance_id, resource_id, resource_type, params::Dict{String,<:Any}) + +Associates a connect resource to a flow. + +# Arguments +- `flow_id`: The identifier of the flow. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `resource_id`: The identifier of the resource. +- `resource_type`: A valid resource type. + +""" +function associate_flow( + FlowId, + InstanceId, + ResourceId, + ResourceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/flow-associations/$(InstanceId)", + Dict{String,Any}( + "FlowId" => FlowId, "ResourceId" => ResourceId, "ResourceType" => ResourceType + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_flow( + FlowId, + InstanceId, + ResourceId, + ResourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/flow-associations/$(InstanceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "FlowId" => FlowId, + "ResourceId" => ResourceId, + "ResourceType" => ResourceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_instance_storage_config(instance_id, resource_type, storage_config) associate_instance_storage_config(instance_id, resource_type, storage_config, params::Dict{String,<:Any}) @@ -190,7 +296,14 @@ association. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. -- `resource_type`: A valid resource type. +- `resource_type`: A valid resource type. To enable streaming for real-time analysis of + contacts, use the following types: For chat contacts, use + REAL_TIME_CONTACT_ANALYSIS_CHAT_SEGMENTS. For voice contacts, use + REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS. REAL_TIME_CONTACT_ANALYSIS_SEGMENTS is + deprecated, but it is still supported and will apply only to VOICE channel contacts. Use + REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS for voice contacts moving forward. If you have + previously associated a stream with REAL_TIME_CONTACT_ANALYSIS_SEGMENTS, no action is + needed to update the stream to REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS. - `storage_config`: A valid storage type. 
""" @@ -502,65 +615,49 @@ function associate_security_key( end """ - claim_phone_number(phone_number, target_arn) - claim_phone_number(phone_number, target_arn, params::Dict{String,<:Any}) + associate_traffic_distribution_group_user(instance_id, traffic_distribution_group_id, user_id) + associate_traffic_distribution_group_user(instance_id, traffic_distribution_group_id, user_id, params::Dict{String,<:Any}) -Claims an available phone number to your Amazon Connect instance or traffic distribution -group. You can call this API only in the same Amazon Web Services Region where the Amazon -Connect instance or traffic distribution group was created. For more information about how -to use this operation, see Claim a phone number in your country and Claim phone numbers to -traffic distribution groups in the Amazon Connect Administrator Guide. You can call the -SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call the -DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation. +Associates an agent with a traffic distribution group. # Arguments -- `phone_number`: The phone number you want to claim. Phone numbers are formatted [+] - [country code] [subscriber number including area code]. -- `target_arn`: The Amazon Resource Name (ARN) for Amazon Connect instances or traffic - distribution groups that phone numbers are claimed to. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `traffic_distribution_group_id`: The identifier of the traffic distribution group. This + can be the ID or the ARN if the API is being called in the Region where the traffic + distribution group was created. The ARN must be provided if the call is from the replicated + Region. +- `user_id`: The identifier of the user account. This can be the ID or the ARN of the user. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If not provided, the Amazon Web Services SDK populates this - field. For more information about idempotency, see Making retries safe with idempotent - APIs. Pattern: ^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12} -- `"PhoneNumberDescription"`: The description of the phone number. -- `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. 
""" -function claim_phone_number( - PhoneNumber, TargetArn; aws_config::AbstractAWSConfig=global_aws_config() +function associate_traffic_distribution_group_user( + InstanceId, + TrafficDistributionGroupId, + UserId; + aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "POST", - "/phone-number/claim", - Dict{String,Any}( - "PhoneNumber" => PhoneNumber, - "TargetArn" => TargetArn, - "ClientToken" => string(uuid4()), - ); + "PUT", + "/traffic-distribution-group/$(TrafficDistributionGroupId)/user", + Dict{String,Any}("InstanceId" => InstanceId, "UserId" => UserId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function claim_phone_number( - PhoneNumber, - TargetArn, +function associate_traffic_distribution_group_user( + InstanceId, + TrafficDistributionGroupId, + UserId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "POST", - "/phone-number/claim", + "PUT", + "/traffic-distribution-group/$(TrafficDistributionGroupId)/user", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "PhoneNumber" => PhoneNumber, - "TargetArn" => TargetArn, - "ClientToken" => string(uuid4()), - ), + Dict{String,Any}("InstanceId" => InstanceId, "UserId" => UserId), params, ), ); @@ -570,48 +667,43 @@ function claim_phone_number( end """ - create_agent_status(instance_id, name, state) - create_agent_status(instance_id, name, state, params::Dict{String,<:Any}) + associate_user_proficiencies(instance_id, user_id, user_proficiencies) + associate_user_proficiencies(instance_id, user_id, user_proficiencies, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Creates an -agent status for the specified Amazon Connect instance. +>Associates a set of proficiencies with a user. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance - ID in the Amazon Resource Name (ARN) of the instance. -- `name`: The name of the status. -- `state`: The state of the status. + ID in the Amazon Resource Name (ARN of the instance). +- `user_id`: The identifier of the user account. +- `user_proficiencies`: The proficiencies to associate with the user. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the status. -- `"DisplayOrder"`: The display order of the status. -- `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. 
""" -function create_agent_status( - InstanceId, Name, State; aws_config::AbstractAWSConfig=global_aws_config() +function associate_user_proficiencies( + InstanceId, UserId, UserProficiencies; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( - "PUT", - "/agent-status/$(InstanceId)", - Dict{String,Any}("Name" => Name, "State" => State); + "POST", + "/users/$(InstanceId)/$(UserId)/associate-proficiencies", + Dict{String,Any}("UserProficiencies" => UserProficiencies); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function create_agent_status( +function associate_user_proficiencies( InstanceId, - Name, - State, + UserId, + UserProficiencies, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "PUT", - "/agent-status/$(InstanceId)", + "POST", + "/users/$(InstanceId)/$(UserId)/associate-proficiencies", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("Name" => Name, "State" => State), params) + mergewith( + _merge, Dict{String,Any}("UserProficiencies" => UserProficiencies), params + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -619,53 +711,47 @@ function create_agent_status( end """ - create_contact_flow(content, instance_id, name, type) - create_contact_flow(content, instance_id, name, type, params::Dict{String,<:Any}) + batch_associate_analytics_data_set(data_set_ids, instance_id) + batch_associate_analytics_data_set(data_set_ids, instance_id, params::Dict{String,<:Any}) -Creates a flow for the specified Amazon Connect instance. You can also create and update -flows using the Amazon Connect Flow language. +This API is in preview release for Amazon Connect and is subject to change. Associates a +list of analytics datasets for a given Amazon Connect instance to a target account. You can +associate multiple datasets in a single call. # Arguments -- `content`: The content of the flow. -- `instance_id`: The identifier of the Amazon Connect instance. -- `name`: The name of the flow. -- `type`: The type of the flow. For descriptions of the available types, see Choose a flow - type in the Amazon Connect Administrator Guide. +- `data_set_ids`: An array of dataset identifiers to associate. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the flow. -- `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +- `"TargetAccountId"`: The identifier of the target account. Use to associate a dataset to + a different account than the one containing the Amazon Connect instance. If not specified, + by default this value is the Amazon Web Services account that has the Amazon Connect + instance. 
""" -function create_contact_flow( - Content, InstanceId, Name, Type; aws_config::AbstractAWSConfig=global_aws_config() +function batch_associate_analytics_data_set( + DataSetIds, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( "PUT", - "/contact-flows/$(InstanceId)", - Dict{String,Any}("Content" => Content, "Name" => Name, "Type" => Type); + "/analytics-data/instance/$(InstanceId)/associations", + Dict{String,Any}("DataSetIds" => DataSetIds); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function create_contact_flow( - Content, +function batch_associate_analytics_data_set( + DataSetIds, InstanceId, - Name, - Type, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( "PUT", - "/contact-flows/$(InstanceId)", + "/analytics-data/instance/$(InstanceId)/associations", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("Content" => Content, "Name" => Name, "Type" => Type), - params, - ), + mergewith(_merge, Dict{String,Any}("DataSetIds" => DataSetIds), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -673,57 +759,47 @@ function create_contact_flow( end """ - create_contact_flow_module(content, instance_id, name) - create_contact_flow_module(content, instance_id, name, params::Dict{String,<:Any}) + batch_disassociate_analytics_data_set(data_set_ids, instance_id) + batch_disassociate_analytics_data_set(data_set_ids, instance_id, params::Dict{String,<:Any}) -Creates a flow module for the specified Amazon Connect instance. +This API is in preview release for Amazon Connect and is subject to change. Removes a list +of analytics datasets associated with a given Amazon Connect instance. You can disassociate +multiple datasets in a single call. # Arguments -- `content`: The content of the flow module. +- `data_set_ids`: An array of associated dataset identifiers to remove. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. -- `name`: The name of the flow module. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If not provided, the Amazon Web Services SDK populates this - field. For more information about idempotency, see Making retries safe with idempotent APIs. -- `"Description"`: The description of the flow module. -- `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +- `"TargetAccountId"`: The identifier of the target account. Use to disassociate a dataset + from a different account than the one containing the Amazon Connect instance. If not + specified, by default this value is the Amazon Web Services account that has the Amazon + Connect instance. 
""" -function create_contact_flow_module( - Content, InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() +function batch_disassociate_analytics_data_set( + DataSetIds, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( - "PUT", - "/contact-flow-modules/$(InstanceId)", - Dict{String,Any}( - "Content" => Content, "Name" => Name, "ClientToken" => string(uuid4()) - ); + "POST", + "/analytics-data/instance/$(InstanceId)/associations", + Dict{String,Any}("DataSetIds" => DataSetIds); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function create_contact_flow_module( - Content, +function batch_disassociate_analytics_data_set( + DataSetIds, InstanceId, - Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "PUT", - "/contact-flow-modules/$(InstanceId)", + "POST", + "/analytics-data/instance/$(InstanceId)/associations", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "Content" => Content, "Name" => Name, "ClientToken" => string(uuid4()) - ), - params, - ), + mergewith(_merge, Dict{String,Any}("DataSetIds" => DataSetIds), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -731,57 +807,51 @@ function create_contact_flow_module( end """ - create_evaluation_form(instance_id, items, title) - create_evaluation_form(instance_id, items, title, params::Dict{String,<:Any}) + batch_get_attached_file_metadata(file_ids, instance_id, associated_resource_arn) + batch_get_attached_file_metadata(file_ids, instance_id, associated_resource_arn, params::Dict{String,<:Any}) -Creates an evaluation form in the specified Amazon Connect instance. The form can be used -to define questions related to agent performance, and create sections to organize such -questions. Question and section identifiers cannot be duplicated within the same evaluation -form. +Allows you to retrieve metadata about multiple attached files on an associated resource. +Each attached file provided in the input list must be associated with the input +AssociatedResourceArn. # Arguments -- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance - ID in the Amazon Resource Name (ARN) of the instance. -- `items`: Items that are part of the evaluation form. The total number of sections and - questions must not exceed 100 each. Questions must be contained in a section. -- `title`: A title of the evaluation form. +- `file_ids`: The unique identifiers of the attached file resource. +- `instance_id`: The unique identifier of the Connect instance. +- `associated_resource_arn`: The resource to which the attached file is (being) uploaded + to. Cases are the only current supported resource. This value must be a valid ARN. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If not provided, the Amazon Web Services SDK populates this - field. For more information about idempotency, see Making retries safe with idempotent APIs. -- `"Description"`: The description of the evaluation form. -- `"ScoringStrategy"`: A scoring strategy of the evaluation form. 
""" -function create_evaluation_form( - InstanceId, Items, Title; aws_config::AbstractAWSConfig=global_aws_config() +function batch_get_attached_file_metadata( + FileIds, + InstanceId, + associatedResourceArn; + aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "PUT", - "/evaluation-forms/$(InstanceId)", + "POST", + "/attached-files/$(InstanceId)", Dict{String,Any}( - "Items" => Items, "Title" => Title, "ClientToken" => string(uuid4()) + "FileIds" => FileIds, "associatedResourceArn" => associatedResourceArn ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function create_evaluation_form( +function batch_get_attached_file_metadata( + FileIds, InstanceId, - Items, - Title, + associatedResourceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "PUT", - "/evaluation-forms/$(InstanceId)", + "POST", + "/attached-files/$(InstanceId)", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "Items" => Items, "Title" => Title, "ClientToken" => string(uuid4()) + "FileIds" => FileIds, "associatedResourceArn" => associatedResourceArn ), params, ), @@ -792,33 +862,489 @@ function create_evaluation_form( end """ - create_hours_of_operation(config, instance_id, name, time_zone) - create_hours_of_operation(config, instance_id, name, time_zone, params::Dict{String,<:Any}) + batch_get_flow_association(instance_id, resource_ids) + batch_get_flow_association(instance_id, resource_ids, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Creates hours -of operation. +Retrieve the flow associations for the given resources. # Arguments -- `config`: Configuration information for the hours of operation: day, start time, and end - time. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. -- `name`: The name of the hours of operation. -- `time_zone`: The time zone of the hours of operation. +- `resource_ids`: A list of resource identifiers to retrieve flow associations. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the hours of operation. -- `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +- `"ResourceType"`: The type of resource association. 
""" -function create_hours_of_operation( - Config, InstanceId, Name, TimeZone; aws_config::AbstractAWSConfig=global_aws_config() +function batch_get_flow_association( + InstanceId, ResourceIds; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( - "PUT", - "/hours-of-operations/$(InstanceId)", - Dict{String,Any}("Config" => Config, "Name" => Name, "TimeZone" => TimeZone); + "POST", + "/flow-associations-batch/$(InstanceId)", + Dict{String,Any}("ResourceIds" => ResourceIds); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_flow_association( + InstanceId, + ResourceIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/flow-associations-batch/$(InstanceId)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceIds" => ResourceIds), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_put_contact(contact_data_request_list, instance_id) + batch_put_contact(contact_data_request_list, instance_id, params::Dict{String,<:Any}) + + Only the Amazon Connect outbound campaigns service principal is allowed to assume a role +in your account and call this API. Allows you to create a batch of contacts in Amazon +Connect. The outbound campaigns capability ingests dial requests via the +PutDialRequestBatch API. It then uses BatchPutContact to create contacts corresponding to +those dial requests. If agents are available, the dial requests are dialed out, which +results in a voice call. The resulting voice call uses the same contactId that was created +by BatchPutContact. + +# Arguments +- `contact_data_request_list`: List of individual contact requests. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +""" +function batch_put_contact( + ContactDataRequestList, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/contact/batch/$(InstanceId)", + Dict{String,Any}( + "ContactDataRequestList" => ContactDataRequestList, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_put_contact( + ContactDataRequestList, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/contact/batch/$(InstanceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ContactDataRequestList" => ContactDataRequestList, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + claim_phone_number(phone_number) + claim_phone_number(phone_number, params::Dict{String,<:Any}) + +Claims an available phone number to your Amazon Connect instance or traffic distribution +group. You can call this API only in the same Amazon Web Services Region where the Amazon +Connect instance or traffic distribution group was created. 
For more information about how +to use this operation, see Claim a phone number in your country and Claim phone numbers to +traffic distribution groups in the Amazon Connect Administrator Guide. You can call the +SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call the +DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation. If +you plan to claim and release numbers frequently, contact us for a service quota exception. +Otherwise, it is possible you will be blocked from claiming and releasing any more numbers +until up to 180 days past the oldest number released has expired. By default you can claim +and release up to 200% of your maximum number of active phone numbers. If you claim and +release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% +of your phone number service level quota, you will be blocked from claiming any more +numbers until 180 days past the oldest number released has expired. For example, if you +already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any +180 day period you release 99, claim 99, and then release 99, you will have exceeded the +200% limit. At that point you are blocked from claiming any more numbers until you open an +Amazon Web Services support ticket. + +# Arguments +- `phone_number`: The phone number you want to claim. Phone numbers are formatted [+] + [country code] [subscriber number including area code]. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent + APIs. Pattern: ^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12} +- `"InstanceId"`: The identifier of the Amazon Connect instance that phone numbers are + claimed to. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + You must enter InstanceId or TargetArn. +- `"PhoneNumberDescription"`: The description of the phone number. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +- `"TargetArn"`: The Amazon Resource Name (ARN) for Amazon Connect instances or traffic + distribution groups that phone number inbound traffic is routed through. You must enter + InstanceId or TargetArn. 
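+
+# Example
+An illustrative sketch only: it assumes AWS.jl's high-level `@service` interface, and the
+phone number and target ARN shown are placeholders, not values taken from this API
+definition.
+```julia
+using AWS
+@service Connect
+
+# Claim a placeholder number to a hypothetical traffic distribution group.
+Connect.claim_phone_number(
+    "+18005550199",
+    Dict(
+        "TargetArn" => "arn:aws:connect:us-east-1:123456789012:traffic-distribution-group/EXAMPLE",
+        "PhoneNumberDescription" => "Support line",
+    ),
+)
+```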
+""" +function claim_phone_number(PhoneNumber; aws_config::AbstractAWSConfig=global_aws_config()) + return connect( + "POST", + "/phone-number/claim", + Dict{String,Any}("PhoneNumber" => PhoneNumber, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function claim_phone_number( + PhoneNumber, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/phone-number/claim", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "PhoneNumber" => PhoneNumber, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + complete_attached_file_upload(file_id, instance_id, associated_resource_arn) + complete_attached_file_upload(file_id, instance_id, associated_resource_arn, params::Dict{String,<:Any}) + +Allows you to confirm that the attached file has been uploaded using the pre-signed URL +provided in the StartAttachedFileUpload API. + +# Arguments +- `file_id`: The unique identifier of the attached file resource. +- `instance_id`: The unique identifier of the Connect instance. +- `associated_resource_arn`: The resource to which the attached file is (being) uploaded + to. Cases are the only current supported resource. This value must be a valid ARN. + +""" +function complete_attached_file_upload( + FileId, + InstanceId, + associatedResourceArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/attached-files/$(InstanceId)/$(FileId)", + Dict{String,Any}("associatedResourceArn" => associatedResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function complete_attached_file_upload( + FileId, + InstanceId, + associatedResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/attached-files/$(InstanceId)/$(FileId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("associatedResourceArn" => associatedResourceArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_agent_status(instance_id, name, state) + create_agent_status(instance_id, name, state, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. Creates an +agent status for the specified Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `name`: The name of the status. +- `state`: The state of the status. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: The description of the status. +- `"DisplayOrder"`: The display order of the status. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. 
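+
+# Example
+A minimal sketch of calling this operation via AWS.jl's `@service` macro; the instance ID
+is a placeholder and the optional description is passed through the `params` dictionary.
+```julia
+using AWS
+@service Connect
+
+Connect.create_agent_status(
+    "11111111-2222-3333-4444-555555555555",  # InstanceId (placeholder)
+    "Paperwork",                             # Name
+    "ENABLED",                               # State
+    Dict("Description" => "Agent is completing after-call work"),
+)
+```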
+""" +function create_agent_status( + InstanceId, Name, State; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/agent-status/$(InstanceId)", + Dict{String,Any}("Name" => Name, "State" => State); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_agent_status( + InstanceId, + Name, + State, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/agent-status/$(InstanceId)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("Name" => Name, "State" => State), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_contact_flow(content, instance_id, name, type) + create_contact_flow(content, instance_id, name, type, params::Dict{String,<:Any}) + +Creates a flow for the specified Amazon Connect instance. You can also create and update +flows using the Amazon Connect Flow language. + +# Arguments +- `content`: The JSON string that represents the content of the flow. For an example, see + Example flow in Amazon Connect Flow language. Length Constraints: Minimum length of 1. + Maximum length of 256000. +- `instance_id`: The identifier of the Amazon Connect instance. +- `name`: The name of the flow. +- `type`: The type of the flow. For descriptions of the available types, see Choose a flow + type in the Amazon Connect Administrator Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: The description of the flow. +- `"Status"`: Indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status + will initiate validation on the content. the SAVED status does not initiate validation of + the content. SAVED | PUBLISHED. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_contact_flow( + Content, InstanceId, Name, Type; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/contact-flows/$(InstanceId)", + Dict{String,Any}("Content" => Content, "Name" => Name, "Type" => Type); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_contact_flow( + Content, + InstanceId, + Name, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/contact-flows/$(InstanceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Content" => Content, "Name" => Name, "Type" => Type), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_contact_flow_module(content, instance_id, name) + create_contact_flow_module(content, instance_id, name, params::Dict{String,<:Any}) + +Creates a flow module for the specified Amazon Connect instance. + +# Arguments +- `content`: The JSON string that represents the content of the flow. For an example, see + Example flow in Amazon Connect Flow language. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `name`: The name of the flow module. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. 
If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"Description"`: The description of the flow module. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_contact_flow_module( + Content, InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/contact-flow-modules/$(InstanceId)", + Dict{String,Any}( + "Content" => Content, "Name" => Name, "ClientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_contact_flow_module( + Content, + InstanceId, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/contact-flow-modules/$(InstanceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Content" => Content, "Name" => Name, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_evaluation_form(instance_id, items, title) + create_evaluation_form(instance_id, items, title, params::Dict{String,<:Any}) + +Creates an evaluation form in the specified Amazon Connect instance. The form can be used +to define questions related to agent performance, and create sections to organize such +questions. Question and section identifiers cannot be duplicated within the same evaluation +form. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `items`: Items that are part of the evaluation form. The total number of sections and + questions must not exceed 100 each. Questions must be contained in a section. +- `title`: A title of the evaluation form. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"Description"`: The description of the evaluation form. +- `"ScoringStrategy"`: A scoring strategy of the evaluation form. +""" +function create_evaluation_form( + InstanceId, Items, Title; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/evaluation-forms/$(InstanceId)", + Dict{String,Any}( + "Items" => Items, "Title" => Title, "ClientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_evaluation_form( + InstanceId, + Items, + Title, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/evaluation-forms/$(InstanceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Items" => Items, "Title" => Title, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_hours_of_operation(config, instance_id, name, time_zone) + create_hours_of_operation(config, instance_id, name, time_zone, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. 
Creates hours +of operation. + +# Arguments +- `config`: Configuration information for the hours of operation: day, start time, and end + time. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `name`: The name of the hours of operation. +- `time_zone`: The time zone of the hours of operation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: The description of the hours of operation. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_hours_of_operation( + Config, InstanceId, Name, TimeZone; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/hours-of-operations/$(InstanceId)", + Dict{String,Any}("Config" => Config, "Name" => Name, "TimeZone" => TimeZone); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -871,6 +1397,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ClientToken"`: The idempotency token. - `"DirectoryId"`: The identifier for the directory. - `"InstanceAlias"`: The name for your instance. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_instance( IdentityManagementType, @@ -939,7 +1467,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SourceType"`: The type of the data source. This field is only required for the EVENT integration type. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_integration_association( InstanceId, @@ -959,19 +1487,176 @@ function create_integration_association( end function create_integration_association( InstanceId, - IntegrationArn, - IntegrationType, + IntegrationArn, + IntegrationType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/instance/$(InstanceId)/integration-associations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IntegrationArn" => IntegrationArn, "IntegrationType" => IntegrationType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_participant(contact_id, instance_id, participant_details) + create_participant(contact_id, instance_id, participant_details, params::Dict{String,<:Any}) + +Adds a new participant into an on-going chat contact. For more information, see Customize +chat flow experiences by integrating custom participants. + +# Arguments +- `contact_id`: The identifier of the contact in this instance of Amazon Connect. Only + contacts in the CHAT channel are supported. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `participant_details`: Information identifying the participant. The only Valid value for + ParticipantRole is CUSTOM_BOT. DisplayName is Required. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +""" +function create_participant( + ContactId, + InstanceId, + ParticipantDetails; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contact/create-participant", + Dict{String,Any}( + "ContactId" => ContactId, + "InstanceId" => InstanceId, + "ParticipantDetails" => ParticipantDetails, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_participant( + ContactId, + InstanceId, + ParticipantDetails, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contact/create-participant", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ContactId" => ContactId, + "InstanceId" => InstanceId, + "ParticipantDetails" => ParticipantDetails, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_persistent_contact_association(initial_contact_id, instance_id, rehydration_type, source_contact_id) + create_persistent_contact_association(initial_contact_id, instance_id, rehydration_type, source_contact_id, params::Dict{String,<:Any}) + +Enables rehydration of chats for the lifespan of a contact. For more information about chat +rehydration, see Enable persistent chat in the Amazon Connect Administrator Guide. + +# Arguments +- `initial_contact_id`: This is the contactId of the current contact that the + CreatePersistentContactAssociation API is being called from. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `rehydration_type`: The contactId chosen for rehydration depends on the type chosen. + ENTIRE_PAST_SESSION: Rehydrates a chat from the most recently terminated past chat contact + of the specified past ended chat session. To use this type, provide the initialContactId of + the past ended chat session in the sourceContactId field. In this type, Amazon Connect + determines what the most recent chat contact on the past ended chat session and uses it to + start a persistent chat. FROM_SEGMENT: Rehydrates a chat from the specified past chat + contact provided in the sourceContactId field. The actual contactId used for rehydration + is provided in the response of this API. To illustrate how to use rehydration type, + consider the following example: A customer starts a chat session. Agent a1 accepts the chat + and a conversation starts between the customer and Agent a1. This first contact creates a + contact ID C1. Agent a1 then transfers the chat to Agent a2. This creates another contact + ID C2. At this point Agent a2 ends the chat. The customer is forwarded to the disconnect + flow for a post chat survey that creates another contact ID C3. After the chat survey, the + chat session ends. Later, the customer returns and wants to resume their past chat session. + At this point, the customer can have following use cases: Use Case 1: The customer + wants to continue the past chat session but they want to hide the post chat survey. 
For + this they will use the following configuration: Configuration SourceContactId = + \"C2\" RehydrationType = \"FROM_SEGMENT\" Expected behavior This starts a + persistent chat session from the specified past ended contact (C2). Transcripts of past + chat sessions C2 and C1 are accessible in the current persistent chat session. Note that + chat segment C3 is dropped from the persistent chat session. Use Case 2: The + customer wants to continue the past chat session and see the transcript of the entire past + engagement, including the post chat survey. For this they will use the following + configuration: Configuration SourceContactId = \"C1\" RehydrationType = + \"ENTIRE_PAST_SESSION\" Expected behavior This starts a persistent chat session + from the most recently ended chat contact (C3). Transcripts of past chat sessions C3, C2 + and C1 are accessible in the current persistent chat session. +- `source_contact_id`: The contactId from which a persistent chat session must be started. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +""" +function create_persistent_contact_association( + InitialContactId, + InstanceId, + RehydrationType, + SourceContactId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contact/persistent-contact-association/$(InstanceId)/$(InitialContactId)", + Dict{String,Any}( + "RehydrationType" => RehydrationType, "SourceContactId" => SourceContactId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_persistent_contact_association( + InitialContactId, + InstanceId, + RehydrationType, + SourceContactId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "PUT", - "/instance/$(InstanceId)/integration-associations", + "POST", + "/contact/persistent-contact-association/$(InstanceId)/$(InitialContactId)", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "IntegrationArn" => IntegrationArn, "IntegrationType" => IntegrationType + "RehydrationType" => RehydrationType, + "SourceContactId" => SourceContactId, ), params, ), @@ -982,66 +1667,41 @@ function create_integration_association( end """ - create_participant(contact_id, instance_id, participant_details) - create_participant(contact_id, instance_id, participant_details, params::Dict{String,<:Any}) + create_predefined_attribute(instance_id, name, values) + create_predefined_attribute(instance_id, name, values, params::Dict{String,<:Any}) -Adds a new participant into an on-going chat contact. For more information, see Customize -chat flow experiences by integrating custom participants. +Creates a new predefined attribute for the specified Amazon Connect instance. # Arguments -- `contact_id`: The identifier of the contact in this instance of Amazon Connect. Only - contacts in the CHAT channel are supported. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. -- `participant_details`: Information identifying the participant. The only Valid value for - ParticipantRole is CUSTOM_BOT. DisplayName is Required. +- `name`: The name of the predefined attribute. 
+- `values`: The values of the predefined attribute. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If not provided, the Amazon Web Services SDK populates this - field. For more information about idempotency, see Making retries safe with idempotent APIs. """ -function create_participant( - ContactId, - InstanceId, - ParticipantDetails; - aws_config::AbstractAWSConfig=global_aws_config(), +function create_predefined_attribute( + InstanceId, Name, Values; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( - "POST", - "/contact/create-participant", - Dict{String,Any}( - "ContactId" => ContactId, - "InstanceId" => InstanceId, - "ParticipantDetails" => ParticipantDetails, - "ClientToken" => string(uuid4()), - ); + "PUT", + "/predefined-attributes/$(InstanceId)", + Dict{String,Any}("Name" => Name, "Values" => Values); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function create_participant( - ContactId, +function create_predefined_attribute( InstanceId, - ParticipantDetails, + Name, + Values, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "POST", - "/contact/create-participant", + "PUT", + "/predefined-attributes/$(InstanceId)", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ContactId" => ContactId, - "InstanceId" => InstanceId, - "ParticipantDetails" => ParticipantDetails, - "ClientToken" => string(uuid4()), - ), - params, - ), + mergewith(_merge, Dict{String,Any}("Name" => Name, "Values" => Values), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1053,19 +1713,20 @@ end create_prompt(instance_id, name, s3_uri, params::Dict{String,<:Any}) Creates a prompt. For more information about prompts, such as supported file types and -maximum length, see Create prompts in the Amazon Connect Administrator's Guide. +maximum length, see Create prompts in the Amazon Connect Administrator Guide. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. - `name`: The name of the prompt. -- `s3_uri`: The URI for the S3 bucket where the prompt is stored. +- `s3_uri`: The URI for the S3 bucket where the prompt is stored. You can provide S3 + pre-signed URLs returned by the GetPromptFile API instead of providing S3 URIs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: The description of the prompt. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_prompt( InstanceId, Name, S3Uri; aws_config::AbstractAWSConfig=global_aws_config() @@ -1101,15 +1762,19 @@ end create_queue(hours_of_operation_id, instance_id, name, params::Dict{String,<:Any}) This API is in preview release for Amazon Connect and is subject to change. Creates a new -queue for the specified Amazon Connect instance. 
If the number being used in the input is -claimed to a traffic distribution group, and you are calling this API using an instance in -the Amazon Web Services Region where the traffic distribution group was created, you can -use either a full phone number ARN or UUID value for the OutboundCallerIdNumberId value of -the OutboundCallerConfig request body parameter. However, if the number is claimed to a -traffic distribution group and you are calling this API using an instance in the alternate -Amazon Web Services Region associated with the traffic distribution group, you must provide -a full phone number ARN. If a UUID is provided in this scenario, you will receive a -ResourceNotFoundException. +queue for the specified Amazon Connect instance. If the phone number is claimed to a +traffic distribution group that was created in the same Region as the Amazon Connect +instance where you are calling this API, then you can use a full phone number ARN or a UUID +for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic +distribution group that is in one Region, and you are calling this API from an instance in +another Amazon Web Services Region that is associated with the traffic distribution group, +you must provide a full phone number ARN. If a UUID is provided in this scenario, you will +receive a ResourceNotFoundException. Only use the phone number ARN format that doesn't +contain instance in the path, for example, +arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is +returned when you call the ListPhoneNumbersV2 API. If you plan to use IAM policies to +allow/deny access to this API for phone number resources claimed to a traffic distribution +group, see Allow or Deny queue API actions for phone numbers in a replica Region. # Arguments - `hours_of_operation_id`: The identifier for the hours of operation. @@ -1125,7 +1790,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OutboundCallerConfig"`: The outbound caller ID name, number, and outbound whisper flow. - `"QuickConnectIds"`: The quick connects available to agents who are working the queue. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_queue( HoursOfOperationId, InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() @@ -1171,14 +1836,14 @@ Creates a quick connect for the specified Amazon Connect instance. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. -- `name`: The name of the quick connect. +- `name`: A unique name of the quick connect. - `quick_connect_config`: Configuration settings for the quick connect. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: The description of the quick connect. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_quick_connect( InstanceId, Name, QuickConnectConfig; aws_config::AbstractAWSConfig=global_aws_config() @@ -1232,13 +1897,16 @@ Creates a new routing profile. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AgentAvailabilityTimer"`: Whether agents with this routing profile will have their + routing order calculated based on longest idle time or time since their last inbound + contact. - `"QueueConfigs"`: The inbound queues associated with the routing profile. If no queue is added, the agent can make only outbound calls. The limit of 10 array members applies to the maximum number of RoutingProfileQueueConfig objects that can be passed during a CreateRoutingProfile API request. It is different from the quota of 50 queues per routing profile per instance that is listed in Amazon Connect service quotas. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_routing_profile( DefaultOutboundQueueId, @@ -1372,8 +2040,7 @@ end create_security_profile(instance_id, security_profile_name) create_security_profile(instance_id, security_profile_name, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Creates a -security profile. +Creates a security profile. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -1382,16 +2049,22 @@ security profile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowedAccessControlHierarchyGroupId"`: The identifier of the hierarchy group that a + security profile uses to restrict access to resources in Amazon Connect. - `"AllowedAccessControlTags"`: The list of tags that a security profile uses to restrict access to resources in Amazon Connect. +- `"Applications"`: A list of third-party applications that the security profile will give + access to. - `"Description"`: The description of the security profile. +- `"HierarchyRestrictedResources"`: The list of resources that a security profile applies + hierarchy restrictions to in Amazon Connect. Following are acceptable ResourceNames: User. - `"Permissions"`: Permissions assigned to the security profile. For a list of valid permissions, see List of security profile permissions. - `"TagRestrictedResources"`: The list of resources that a security profile applies tag restrictions to in Amazon Connect. Following are acceptable ResourceNames: User | SecurityProfile | Queue | RoutingProfile - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_security_profile( InstanceId, SecurityProfileName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1494,8 +2167,12 @@ end create_traffic_distribution_group(instance_id, name, params::Dict{String,<:Any}) Creates a traffic distribution group given an Amazon Connect instance that has been -replicated. For more information about creating traffic distribution groups, see Set up -traffic distribution groups in the Amazon Connect Administrator Guide. +replicated. The SignInConfig distribution is available only on a default +TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data +type). 
If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default +TrafficDistributionGroup, an InvalidRequestException is returned. For more information +about creating traffic distribution groups, see Set up traffic distribution groups in the +Amazon Connect Administrator Guide. # Arguments - `instance_id`: The identifier of the Amazon Connect instance that has been replicated. @@ -1509,7 +2186,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys field. For more information about idempotency, see Making retries safe with idempotent APIs. - `"Description"`: A description for the traffic distribution group. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_traffic_distribution_group( InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() @@ -1565,7 +2242,7 @@ Creates a use case for an integration association. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_use_case( InstanceId, @@ -1603,9 +2280,11 @@ end create_user(instance_id, phone_config, routing_profile_id, security_profile_ids, username) create_user(instance_id, phone_config, routing_profile_id, security_profile_ids, username, params::Dict{String,<:Any}) -Creates a user account for the specified Amazon Connect instance. For information about how -to create user accounts using the Amazon Connect console, see Add Users in the Amazon -Connect Administrator Guide. +Creates a user account for the specified Amazon Connect instance. Certain UserIdentityInfo +parameters are required in some situations. For example, Email is required if you are using +SAML for identity management. FirstName and LastName are required if you are using Amazon +Connect or SAML for identity management. For information about how to create users using +the Amazon Connect admin website, see Add Users in the Amazon Connect Administrator Guide. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -1616,6 +2295,8 @@ Connect Administrator Guide. - `username`: The user name for the account. For instances not using SAML for identity management, the user name can include up to 20 characters. If you are using SAML for identity management, the user name can include up to 64 characters from [a-zA-Z0-9_-.@]+. + Username can include @ only if used in an email format. For example: Correct: testuser + Correct: testuser@example.com Incorrect: testuser@example # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1632,7 +2313,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Password"`: The password for the user account. A password is required if you are using Amazon Connect for identity management. Otherwise, it is an error to include a password. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. 
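+
+# Example
+An illustrative sketch, assuming AWS.jl's `@service` interface; every identifier below is a
+placeholder, and only the required positional arguments are shown.
+```julia
+using AWS
+@service Connect
+
+Connect.create_user(
+    "11111111-2222-3333-4444-555555555555",    # InstanceId (placeholder)
+    Dict("PhoneType" => "SOFT_PHONE"),         # PhoneConfig
+    "22222222-2222-2222-2222-222222222222",    # RoutingProfileId (placeholder)
+    ["33333333-3333-3333-3333-333333333333"],  # SecurityProfileIds (placeholder)
+    "testuser@example.com",                    # Username
+)
+```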
""" function create_user( InstanceId, @@ -1700,7 +2381,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ParentGroupId"`: The identifier for the parent hierarchy group. The user hierarchy is created at level one if the parent group ID is null. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_user_hierarchy_group( InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() @@ -1728,6 +2409,113 @@ function create_user_hierarchy_group( ) end +""" + create_view(content, instance_id, name, status) + create_view(content, instance_id, name, status, params::Dict{String,<:Any}) + +Creates a new view with the possible status of SAVED or PUBLISHED. The views will have a +unique name for each connect instance. It performs basic content validation if the status +is SAVED or full content validation if the status is set to PUBLISHED. An error is returned +if validation fails. It associates either the SAVED qualifier or both of the SAVED and +LATEST qualifiers with the provided view content based on the status. The view is +idempotent if ClientToken is provided. + +# Arguments +- `content`: View content containing all content necessary to render a view except for + runtime input data. The total uncompressed content has a maximum file size of 400kB. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. +- `name`: The name of the view. +- `status`: Indicates the view status as either SAVED or PUBLISHED. The PUBLISHED status + will initiate validation on the content. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique Id for each create view request to avoid duplicate view + creation. For example, the view is idempotent ClientToken is provided. +- `"Description"`: The description of the view. +- `"Tags"`: The tags associated with the view resource (not specific to view version).These + tags can be used to organize, track, or control access for this resource. For example, { + \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_view( + Content, InstanceId, Name, Status; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/views/$(InstanceId)", + Dict{String,Any}("Content" => Content, "Name" => Name, "Status" => Status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_view( + Content, + InstanceId, + Name, + Status, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/views/$(InstanceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Content" => Content, "Name" => Name, "Status" => Status), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_view_version(instance_id, view_id) + create_view_version(instance_id, view_id, params::Dict{String,<:Any}) + +Publishes a new version of the view identifier. Versions are immutable and monotonically +increasing. It returns the highest version if there is no change in content compared to +that version. An error is displayed if the supplied ViewContentSha256 is different from the +ViewContentSha256 of the LATEST alias. 
+ +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. +- `view_id`: The identifier of the view. Both ViewArn and ViewId can be used. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"VersionDescription"`: The description for the version being published. +- `"ViewContentSha256"`: Indicates the checksum value of the latest published view content. +""" +function create_view_version( + InstanceId, ViewId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/views/$(InstanceId)/$(ViewId)/versions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_view_version( + InstanceId, + ViewId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/views/$(InstanceId)/$(ViewId)/versions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_vocabulary(content, instance_id, language_code, vocabulary_name) create_vocabulary(content, instance_id, language_code, vocabulary_name, params::Dict{String,<:Any}) @@ -1756,7 +2544,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys APIs. If a create request is received more than once with same client token, subsequent requests return the previous response without creating a vocabulary again. - `"Tags"`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function create_vocabulary( Content, @@ -1858,6 +2646,56 @@ function deactivate_evaluation_form( ) end +""" + delete_attached_file(file_id, instance_id, associated_resource_arn) + delete_attached_file(file_id, instance_id, associated_resource_arn, params::Dict{String,<:Any}) + +Deletes an attached file along with the underlying S3 Object. The attached file is +permanently deleted if S3 bucket versioning is not enabled. + +# Arguments +- `file_id`: The unique identifier of the attached file resource. +- `instance_id`: The unique identifier of the Connect instance. +- `associated_resource_arn`: The resource to which the attached file is (being) uploaded + to. Cases are the only current supported resource. This value must be a valid ARN. 
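+
+# Example
+A sketch only, assuming the AWS.jl `@service` interface; the file ID, instance ID, and the
+Cases ARN below are hypothetical placeholders.
+```julia
+using AWS
+@service Connect
+
+Connect.delete_attached_file(
+    "11111111-1111-1111-1111-111111111111",  # FileId (placeholder)
+    "22222222-2222-2222-2222-222222222222",  # InstanceId (placeholder)
+    "arn:aws:cases:us-west-2:123456789012:domain/example-domain/case/example-case",
+)
+```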
+ +""" +function delete_attached_file( + FileId, + InstanceId, + associatedResourceArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/attached-files/$(InstanceId)/$(FileId)", + Dict{String,Any}("associatedResourceArn" => associatedResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_attached_file( + FileId, + InstanceId, + associatedResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/attached-files/$(InstanceId)/$(FileId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("associatedResourceArn" => associatedResourceArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_contact_evaluation(evaluation_id, instance_id) delete_contact_evaluation(evaluation_id, instance_id, params::Dict{String,<:Any}) @@ -2125,6 +2963,43 @@ function delete_integration_association( ) end +""" + delete_predefined_attribute(instance_id, name) + delete_predefined_attribute(instance_id, name, params::Dict{String,<:Any}) + +Deletes a predefined attribute from the specified Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `name`: The name of the predefined attribute. + +""" +function delete_predefined_attribute( + InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/predefined-attributes/$(InstanceId)/$(Name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_predefined_attribute( + InstanceId, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/predefined-attributes/$(InstanceId)/$(Name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_prompt(instance_id, prompt_id) delete_prompt(instance_id, prompt_id, params::Dict{String,<:Any}) @@ -2155,7 +3030,86 @@ function delete_prompt( ) return connect( "DELETE", - "/prompts/$(InstanceId)/$(PromptId)", + "/prompts/$(InstanceId)/$(PromptId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_queue(instance_id, queue_id) + delete_queue(instance_id, queue_id, params::Dict{String,<:Any}) + +Deletes a queue. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `queue_id`: The identifier for the queue. + +""" +function delete_queue( + InstanceId, QueueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/queues/$(InstanceId)/$(QueueId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_queue( + InstanceId, + QueueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/queues/$(InstanceId)/$(QueueId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_quick_connect(instance_id, quick_connect_id) + delete_quick_connect(instance_id, quick_connect_id, params::Dict{String,<:Any}) + +Deletes a quick connect. After calling DeleteUser, it's important to call +DeleteQuickConnect to delete any records related to the deleted users. 
This will help you: + Avoid dangling resources that impact your service quotas. Remove deleted users so they +don't appear to agents as transfer options. Avoid the disruption of other Amazon Connect +processes, such as instance replication and syncing if you're using Amazon Connect Global +Resiliency. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `quick_connect_id`: The identifier for the quick connect. + +""" +function delete_quick_connect( + InstanceId, QuickConnectId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/quick-connects/$(InstanceId)/$(QuickConnectId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_quick_connect( + InstanceId, + QuickConnectId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/quick-connects/$(InstanceId)/$(QuickConnectId)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2163,36 +3117,36 @@ function delete_prompt( end """ - delete_quick_connect(instance_id, quick_connect_id) - delete_quick_connect(instance_id, quick_connect_id, params::Dict{String,<:Any}) + delete_routing_profile(instance_id, routing_profile_id) + delete_routing_profile(instance_id, routing_profile_id, params::Dict{String,<:Any}) -Deletes a quick connect. +Deletes a routing profile. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. -- `quick_connect_id`: The identifier for the quick connect. +- `routing_profile_id`: The identifier of the routing profile. """ -function delete_quick_connect( - InstanceId, QuickConnectId; aws_config::AbstractAWSConfig=global_aws_config() +function delete_routing_profile( + InstanceId, RoutingProfileId; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( "DELETE", - "/quick-connects/$(InstanceId)/$(QuickConnectId)"; + "/routing-profiles/$(InstanceId)/$(RoutingProfileId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_quick_connect( +function delete_routing_profile( InstanceId, - QuickConnectId, + RoutingProfileId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( "DELETE", - "/quick-connects/$(InstanceId)/$(QuickConnectId)", + "/routing-profiles/$(InstanceId)/$(RoutingProfileId)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2238,8 +3192,7 @@ end delete_security_profile(instance_id, security_profile_id) delete_security_profile(instance_id, security_profile_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Deletes a -security profile. +Deletes a security profile. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -2397,7 +3350,12 @@ end Deletes a user account from the specified Amazon Connect instance. For information about what happens to a user's data when their account is deleted, see Delete Users from Your -Amazon Connect Instance in the Amazon Connect Administrator Guide. +Amazon Connect Instance in the Amazon Connect Administrator Guide. After calling +DeleteUser, call DeleteQuickConnect to delete any records related to the deleted users. +This will help you: Avoid dangling resources that impact your service quotas. 
Remove +deleted users so they don't appear to agents as transfer options. Avoid the disruption of +other Amazon Connect processes, such as instance replication and syncing if you're using +Amazon Connect Global Resiliency. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -2466,6 +3424,81 @@ function delete_user_hierarchy_group( ) end +""" + delete_view(instance_id, view_id) + delete_view(instance_id, view_id, params::Dict{String,<:Any}) + +Deletes the view entirely. It deletes the view and all associated qualifiers (versions and +aliases). + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. +- `view_id`: The identifier of the view. Both ViewArn and ViewId can be used. + +""" +function delete_view(InstanceId, ViewId; aws_config::AbstractAWSConfig=global_aws_config()) + return connect( + "DELETE", + "/views/$(InstanceId)/$(ViewId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_view( + InstanceId, + ViewId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/views/$(InstanceId)/$(ViewId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_view_version(instance_id, view_id, view_version) + delete_view_version(instance_id, view_id, view_version, params::Dict{String,<:Any}) + +Deletes the particular version specified in ViewVersion identifier. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. +- `view_id`: The identifier of the view. Both ViewArn and ViewId can be used. +- `view_version`: The version number of the view. + +""" +function delete_view_version( + InstanceId, ViewId, ViewVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/views/$(InstanceId)/$(ViewId)/versions/$(ViewVersion)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_view_version( + InstanceId, + ViewId, + ViewVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/views/$(InstanceId)/$(ViewId)/versions/$(ViewVersion)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_vocabulary(instance_id, vocabulary_id) delete_vocabulary(instance_id, vocabulary_id, params::Dict{String,<:Any}) @@ -2623,7 +3656,12 @@ end describe_contact_flow(contact_flow_id, instance_id, params::Dict{String,<:Any}) Describes the specified flow. You can also create and update flows using the Amazon Connect -Flow language. +Flow language. Use the SAVED alias in the request to describe the SAVED content of a Flow. +For example, arn:aws:.../contact-flow/{id}:SAVED. Once a contact flow is published, SAVED +needs to be supplied to view saved content that has not been published. In the response, +Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will +initiate validation on the content. SAVED does not initiate validation of the content. +SAVED | PUBLISHED # Arguments - `contact_flow_id`: The identifier of the flow. @@ -2659,7 +3697,10 @@ end describe_contact_flow_module(contact_flow_module_id, instance_id) describe_contact_flow_module(contact_flow_module_id, instance_id, params::Dict{String,<:Any}) -Describes the specified flow module. 
+Describes the specified flow module. Use the SAVED alias in the request to describe the +SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:SAVED. Once a contact +flow is published, SAVED needs to be supplied to view saved content that has not been +published. # Arguments - `contact_flow_module_id`: The identifier of the flow module. @@ -2936,6 +3977,43 @@ function describe_phone_number( ) end +""" + describe_predefined_attribute(instance_id, name) + describe_predefined_attribute(instance_id, name, params::Dict{String,<:Any}) + +Describes a predefined attribute for the specified Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `name`: The name of the predefined attribute. + +""" +function describe_predefined_attribute( + InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/predefined-attributes/$(InstanceId)/$(Name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_predefined_attribute( + InstanceId, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/predefined-attributes/$(InstanceId)/$(Name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_prompt(instance_id, prompt_id) describe_prompt(instance_id, prompt_id, params::Dict{String,<:Any}) @@ -3126,8 +4204,7 @@ end describe_security_profile(instance_id, security_profile_id) describe_security_profile(instance_id, security_profile_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Gets basic -information about the security profle. +Gets basic information about the security profle. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -3201,9 +4278,9 @@ end describe_user(instance_id, user_id) describe_user(instance_id, user_id, params::Dict{String,<:Any}) -Describes the specified user account. You can find the instance ID in the Amazon Connect -console (it’s the final part of the ARN). The console does not display the user IDs. -Instead, list the users and note the IDs provided in the output. +Describes the specified user. You can find the instance ID in the Amazon Connect console +(it’s the final part of the ARN). The console does not display the user IDs. Instead, +list the users and note the IDs provided in the output. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -3308,6 +4385,50 @@ function describe_user_hierarchy_structure( ) end +""" + describe_view(instance_id, view_id) + describe_view(instance_id, view_id, params::Dict{String,<:Any}) + +Retrieves the view for the specified Amazon Connect instance and view identifier. The view +identifier can be supplied as a ViewId or ARN. SAVED needs to be supplied if a view is +unpublished. The view identifier can contain an optional qualifier, for example, +<view-id>:SAVED, which is either an actual version number or an Amazon Connect +managed qualifier SAVED | LATEST. If it is not supplied, then LATEST is assumed for +customer managed views and an error is returned if there is no published content available. +Version 1 is assumed for Amazon Web Services managed views. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. 
You can find the instanceId + in the ARN of the instance. +- `view_id`: The ViewId of the view. This must be an ARN for Amazon Web Services managed + views. + +""" +function describe_view( + InstanceId, ViewId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/views/$(InstanceId)/$(ViewId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_view( + InstanceId, + ViewId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/views/$(InstanceId)/$(ViewId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_vocabulary(instance_id, vocabulary_id) describe_vocabulary(instance_id, vocabulary_id, params::Dict{String,<:Any}) @@ -3345,6 +4466,53 @@ function describe_vocabulary( ) end +""" + disassociate_analytics_data_set(data_set_id, instance_id) + disassociate_analytics_data_set(data_set_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. Removes the +dataset ID associated with a given Amazon Connect instance. + +# Arguments +- `data_set_id`: The identifier of the dataset to remove. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"TargetAccountId"`: The identifier of the target account. Use to associate a dataset to + a different account than the one containing the Amazon Connect instance. If not specified, + by default this value is the Amazon Web Services account that has the Amazon Connect + instance. +""" +function disassociate_analytics_data_set( + DataSetId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/analytics-data/instance/$(InstanceId)/association", + Dict{String,Any}("DataSetId" => DataSetId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_analytics_data_set( + DataSetId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/analytics-data/instance/$(InstanceId)/association", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("DataSetId" => DataSetId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_approved_origin(instance_id, origin) disassociate_approved_origin(instance_id, origin, params::Dict{String,<:Any}) @@ -3423,6 +4591,45 @@ function disassociate_bot( ) end +""" + disassociate_flow(instance_id, resource_id, resource_type) + disassociate_flow(instance_id, resource_id, resource_type, params::Dict{String,<:Any}) + +Disassociates a connect resource from a flow. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `resource_id`: The identifier of the resource. +- `resource_type`: A valid resource type. 
+ +""" +function disassociate_flow( + InstanceId, ResourceId, ResourceType; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/flow-associations/$(InstanceId)/$(ResourceId)/$(ResourceType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_flow( + InstanceId, + ResourceId, + ResourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/flow-associations/$(InstanceId)/$(ResourceId)/$(ResourceType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_instance_storage_config(association_id, instance_id, resource_type) disassociate_instance_storage_config(association_id, instance_id, resource_type, params::Dict{String,<:Any}) @@ -3736,6 +4943,102 @@ function disassociate_security_key( ) end +""" + disassociate_traffic_distribution_group_user(instance_id, traffic_distribution_group_id, user_id) + disassociate_traffic_distribution_group_user(instance_id, traffic_distribution_group_id, user_id, params::Dict{String,<:Any}) + +Disassociates an agent from a traffic distribution group. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `traffic_distribution_group_id`: The identifier of the traffic distribution group. This + can be the ID or the ARN if the API is being called in the Region where the traffic + distribution group was created. The ARN must be provided if the call is from the replicated + Region. +- `user_id`: The identifier for the user. This can be the ID or the ARN of the user. + +""" +function disassociate_traffic_distribution_group_user( + InstanceId, + TrafficDistributionGroupId, + UserId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/traffic-distribution-group/$(TrafficDistributionGroupId)/user", + Dict{String,Any}("InstanceId" => InstanceId, "UserId" => UserId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_traffic_distribution_group_user( + InstanceId, + TrafficDistributionGroupId, + UserId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/traffic-distribution-group/$(TrafficDistributionGroupId)/user", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InstanceId" => InstanceId, "UserId" => UserId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_user_proficiencies(instance_id, user_id, user_proficiencies) + disassociate_user_proficiencies(instance_id, user_id, user_proficiencies, params::Dict{String,<:Any}) + +Disassociates a set of proficiencies from a user. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `user_id`: The identifier of the user account. +- `user_proficiencies`: The proficiencies to disassociate from the user. 
+ +""" +function disassociate_user_proficiencies( + InstanceId, UserId, UserProficiencies; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/users/$(InstanceId)/$(UserId)/disassociate-proficiencies", + Dict{String,Any}("UserProficiencies" => UserProficiencies); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_user_proficiencies( + InstanceId, + UserId, + UserProficiencies, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/users/$(InstanceId)/$(UserId)/disassociate-proficiencies", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("UserProficiencies" => UserProficiencies), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ dismiss_user_contact(contact_id, instance_id, user_id) dismiss_user_contact(contact_id, instance_id, user_id, params::Dict{String,<:Any}) @@ -3780,6 +5083,61 @@ function dismiss_user_contact( ) end +""" + get_attached_file(file_id, instance_id, associated_resource_arn) + get_attached_file(file_id, instance_id, associated_resource_arn, params::Dict{String,<:Any}) + +Provides a pre-signed URL for download of an approved attached file. This API also returns +metadata about the attached file. It will only return a downloadURL if the status of the +attached file is APPROVED. + +# Arguments +- `file_id`: The unique identifier of the attached file resource. +- `instance_id`: The unique identifier of the Connect instance. +- `associated_resource_arn`: The resource to which the attached file is (being) uploaded + to. Cases are the only current supported resource. This value must be a valid ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"urlExpiryInSeconds"`: Optional override for the expiry of the pre-signed S3 URL in + seconds. The default value is 300. +""" +function get_attached_file( + FileId, + InstanceId, + associatedResourceArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/attached-files/$(InstanceId)/$(FileId)", + Dict{String,Any}("associatedResourceArn" => associatedResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_attached_file( + FileId, + InstanceId, + associatedResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/attached-files/$(InstanceId)/$(FileId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("associatedResourceArn" => associatedResourceArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_contact_attributes(initial_contact_id, instance_id) get_contact_attributes(initial_contact_id, instance_id, params::Dict{String,<:Any}) @@ -3840,16 +5198,22 @@ Administrator Guide. says SECONDS and the Value is returned in SECONDS. When you do not use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this: { \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, - \"Value\": 24113.0 } The actual OLDEST_CONTACT_AGE is 24 seconds. Name in real-time metrics - report: Oldest SLOTS_ACTIVE Unit: COUNT Name in real-time metrics report: Active - SLOTS_AVAILABLE Unit: COUNT Name in real-time metrics report: Availability + \"Value\": 24113.0 } The actual OLDEST_CONTACT_AGE is 24 seconds. 
When the filter + RoutingStepExpression is used, this metric is still calculated from enqueue time. For + example, if a contact that has been queued under <Expression 1> for 10 seconds has + expired and <Expression 2> becomes active, then OLDEST_CONTACT_AGE for this queue + will be counted starting from 10, not 0. Name in real-time metrics report: Oldest + SLOTS_ACTIVE Unit: COUNT Name in real-time metrics report: Active SLOTS_AVAILABLE Unit: + COUNT Name in real-time metrics report: Availability - `filters`: The filters to apply to returned metrics. You can filter up to the following limits: Queues: 100 Routing profiles: 100 Channels: 3 (VOICE, CHAT, and TASK channels - are supported.) Metric data is retrieved only for the resources associated with the - queues or routing profiles, and by any channels included in the filter. (You cannot filter - by both queue AND routing profile.) You can include both resource IDs and resource ARNs in - the same request. Currently tagging is only supported on the resources that are passed in - the filter. + are supported.) RoutingStepExpressions: 50 Metric data is retrieved only for the + resources associated with the queues or routing profiles, and by any channels included in + the filter. (You cannot filter by both queue AND routing profile.) You can include both + resource IDs and resource ARNs in the same request. When using the RoutingStepExpression + filter, you need to pass exactly one QueueId. The filter is also case sensitive so when + using the RoutingStepExpression filter, grouping by ROUTING_STEP_EXPRESSION is required. + Currently tagging is only supported on the resources that are passed in the filter. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -3861,7 +5225,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys channels are supported. If you group by ROUTING_PROFILE, you must include either a queue or routing profile filter. In addition, a routing profile filter is required for metrics CONTACTS_SCHEDULED, CONTACTS_IN_QUEUE, and OLDEST_CONTACT_AGE. If no Grouping is - included in the request, a summary of metrics is returned. + included in the request, a summary of metrics is returned. When using the + RoutingStepExpression filter, group by ROUTING_STEP_EXPRESSION is required. - `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The token @@ -3957,10 +5322,13 @@ end get_federation_token(instance_id) get_federation_token(instance_id, params::Dict{String,<:Any}) -Retrieves a token for federation. This API doesn't support root users. If you try to -invoke GetFederationToken with root credentials, an error message similar to the following -one appears: Provided identity: Principal: .... User: .... cannot be used for federation -with Amazon Connect +Supports SAML sign-in for Amazon Connect. Retrieves a token for federation. The token is +for the Amazon Connect user which corresponds to the IAM credentials that were used to +invoke this action. For more information about how SAML sign-in works in Amazon Connect, +see Configure SAML with IAM for Amazon Connect in the Amazon Connect Administrator Guide. +This API doesn't support root users. 
If you try to invoke GetFederationToken with root +credentials, an error message similar to the following one appears: Provided identity: +Principal: .... User: .... cannot be used for federation with Amazon Connect # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -3989,13 +5357,57 @@ function get_federation_token( ) end +""" + get_flow_association(instance_id, resource_id, resource_type) + get_flow_association(instance_id, resource_id, resource_type, params::Dict{String,<:Any}) + +Retrieves the flow associated for a given resource. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `resource_id`: The identifier of the resource. +- `resource_type`: A valid resource type. + +""" +function get_flow_association( + InstanceId, ResourceId, ResourceType; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/flow-associations/$(InstanceId)/$(ResourceId)/$(ResourceType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow_association( + InstanceId, + ResourceId, + ResourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/flow-associations/$(InstanceId)/$(ResourceId)/$(ResourceType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_metric_data(end_time, filters, historical_metrics, instance_id, start_time) get_metric_data(end_time, filters, historical_metrics, instance_id, start_time, params::Dict{String,<:Any}) Gets historical metric data from the specified Amazon Connect instance. For a description of each historical metric, see Historical Metrics Definitions in the Amazon Connect -Administrator Guide. +Administrator Guide. We recommend using the GetMetricDataV2 API. It provides more +flexibility, features, and the ability to query longer time ranges than GetMetricData. Use +it to retrieve historical agent and contact metrics for the last 3 months, at varying +intervals. You can also use it to build custom dashboards to measure historical queue and +agent performance. For example, you can track the number of incoming contacts for the last +7 days, with data split by day, to see how contact volume changed per day of the week. # Arguments - `end_time`: The timestamp, in UNIX Epoch time format, at which to end the reporting @@ -4005,8 +5417,9 @@ Administrator Guide. - `filters`: The queues, up to 100, or channels, to use to filter the metrics returned. Metric data is retrieved only for the resources associated with the queues or channels included in the filter. You can include both queue IDs and queue ARNs in the same request. - VOICE, CHAT, and TASK channels are supported. To filter by Queues, enter the queue ID/ARN, - not the name of the queue. + VOICE, CHAT, and TASK channels are supported. RoutingStepExpression is not a valid filter + for GetMetricData and we recommend switching to GetMetricDataV2 for more up-to-date + features. To filter by Queues, enter the queue ID/ARN, not the name of the queue. - `historical_metrics`: The metrics to retrieve. Specify the name, unit, and statistic for each metric. The following historical metrics are available. For a description of each metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide. 
This @@ -4041,7 +5454,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Groupings"`: The grouping applied to the metrics returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values returned apply to the metrics for each queue rather than aggregated for all queues. If no grouping is - specified, a summary of metrics for all queues is returned. + specified, a summary of metrics for all queues is returned. RoutingStepExpression is not a + valid filter for GetMetricData and we recommend switching to GetMetricDataV2 for more + up-to-date features. - `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. @@ -4104,86 +5519,287 @@ Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 o features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical -data for the last 35 days, in 24-hour intervals. For a description of the historical +data for the last 3 months, at varying intervals. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics -definitions in the Amazon Connect Administrator's Guide. +definitions in the Amazon Connect Administrator Guide. # Arguments - `end_time`: The timestamp, in UNIX Epoch time format, at which to end the reporting interval for the retrieval of historical metrics data. The time must be later than the - start time timestamp. It cannot be later than the current timestamp. The time range between - the start and end time must be less than 24 hours. + start time timestamp. It cannot be later than the current timestamp. - `filters`: The filters to apply to returned metrics. You can filter on the following - resources: Queues Routing profiles Agents Channels User hierarchy groups At - least one filter must be passed from queues, routing profiles, agents, or user hierarchy - groups. To filter by phone number, see Create a historical metrics report in the Amazon - Connect Administrator's Guide. Note the following limits: Filter keys: A maximum of 5 - filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | - AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | - AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE - Filter values: A maximum of 100 filter values are supported in a single request. For - example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing - profiles for a total of 100 filter values. VOICE, CHAT, and TASK are valid filterValue for - the CHANNEL filter key. + resources: Agents Channels Feature Queues Routing profiles Routing step + expression User hierarchy groups At least one filter must be passed from queues, + routing profiles, agents, or user hierarchy groups. To filter by phone number, see Create a + historical metrics report in the Amazon Connect Administrator Guide. Note the following + limits: Filter keys: A maximum of 5 filter keys are supported in a single request. 
Valid + filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | + AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | + CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | + FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | + FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | + ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED | Filter values: + A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are + valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 + filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, + and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. + contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It + is available only to contacts analyzed by Contact Lens conversational analytics. + connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue + examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. + ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This + filter is case and order sensitive. JSON string fields must be sorted in ascending order + and JSON array order should be kept as is. Q_CONNECT_ENABLED. TRUE and FALSE are the only + valid filterValues for the Q_CONNECT_ENABLED filter key. TRUE includes all contacts that + had Amazon Q in Connect enabled as part of the flow. FALSE includes all contacts that did + not have Amazon Q in Connect enabled as part of the flow This filter is available only + for contact record-driven metrics. - `metrics`: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, - see Historical metrics definitions in the Amazon Connect Administrator's Guide. - AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where - Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy AGENT_NON_RESPONSE - Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, - Agent, Agent Hierarchy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon - Web Services Regions where Forecasting, capacity planning, and scheduling is available. - Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services - Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds - Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing - Profile, Agent, Agent Hierarchy AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy + see Historical metrics definitions in the Amazon Connect Administrator Guide. 
+ ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in + Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is + available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, + Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: + Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time + AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy UI name: Agent non-response + AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available + starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer + abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is + available only in Amazon Web Services Regions where Forecasting, capacity planning, and + scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is + available only in Amazon Web Services Regions where Forecasting, capacity planning, and + scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds + Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue + abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time + AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD + Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after + contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. 
For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy AVG_HOLD_TIME Unit: Seconds Valid groupings and - filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy AVG_INTERACTION_TIME Unit: Seconds Valid - groupings and filters: Queue, Channel, Routing Profile AVG_QUEUE_ANSWER_TIME Unit: - Seconds Valid groupings and filters: Queue, Channel, Routing Profile CONTACTS_ABANDONED + Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level + Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in + Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required + filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS + UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required + filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS + UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings + and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration + Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: + Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: + Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: + Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, + Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource + ID, Initiation method, Resource published timestamp UI name: Average flow time + AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, + contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle + time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds + Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer + hold time Feature is a valid filter but not a valid grouping. 
+ AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is + a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds + Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction + and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: + INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction + time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This + metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid - groupings and filters: Queue, Channel, Routing Profile CONTACTS_HANDLED Unit: Count Valid - metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_HOLD_ABANDONS Unit: - Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, - Agent, Agent Hierarchy CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_TRANSFERRED_OUT_BY_AGENT + Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent + interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts + analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption + time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid + groupings and filters: Queue, Channel, Routing Profile, Feature, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time + Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds + Valid groupings and filters: Queue, Channel, Routing Profile, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time + AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens + conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for + contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings + and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time + AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens + conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter + key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: + Cases created CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| + Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings + and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: + Contact abandoned CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect + Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in + seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts + abandoned in X seconds CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect + Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in + seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts answered + in X seconds CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD + Valid groupings and filters: Queue, Channel, Routing Profile, Feature, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature + is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric + filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, Feature, + contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API + contacts handled Feature is a valid filter but not a valid grouping. 
+ CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: + INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled + (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect + CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent + disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts + hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put + on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts + transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings + and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: + Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued + CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, + Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued + (enqueue timestamp) CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect + Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in + seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved + in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, Feature, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out + Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy MAX_QUEUED_TIME Unit: Seconds - Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent - Valid groupings and filters: Queue, Channel, Routing Profile Threshold: For ThresholdValue, - enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must - enter LT (for \"Less than\"). 
SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and - filters: Queue, Channel, Routing Profile Threshold: For ThresholdValue, enter any whole + Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts + transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings + and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out + queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings + and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: + Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow + type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows + outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: + Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows + resource ID, Initiation method, Resource published timestamp UI name: Flows started + MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Maximum flow time + MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI + name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: + Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, + Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource + ID, Initiation method, Resource published timestamp UI name: Minimum flow time + PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN + Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on + first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: + Queue, RoutingStepExpression UI name: Not available PERCENT_CONTACTS_STEP_JOINED Unit: + Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available + PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid + groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows + module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome + type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows + outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. + PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens + conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only + for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent + PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent + PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent + REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings + and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS + Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: + CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up + to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, + Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for - \"Less than\"). SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile Threshold: For ThresholdValue, enter any whole number from - 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\"). - SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid - groupings and filters: Queue, Channel, Routing Profile SUM_RETRY_CALLBACK_ATTEMPTS Unit: - Count Valid groupings and filters: Queue, Channel, Routing Profile + \"Less than\"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid + groupings and filters: Queue, RoutingStepExpression UI name: Not available + SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time + SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This + metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | + CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is + not applicable for this metric. 
SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: + Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time + SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected + SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: + Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in + Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings + and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time + SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold + time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time + SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds + Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time + SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, + Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback + attempts - `resource_arn`: The Amazon Resource Name (ARN) of the resource. This includes the instanceId an Amazon Connect instance. - `start_time`: The timestamp, in UNIX Epoch time format, at which to start the reporting interval for the retrieval of historical metrics data. The time must be before the end time - timestamp. The time range between the start and end time must be less than 24 hours. The - start time cannot be earlier than 35 days before the time of the request. Historical - metrics are available for 35 days. + timestamp. The start and end time depends on the IntervalPeriod selected. By default the + time range between start and end time is 35 days. Historical metrics are available for 3 + months. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4191,9 +5807,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. 
Valid grouping keys: - QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | - AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | - AGENT_HIERARCHY_LEVEL_FIVE + AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE + | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS + | CHANNEL | contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID | + FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | + Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | + ROUTING_STEP_EXPRESSION +- `"Interval"`: The interval period and timezone to apply to returned metrics. + IntervalPeriod: An aggregated grouping applied to request metrics. Valid IntervalPeriod + values are: FIFTEEN_MIN | THIRTY_MIN | HOUR | DAY | WEEK | TOTAL. For example, if + IntervalPeriod is selected THIRTY_MIN, StartTime and EndTime differs by 1 day, then Amazon + Connect returns 48 results in the response. Each result is aggregated by the THIRTY_MIN + period. By default Amazon Connect aggregates results based on the TOTAL interval period. + The following list describes restrictions on StartTime and EndTime based on which + IntervalPeriod is requested. FIFTEEN_MIN: The difference between StartTime and EndTime + must be less than 3 days. THIRTY_MIN: The difference between StartTime and EndTime must + be less than 3 days. HOUR: The difference between StartTime and EndTime must be less + than 3 days. DAY: The difference between StartTime and EndTime must be less than 35 + days. WEEK: The difference between StartTime and EndTime must be less than 35 days. + TOTAL: The difference between StartTime and EndTime must be less than 35 days. + TimeZone: The timezone applied to requested metrics. - `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. @@ -4335,7 +5968,9 @@ end Retrieves the current traffic distribution for a given traffic distribution group. # Arguments -- `id`: The identifier of the traffic distribution group. +- `id`: The identifier of the traffic distribution group. This can be the ID or the ARN if + the API is being called in the Region where the traffic distribution group was created. The + ARN must be provided if the call is from the replicated Region. """ function get_traffic_distribution(Id; aws_config::AbstractAWSConfig=global_aws_config()) @@ -4358,6 +5993,70 @@ function get_traffic_distribution( ) end +""" + import_phone_number(instance_id, source_phone_number_arn) + import_phone_number(instance_id, source_phone_number_arn, params::Dict{String,<:Any}) + +Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an +Amazon Connect instance. You can call this API only in the same Amazon Web Services Region +where the Amazon Connect instance was created. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `source_phone_number_arn`: The claimed phone number ARN being imported from the external + service, such as Amazon Pinpoint. If it is from Amazon Pinpoint, it looks like the ARN of + the phone number to import from Amazon Pinpoint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"PhoneNumberDescription"`: The description of the phone number. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function import_phone_number( + InstanceId, SourcePhoneNumberArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/phone-number/import", + Dict{String,Any}( + "InstanceId" => InstanceId, + "SourcePhoneNumberArn" => SourcePhoneNumberArn, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_phone_number( + InstanceId, + SourcePhoneNumberArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/phone-number/import", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceId" => InstanceId, + "SourcePhoneNumberArn" => SourcePhoneNumberArn, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_agent_statuses(instance_id) list_agent_statuses(instance_id, params::Dict{String,<:Any}) @@ -4398,6 +6097,48 @@ function list_agent_statuses( ) end +""" + list_analytics_data_associations(instance_id) + list_analytics_data_associations(instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. Lists the +association status of requested dataset ID for a given Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DataSetId"`: The identifier of the dataset to get the association status. +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_analytics_data_associations( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/analytics-data/instance/$(InstanceId)/association"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_analytics_data_associations( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/analytics-data/instance/$(InstanceId)/association", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_approved_origins(instance_id) list_approved_origins(instance_id, params::Dict{String,<:Any}) @@ -4621,6 +6362,8 @@ end This API is in preview release for Amazon Connect and is subject to change. For the specified referenceTypes, returns a list of references associated with the contact. +References are links to documents that are related to a contact, such as emails, +attachments, or URLs. # Arguments - `contact_id`: The identifier of the initial contact. 
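As a usage illustration for the import_phone_number wrappers added above, here is a minimal sketch. It assumes the usual AWS.jl `@service` loading pattern; the instance ID and the Amazon Pinpoint phone-number ARN are placeholders, not values taken from this patch.

using AWS
@service Connect  # load the generated bindings from src/services/connect.jl

# Placeholder identifiers; substitute a real Connect instance ID and the ARN of a
# phone number claimed in Amazon Pinpoint.
instance_id = "11111111-2222-3333-4444-555555555555"
source_arn = "arn:aws:sms-voice:us-east-1:111122223333:phone-number/example"

# Required arguments only; the wrapper injects a ClientToken UUID automatically.
Connect.import_phone_number(instance_id, source_arn)

# Optional request fields go in the params dict, mirroring the documented keys.
Connect.import_phone_number(
    instance_id,
    source_arn,
    Dict{String,Any}("PhoneNumberDescription" => "Imported from Pinpoint")
)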
@@ -4787,6 +6530,47 @@ function list_evaluation_forms( ) end +""" + list_flow_associations(instance_id) + list_flow_associations(instance_id, params::Dict{String,<:Any}) + +List the flow association based on the filters. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceType"`: A valid resource type. +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_flow_associations( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/flow-associations-summary/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_flow_associations( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/flow-associations-summary/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_hours_of_operations(instance_id) list_hours_of_operations(instance_id, params::Dict{String,<:Any}) @@ -4958,6 +6742,7 @@ specified Amazon Connect instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"integrationArn"`: The Amazon Resource Name (ARN) of the integration. - `"integrationType"`: The integration type. - `"maxResults"`: The maximum number of results to return per page. - `"nextToken"`: The token for the next set of results. Use the value returned in the @@ -5076,7 +6861,10 @@ end Provides information about the phone numbers for the specified Amazon Connect instance. For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center -in the Amazon Connect Administrator Guide. The phone number Arn value that is returned +in the Amazon Connect Administrator Guide. We recommend using ListPhoneNumbersV2 to +return phone number types. ListPhoneNumbers doesn't support number types UIFN, SHARED, +THIRD_PARTY_TF, and THIRD_PARTY_DID. While it returns numbers of those types, it +incorrectly lists them as TOLL_FREE or DID. The phone number Arn value that is returned from each of the items in the PhoneNumberSummaryList cannot be used to tag phone number resources. It will fail with a ResourceNotFoundException. Instead, use the ListPhoneNumbersV2 API. It returns the new phone number ARN that can be used to tag phone @@ -5093,7 +6881,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - `"phoneNumberCountryCodes"`: The ISO country code. -- `"phoneNumberTypes"`: The type of phone number. +- `"phoneNumberTypes"`: The type of phone number. We recommend using ListPhoneNumbersV2 to + return phone number types. While ListPhoneNumbers returns number types UIFN, SHARED, + THIRD_PARTY_TF, and THIRD_PARTY_DID, it incorrectly lists them as TOLL_FREE or DID. 
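+
+A brief, hypothetical sketch of calling this operation, and the recommended
+ListPhoneNumbersV2, through the usual AWS.jl `@service` pattern; the instance ID below is
+a placeholder:
+
+    using AWS
+    @service Connect
+
+    # Basic listing for one instance.
+    Connect.list_phone_numbers("11111111-2222-3333-4444-555555555555")
+
+    # Recommended alternative: scope ListPhoneNumbersV2 to the same instance via the
+    # documented InstanceId key.
+    Connect.list_phone_numbers_v2(
+        Dict{String,Any}("InstanceId" => "11111111-2222-3333-4444-555555555555")
+    )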
""" function list_phone_numbers(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) return connect( @@ -5125,10 +6915,18 @@ Lists phone numbers claimed to your Amazon Connect instance or traffic distribut If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with traffic distribution group. For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the -Amazon Connect Administrator Guide. +Amazon Connect Administrator Guide. When given an instance ARN, ListPhoneNumbersV2 +returns only the phone numbers claimed to the instance. When given a traffic distribution +group ARN ListPhoneNumbersV2 returns only the phone numbers claimed to the traffic +distribution group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceId"`: The identifier of the Amazon Connect instance that phone numbers are + claimed to. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + If both TargetArn and InstanceId are not provided, this API lists numbers claimed to all + the Amazon Connect instances belonging to your account in the same AWS Region as the + request. - `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. @@ -5137,9 +6935,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys part of the country code. - `"PhoneNumberTypes"`: The type of phone number. - `"TargetArn"`: The Amazon Resource Name (ARN) for Amazon Connect instances or traffic - distribution groups that phone numbers are claimed to. If TargetArn input is not provided, - this API lists numbers claimed to all the Amazon Connect instances belonging to your - account in the same Amazon Web Services Region as the request. + distribution groups that phone number inbound traffic is routed through. If both TargetArn + and InstanceId input are not provided, this API lists numbers claimed to all the Amazon + Connect instances belonging to your account in the same Amazon Web Services Region as the + request. """ function list_phone_numbers_v2(; aws_config::AbstractAWSConfig=global_aws_config()) return connect( @@ -5158,6 +6957,46 @@ function list_phone_numbers_v2( ) end +""" + list_predefined_attributes(instance_id) + list_predefined_attributes(instance_id, params::Dict{String,<:Any}) + +Lists predefined attributes for the specified Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function list_predefined_attributes( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/predefined-attributes/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_predefined_attributes( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/predefined-attributes/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_prompts(instance_id) list_prompts(instance_id, params::Dict{String,<:Any}) @@ -5296,31 +7135,92 @@ Provides information about the quick connects for the specified Amazon Connect i # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"QuickConnectTypes"`: The type of quick connect. In the Amazon Connect console, when you - create a quick connect, you are prompted to assign one of the following types: Agent - (USER), External (PHONE_NUMBER), or Queue (QUEUE). +- `"QuickConnectTypes"`: The type of quick connect. In the Amazon Connect admin website, + when you create a quick connect, you are prompted to assign one of the following types: + Agent (USER), External (PHONE_NUMBER), or Queue (QUEUE). - `"maxResults"`: The maximum number of results to return per page. The default MaxResult size is 100. - `"nextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. """ -function list_quick_connects(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) +function list_quick_connects(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) + return connect( + "GET", + "/quick-connects/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_quick_connects( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/quick-connects/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_realtime_contact_analysis_segments_v2(contact_id, instance_id, output_type, segment_types) + list_realtime_contact_analysis_segments_v2(contact_id, instance_id, output_type, segment_types, params::Dict{String,<:Any}) + +Provides a list of analysis segments for a real-time analysis session. + +# Arguments +- `contact_id`: The identifier of the contact in this instance of Amazon Connect. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `output_type`: The Contact Lens output type to be returned. +- `segment_types`: Enum with segment types . Each value corresponds to a segment type + returned in the segments list of the API. Each segment type has its own structure. + Different channels may have different sets of supported segment types. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function list_realtime_contact_analysis_segments_v2( + ContactId, + InstanceId, + OutputType, + SegmentTypes; + aws_config::AbstractAWSConfig=global_aws_config(), +) return connect( - "GET", - "/quick-connects/$(InstanceId)"; + "POST", + "/contact/list-real-time-analysis-segments-v2/$(InstanceId)/$(ContactId)", + Dict{String,Any}("OutputType" => OutputType, "SegmentTypes" => SegmentTypes); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_quick_connects( +function list_realtime_contact_analysis_segments_v2( + ContactId, InstanceId, + OutputType, + SegmentTypes, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "GET", - "/quick-connects/$(InstanceId)", - params; + "POST", + "/contact/list-real-time-analysis-segments-v2/$(InstanceId)/$(ContactId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "OutputType" => OutputType, "SegmentTypes" => SegmentTypes + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -5491,12 +7391,53 @@ function list_security_keys( ) end +""" + list_security_profile_applications(instance_id, security_profile_id) + list_security_profile_applications(instance_id, security_profile_id, params::Dict{String,<:Any}) + +Returns a list of third-party applications in a specific security profile. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `security_profile_id`: The identifier for the security profle. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_security_profile_applications( + InstanceId, SecurityProfileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/security-profiles-applications/$(InstanceId)/$(SecurityProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_security_profile_applications( + InstanceId, + SecurityProfileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/security-profiles-applications/$(InstanceId)/$(SecurityProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_security_profile_permissions(instance_id, security_profile_id) list_security_profile_permissions(instance_id, security_profile_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Lists the -permissions granted to a security profile. +Lists the permissions granted to a security profile. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -5659,6 +7600,48 @@ function list_task_templates( ) end +""" + list_traffic_distribution_group_users(traffic_distribution_group_id) + list_traffic_distribution_group_users(traffic_distribution_group_id, params::Dict{String,<:Any}) + +Lists traffic distribution group users. + +# Arguments +- `traffic_distribution_group_id`: The identifier of the traffic distribution group. This + can be the ID or the ARN if the API is being called in the Region where the traffic + distribution group was created. 
The ARN must be provided if the call is from the replicated + Region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_traffic_distribution_group_users( + TrafficDistributionGroupId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/traffic-distribution-group/$(TrafficDistributionGroupId)/user"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_traffic_distribution_group_users( + TrafficDistributionGroupId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/traffic-distribution-group/$(TrafficDistributionGroupId)/user", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_traffic_distribution_groups() list_traffic_distribution_groups(params::Dict{String,<:Any}) @@ -5780,6 +7763,48 @@ function list_user_hierarchy_groups( ) end +""" + list_user_proficiencies(instance_id, user_id) + list_user_proficiencies(instance_id, user_id, params::Dict{String,<:Any}) + +Lists proficiencies associated with a user. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `user_id`: The identifier of the user account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_user_proficiencies( + InstanceId, UserId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/users/$(InstanceId)/$(UserId)/proficiencies"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_user_proficiencies( + InstanceId, + UserId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/users/$(InstanceId)/$(UserId)/proficiencies", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_users(instance_id) list_users(instance_id, params::Dict{String,<:Any}) @@ -5819,6 +7844,91 @@ function list_users( ) end +""" + list_view_versions(instance_id, view_id) + list_view_versions(instance_id, view_id, params::Dict{String,<:Any}) + +Returns all the available versions for the specified Amazon Connect instance and view +identifier. Results will be sorted from highest to lowest. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. +- `view_id`: The identifier of the view. Both ViewArn and ViewId can be used. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. The default MaxResult + size is 100. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
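+
+# Example
+
+A minimal usage sketch, not part of the generated definition; both identifiers below are
+placeholders:
+
+```julia
+# List all versions of a view, newest first.
+list_view_versions(\"INSTANCE_ID\", \"VIEW_ID\")
+```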
+""" +function list_view_versions( + InstanceId, ViewId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/views/$(InstanceId)/$(ViewId)/versions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_view_versions( + InstanceId, + ViewId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/views/$(InstanceId)/$(ViewId)/versions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_views(instance_id) + list_views(instance_id, params::Dict{String,<:Any}) + +Returns views in the given instance. Results are sorted primarily by type, and secondarily +by name. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. The default MaxResult + size is 100. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"type"`: The type of the view. +""" +function list_views(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) + return connect( + "GET", + "/views/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_views( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/views/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ monitor_contact(contact_id, instance_id, user_id) monitor_contact(contact_id, instance_id, user_id, params::Dict{String,<:Any}) @@ -5830,51 +7940,99 @@ specified by userId will be set to silent monitoring mode on the contact. - `contact_id`: The identifier of the contact. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. -- `user_id`: The identifier of the user account. +- `user_id`: The identifier of the user account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowedMonitorCapabilities"`: Specify which monitoring actions the user is allowed to + take. For example, whether the user is allowed to escalate from silent monitoring to barge. + AllowedMonitorCapabilities is required if barge is enabled. +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. 
+""" +function monitor_contact( + ContactId, InstanceId, UserId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/contact/monitor", + Dict{String,Any}( + "ContactId" => ContactId, + "InstanceId" => InstanceId, + "UserId" => UserId, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function monitor_contact( + ContactId, + InstanceId, + UserId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contact/monitor", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ContactId" => ContactId, + "InstanceId" => InstanceId, + "UserId" => UserId, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + pause_contact(contact_id, instance_id) + pause_contact(contact_id, instance_id, params::Dict{String,<:Any}) + +Allows pausing an ongoing task contact. + +# Arguments +- `contact_id`: The identifier of the contact. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AllowedMonitorCapabilities"`: Specify which monitoring actions the user is allowed to - take. For example, whether the user is allowed to escalate from silent monitoring to barge. -- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If not provided, the Amazon Web Services SDK populates this - field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"ContactFlowId"`: The identifier of the flow. """ -function monitor_contact( - ContactId, InstanceId, UserId; aws_config::AbstractAWSConfig=global_aws_config() +function pause_contact( + ContactId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( "POST", - "/contact/monitor", - Dict{String,Any}( - "ContactId" => ContactId, - "InstanceId" => InstanceId, - "UserId" => UserId, - "ClientToken" => string(uuid4()), - ); + "/contact/pause", + Dict{String,Any}("ContactId" => ContactId, "InstanceId" => InstanceId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function monitor_contact( +function pause_contact( ContactId, InstanceId, - UserId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( "POST", - "/contact/monitor", + "/contact/pause", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "ContactId" => ContactId, - "InstanceId" => InstanceId, - "UserId" => UserId, - "ClientToken" => string(uuid4()), - ), + Dict{String,Any}("ContactId" => ContactId, "InstanceId" => InstanceId), params, ), ); @@ -5934,10 +8092,20 @@ end Releases a phone number previously claimed to an Amazon Connect instance or traffic distribution group. You can call this API only in the Amazon Web Services Region where the number was claimed. To release phone numbers from a traffic distribution group, use the -ReleasePhoneNumber API, not the Amazon Connect console. After releasing a phone number, the -phone number enters into a cooldown period of 30 days. It cannot be searched for or claimed -again until the period has ended. If you accidentally release a phone number, contact -Amazon Web Services Support. +ReleasePhoneNumber API, not the Amazon Connect admin website. 
After releasing a phone +number, the phone number enters into a cooldown period for up to 180 days. It cannot be +searched for or claimed again until the period has ended. If you accidentally release a +phone number, contact Amazon Web Services Support. If you plan to claim and release +numbers frequently, contact us for a service quota exception. Otherwise, it is possible you +will be blocked from claiming and releasing any more numbers until up to 180 days past the +oldest number released has expired. By default you can claim and release up to 200% of your +maximum number of active phone numbers. If you claim and release phone numbers using the UI +or API during a rolling 180 day cycle that exceeds 200% of your phone number service level +quota, you will be blocked from claiming any more numbers until 180 days past the oldest +number released has expired. For example, if you already have 99 claimed numbers and a +service level quota of 99 phone numbers, and in any 180 day period you release 99, claim +99, and then release 99, you will have exceeded the 200% limit. At that point you are +blocked from claiming any more numbers until you open an Amazon Web Services support ticket. # Arguments - `phone_number_id`: A unique identifier for the phone number. @@ -5979,9 +8147,10 @@ end replicate_instance(instance_id, replica_alias, replica_region) replicate_instance(instance_id, replica_alias, replica_region, params::Dict{String,<:Any}) -Replicates an Amazon Connect instance in the specified Amazon Web Services Region. For more -information about replicating an Amazon Connect instance, see Create a replica of your -existing Amazon Connect instance in the Amazon Connect Administrator Guide. +Replicates an Amazon Connect instance in the specified Amazon Web Services Region and +copies configuration information for Amazon Connect resources across Amazon Web Services +Regions. For more information about replicating an Amazon Connect instance, see Create a +replica of your existing Amazon Connect instance in the Amazon Connect Administrator Guide. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6041,13 +8210,61 @@ function replicate_instance( ) end +""" + resume_contact(contact_id, instance_id) + resume_contact(contact_id, instance_id, params::Dict{String,<:Any}) + +Allows resuming a task contact in a paused state. + +# Arguments +- `contact_id`: The identifier of the contact. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ContactFlowId"`: The identifier of the flow. 
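+
+# Example
+
+A minimal usage sketch, not part of the generated definition; the identifiers below are
+placeholders:
+
+```julia
+# Resume a previously paused task contact.
+resume_contact(\"CONTACT_ID\", \"INSTANCE_ID\")
+```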
+""" +function resume_contact( + ContactId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/contact/resume", + Dict{String,Any}("ContactId" => ContactId, "InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function resume_contact( + ContactId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contact/resume", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ContactId" => ContactId, "InstanceId" => InstanceId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ resume_contact_recording(contact_id, initial_contact_id, instance_id) resume_contact_recording(contact_id, initial_contact_id, instance_id, params::Dict{String,<:Any}) When a contact is being recorded, and the recording has been suspended using -SuspendContactRecording, this API resumes recording the call. Only voice recordings are -supported at this time. +SuspendContactRecording, this API resumes recording whatever recording is selected in the +flow configuration: call, screen, or both. If only call recording or only screen recording +is enabled, then it would resume. Voice and screen recordings are supported. # Arguments - `contact_id`: The identifier of the contact. @@ -6102,8 +8319,8 @@ function resume_contact_recording( end """ - search_available_phone_numbers(phone_number_country_code, phone_number_type, target_arn) - search_available_phone_numbers(phone_number_country_code, phone_number_type, target_arn, params::Dict{String,<:Any}) + search_available_phone_numbers(phone_number_country_code, phone_number_type) + search_available_phone_numbers(phone_number_country_code, phone_number_type, params::Dict{String,<:Any}) Searches for available phone numbers that you can claim to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you @@ -6113,21 +8330,24 @@ distribution group. # Arguments - `phone_number_country_code`: The ISO country code. - `phone_number_type`: The type of phone number. -- `target_arn`: The Amazon Resource Name (ARN) for Amazon Connect instances or traffic - distribution groups that phone numbers are claimed to. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceId"`: The identifier of the Amazon Connect instance that phone numbers are + claimed to. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + You must enter InstanceId or TargetArn. - `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - `"PhoneNumberPrefix"`: The prefix of the phone number. If provided, it must contain + as part of the country code. +- `"TargetArn"`: The Amazon Resource Name (ARN) for Amazon Connect instances or traffic + distribution groups that phone number inbound traffic is routed through. You must enter + InstanceId or TargetArn. 
""" function search_available_phone_numbers( PhoneNumberCountryCode, - PhoneNumberType, - TargetArn; + PhoneNumberType; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( @@ -6136,7 +8356,6 @@ function search_available_phone_numbers( Dict{String,Any}( "PhoneNumberCountryCode" => PhoneNumberCountryCode, "PhoneNumberType" => PhoneNumberType, - "TargetArn" => TargetArn, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -6145,7 +8364,6 @@ end function search_available_phone_numbers( PhoneNumberCountryCode, PhoneNumberType, - TargetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -6158,7 +8376,6 @@ function search_available_phone_numbers( Dict{String,Any}( "PhoneNumberCountryCode" => PhoneNumberCountryCode, "PhoneNumberType" => PhoneNumberType, - "TargetArn" => TargetArn, ), params, ), @@ -6168,6 +8385,151 @@ function search_available_phone_numbers( ) end +""" + search_contact_flow_modules(instance_id) + search_contact_flow_modules(instance_id, params::Dict{String,<:Any}) + +Searches the flow modules in an Amazon Connect instance, with optional filtering. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return contact flow modules. The + name and description fields support \"contains\" queries with a minimum of 2 characters and + a maximum of 25 characters. Any queries with character lengths outside of this range will + result in invalid results. +- `"SearchFilter"`: Filters to be applied to search results. +""" +function search_contact_flow_modules( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-contact-flow-modules", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_contact_flow_modules( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-contact-flow-modules", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_contact_flows(instance_id) + search_contact_flows(instance_id, params::Dict{String,<:Any}) + +Searches the contact flows in an Amazon Connect instance, with optional filtering. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return flows. 
The name and + description fields support \"contains\" queries with a minimum of 2 characters and a + maximum of 25 characters. Any queries with character lengths outside of this range will + result in invalid results. +- `"SearchFilter"`: Filters to be applied to search results. +""" +function search_contact_flows(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) + return connect( + "POST", + "/search-contact-flows", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_contact_flows( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-contact-flows", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_contacts(instance_id, time_range) + search_contacts(instance_id, time_range, params::Dict{String,<:Any}) + +Searches contacts in an Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of Amazon Connect instance. You can find the instance ID in + the Amazon Resource Name (ARN) of the instance. +- `time_range`: Time range that you want to search results. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return contacts. +- `"Sort"`: Specifies a field to sort by and a sort order. +""" +function search_contacts( + InstanceId, TimeRange; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-contacts", + Dict{String,Any}("InstanceId" => InstanceId, "TimeRange" => TimeRange); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_contacts( + InstanceId, + TimeRange, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-contacts", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InstanceId" => InstanceId, "TimeRange" => TimeRange), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_hours_of_operations(instance_id) search_hours_of_operations(instance_id, params::Dict{String,<:Any}) @@ -6213,6 +8575,50 @@ function search_hours_of_operations( ) end +""" + search_predefined_attributes(instance_id) + search_predefined_attributes(instance_id, params::Dict{String,<:Any}) + +Predefined attributes that meet certain criteria. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return predefined attributes. 
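+
+# Example
+
+A minimal usage sketch, not part of the generated definition; the instance ID below is a
+placeholder:
+
+```julia
+# Return predefined attributes, relying on the default search criteria.
+search_predefined_attributes(\"11111111-2222-3333-4444-555555555555\")
+```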
+""" +function search_predefined_attributes( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-predefined-attributes", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_predefined_attributes( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-predefined-attributes", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_prompts(instance_id) search_prompts(instance_id, params::Dict{String,<:Any}) @@ -6260,8 +8666,7 @@ end search_queues(instance_id) search_queues(instance_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Searches queues -in an Amazon Connect instance, with optional filtering. +Searches queues in an Amazon Connect instance, with optional filtering. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6307,39 +8712,84 @@ end search_quick_connects(instance_id) search_quick_connects(instance_id, params::Dict{String,<:Any}) -Searches quick connects in an Amazon Connect instance, with optional filtering. +Searches quick connects in an Amazon Connect instance, with optional filtering. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return quick connects. +- `"SearchFilter"`: Filters to be applied to search results. +""" +function search_quick_connects( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-quick-connects", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_quick_connects( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-quick-connects", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_resource_tags(instance_id) + search_resource_tags(instance_id, params::Dict{String,<:Any}) + +Searches tags used in an Amazon Connect instance using optional search criteria. # Arguments -- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance - ID in the Amazon Resource Name (ARN) of the instance. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the Amazon Resource Name (ARN) of the instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. 
Use the value returned in the previous response in the next request to retrieve the next set of results. -- `"SearchCriteria"`: The search criteria to be used to return quick connects. -- `"SearchFilter"`: Filters to be applied to search results. +- `"ResourceTypes"`: The list of resource types to be used to search tags from. If not + provided or if any empty list is provided, this API will search from all supported resource + types. +- `"SearchCriteria"`: The search criteria to be used to return tags. """ -function search_quick_connects( - InstanceId; aws_config::AbstractAWSConfig=global_aws_config() -) +function search_resource_tags(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) return connect( "POST", - "/search-quick-connects", + "/search-resource-tags", Dict{String,Any}("InstanceId" => InstanceId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function search_quick_connects( +function search_resource_tags( InstanceId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( "POST", - "/search-quick-connects", + "/search-resource-tags", Dict{String,Any}( mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) ); @@ -6352,8 +8802,7 @@ end search_routing_profiles(instance_id) search_routing_profiles(instance_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Searches -routing profiles in an Amazon Connect instance, with optional filtering. +Searches routing profiles in an Amazon Connect instance, with optional filtering. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6401,8 +8850,7 @@ end search_security_profiles(instance_id) search_security_profiles(instance_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Searches -security profiles in an Amazon Connect instance, with optional filtering. +Searches security profiles in an Amazon Connect instance, with optional filtering. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6447,34 +8895,45 @@ function search_security_profiles( end """ - search_users() - search_users(params::Dict{String,<:Any}) + search_users(instance_id) + search_users(instance_id, params::Dict{String,<:Any}) Searches users in an Amazon Connect instance, with optional filtering. AfterContactWorkTimeLimit is returned in milliseconds. +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. InstanceID is a required field. The + \"Required: No\" below is incorrect. + # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"InstanceId"`: The identifier of the Amazon Connect instance. You can find the instance - ID in the Amazon Resource Name (ARN) of the instance. - `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - `"SearchCriteria"`: - `"SearchFilter"`: Filters to be applied to search results. 
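+
+# Example
+
+A usage sketch, not part of the generated definition; the instance ID is a placeholder and
+the page size shown is illustrative:
+
+```julia
+# Search users in a (placeholder) instance, 10 results per page.
+search_users(
+    \"11111111-2222-3333-4444-555555555555\",
+    Dict{String,Any}(\"MaxResults\" => 10),
+)
+```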
""" -function search_users(; aws_config::AbstractAWSConfig=global_aws_config()) +function search_users(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) return connect( - "POST", "/search-users"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "POST", + "/search-users", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function search_users( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( "POST", "/search-users", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -6523,6 +8982,151 @@ function search_vocabularies( ) end +""" + send_chat_integration_event(destination_id, event, source_id) + send_chat_integration_event(destination_id, event, source_id, params::Dict{String,<:Any}) + +Processes chat integration events from Amazon Web Services or external integrations to +Amazon Connect. A chat integration event includes: SourceId, DestinationId, and Subtype: +a set of identifiers, uniquely representing a chat ChatEvent: details of the chat action +to perform such as sending a message, event, or disconnecting from a chat When a chat +integration event is sent with chat identifiers that do not map to an active chat contact, +a new chat contact is also created before handling chat action. Access to this API is +currently restricted to Amazon Pinpoint for supporting SMS integration. + +# Arguments +- `destination_id`: Chat system identifier, used in part to uniquely identify chat. This is + associated with the Amazon Connect instance and flow to be used to start chats. For SMS, + this is the phone number destination of inbound SMS messages represented by an Amazon + Pinpoint phone number ARN. +- `event`: Chat integration event payload +- `source_id`: External identifier of chat customer participant, used in part to uniquely + identify a chat. For SMS, this is the E164 phone number of the chat customer participant. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NewSessionDetails"`: Contact properties to apply when starting a new chat. If the + integration event is handled with an existing chat, this is ignored. +- `"Subtype"`: Classification of a channel. This is used in part to uniquely identify chat. 
+ Valid value: [\"connect:sms\"] +""" +function send_chat_integration_event( + DestinationId, Event, SourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/chat-integration-event", + Dict{String,Any}( + "DestinationId" => DestinationId, "Event" => Event, "SourceId" => SourceId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_chat_integration_event( + DestinationId, + Event, + SourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/chat-integration-event", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DestinationId" => DestinationId, + "Event" => Event, + "SourceId" => SourceId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_attached_file_upload(file_name, file_size_in_bytes, file_use_case_type, instance_id, associated_resource_arn) + start_attached_file_upload(file_name, file_size_in_bytes, file_use_case_type, instance_id, associated_resource_arn, params::Dict{String,<:Any}) + +Provides a pre-signed Amazon S3 URL in response for uploading your content. You may only +use this API to upload attachments to a Connect Case. + +# Arguments +- `file_name`: A case-sensitive name of the attached file being uploaded. +- `file_size_in_bytes`: The size of the attached file in bytes. +- `file_use_case_type`: The use case for the file. +- `instance_id`: The unique identifier of the Connect instance. +- `associated_resource_arn`: The resource to which the attached file is (being) uploaded + to. Cases are the only current supported resource. This value must be a valid ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"CreatedBy"`: Represents the identity that created the file. +- `"Tags"`: The tags used to organize, track, or control access for this resource. For + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +- `"UrlExpiryInSeconds"`: Optional override for the expiry of the pre-signed S3 URL in + seconds. The default value is 300. 
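+
+# Example
+
+A usage sketch, not part of the generated definition; every value below is a placeholder,
+and the file use case type is an illustrative value only:
+
+```julia
+# Request a pre-signed upload URL for a file attached to a Connect Case (placeholder ARN).
+start_attached_file_upload(
+    \"transcript.pdf\",      # FileName (placeholder)
+    204800,                  # FileSizeInBytes (placeholder)
+    \"ATTACHMENT\",          # FileUseCaseType (illustrative value)
+    \"INSTANCE_ID\",         # InstanceId (placeholder)
+    \"CASE_RESOURCE_ARN\",   # associatedResourceArn (placeholder)
+)
+```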
+""" +function start_attached_file_upload( + FileName, + FileSizeInBytes, + FileUseCaseType, + InstanceId, + associatedResourceArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/attached-files/$(InstanceId)", + Dict{String,Any}( + "FileName" => FileName, + "FileSizeInBytes" => FileSizeInBytes, + "FileUseCaseType" => FileUseCaseType, + "associatedResourceArn" => associatedResourceArn, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_attached_file_upload( + FileName, + FileSizeInBytes, + FileUseCaseType, + InstanceId, + associatedResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/attached-files/$(InstanceId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "FileName" => FileName, + "FileSizeInBytes" => FileSizeInBytes, + "FileUseCaseType" => FileUseCaseType, + "associatedResourceArn" => associatedResourceArn, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_chat_contact(contact_flow_id, instance_id, participant_details) start_chat_contact(contact_flow_id, instance_id, participant_details, params::Dict{String,<:Any}) @@ -6542,10 +9146,10 @@ Administrator Guide. # Arguments - `contact_flow_id`: The identifier of the flow for initiating the chat. To see the - ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to - Routing, Contact Flows. Choose the flow. On the flow page, under the name of the flow, - choose Show additional flow information. The ContactFlowId is the last part of the ARN, - shown here in bold: + ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, + Contact Flows. Choose the flow. On the flow page, under the name of the flow, choose Show + additional flow information. The ContactFlowId is the last part of the ARN, shown here in + bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact -flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6564,13 +9168,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. -- `"InitialMessage"`: The initial message to be sent to the newly created chat. +- `"InitialMessage"`: The initial message to be sent to the newly created chat. If you have + a Lex bot in your flow, the initial message is not delivered to the Lex bot. - `"PersistentChat"`: Enable persistent chats. For more information about enabling persistent chat, and for example use cases and how to configure for them, see Enable persistent chat. - `"RelatedContactId"`: The unique identifier for an Amazon Connect contact. This identifier is related to the chat starting. You cannot provide data for both RelatedContactId and PersistentChat. +- `"SegmentAttributes"`: A set of system defined key-value pairs stored on individual + contact segments using an attribute map. The attributes are standard Amazon Connect + attributes. They can be accessed in flows. 
Attribute keys can include only alphanumeric, -, + and _. This field can be used to show channel subtype, such as connect:Guide. The types + application/vnd.amazonaws.connect.message.interactive and + application/vnd.amazonaws.connect.message.interactive.response must be present in the + SupportedMessagingContentTypes field of this API in order to set SegmentAttributes as { + \"connect:Subtype\": {\"valueString\" : \"connect:Guide\" }}. - `"SupportedMessagingContentTypes"`: The supported chat message content types. Supported types are text/plain, text/markdown, application/json, application/vnd.amazonaws.connect.message.interactive, and @@ -6851,10 +9464,10 @@ to the quota Amazon Connect campaigns. # Arguments - `contact_flow_id`: The identifier of the flow for the outbound call. To see the - ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to - Routing, Contact Flows. Choose the flow. On the flow page, under the name of the flow, - choose Show additional flow information. The ContactFlowId is the last part of the ARN, - shown here in bold: + ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, + Contact Flows. Choose the flow. On the flow page, under the name of the flow, choose Show + additional flow information. The ContactFlowId is the last part of the ARN, shown here in + bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact -flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx - `destination_phone_number`: The phone number of the customer, in E.164 format. @@ -6875,10 +9488,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys field. For more information about idempotency, see Making retries safe with idempotent APIs. The token is valid for 7 days after creation. If a contact is already started, the contact ID is returned. +- `"Description"`: A description of the voice contact that is shown to an agent in the + Contact Control Panel (CCP). +- `"Name"`: The name of a voice contact that is shown to an agent in the Contact Control + Panel (CCP). - `"QueueId"`: The queue for the call. If you specify a queue, the phone displayed for caller ID is the phone number specified in the queue. If you do not specify a queue, the queue defined in the flow is used. If you do not specify a queue, you must specify a source phone number. +- `"References"`: A formatted URL that is shown to an agent in the Contact Control Panel + (CCP). Contacts can have the following reference types at the time of creation: URL | + NUMBER | STRING | DATE | EMAIL. ATTACHMENT is not a supported reference type during voice + contact creation. +- `"RelatedContactId"`: The contactId that is related to this contact. Linking voice, task, + or chat by using RelatedContactID copies over contact attributes from the related contact + to the new contact. All updates to user-defined attributes in the new contact are limited + to the individual contact ID. There are no limits to the number of contacts that can be + linked by using RelatedContactId. - `"SourcePhoneNumber"`: The phone number associated with the Amazon Connect instance, in E.164 format. If you do not specify a source phone number, you must specify a queue. - `"TrafficType"`: Denotes the class of traffic. Calls with different traffic types are @@ -6935,7 +9561,25 @@ end start_task_contact(instance_id, name) start_task_contact(instance_id, name, params::Dict{String,<:Any}) -Initiates a flow to start a new task. 
+Initiates a flow to start a new task contact. For more information about task contacts, see +Concepts: Tasks in Amazon Connect in the Amazon Connect Administrator Guide. When using +PreviousContactId and RelatedContactId input parameters, note the following: +PreviousContactId Any updates to user-defined task contact attributes on any contact +linked through the same PreviousContactId will affect every contact in the chain. There +can be a maximum of 12 linked task contacts in a chain. That is, 12 task contacts can be +created that share the same PreviousContactId. RelatedContactId Copies contact +attributes from the related task contact to the new contact. Any update on attributes in +a new task contact does not update attributes on previous contact. There’s no limit on +the number of task contacts that can be created that use the same RelatedContactId. In +addition, when calling StartTaskContact include only one of these parameters: +ContactFlowID, QuickConnectID, or TaskTemplateID. Only one parameter is required as long as +the task template has a flow configured to run it. If more than one parameter is specified, +or only the TaskTemplateID is specified but it does not have a flow configured, the request +returns an error because Amazon Connect cannot identify the unique flow to run when the +task is created. A ServiceQuotaExceededException occurs when the number of open tasks +exceeds the active tasks quota or there are already 12 tasks referencing the same +PreviousContactId. For more information about service quotas for task contacts, see Amazon +Connect service quotas in the Amazon Connect Administrator Guide. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6952,23 +9596,35 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. - `"ContactFlowId"`: The identifier of the flow for initiating the tasks. To see the - ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to - Routing, Contact Flows. Choose the flow. On the flow page, under the name of the flow, - choose Show additional flow information. The ContactFlowId is the last part of the ARN, - shown here in bold: + ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, + Contact Flows. Choose the flow. On the flow page, under the name of the flow, choose Show + additional flow information. The ContactFlowId is the last part of the ARN, shown here in + bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact -flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx - `"Description"`: A description of the task that is shown to an agent in the Contact Control Panel (CCP). -- `"PreviousContactId"`: The identifier of the previous chat, voice, or task contact. -- `"QuickConnectId"`: The identifier for the quick connect. +- `"PreviousContactId"`: The identifier of the previous chat, voice, or task contact. Any + updates to user-defined attributes to task contacts linked using the same PreviousContactID + will affect every contact in the chain. There can be a maximum of 12 linked task contacts + in a chain. +- `"QuickConnectId"`: The identifier for the quick connect. Tasks that are created by using + QuickConnectId will use the flow that is defined on agent or queue quick connect. 
For more + information about quick connects, see Create quick connects. - `"References"`: A formatted URL that is shown to an agent in the Contact Control Panel - (CCP). -- `"RelatedContactId"`: The contactId that is related to this contact. + (CCP). Tasks can have the following reference types at the time of creation: URL | NUMBER | + STRING | DATE | EMAIL. ATTACHMENT is not a supported reference type during task creation. +- `"RelatedContactId"`: The contactId that is related to this contact. Linking tasks + together by using RelatedContactID copies over contact attributes from the related task + contact to the new task contact. All updates to user-defined attributes in the new task + contact are limited to the individual contact ID, unlike what happens when tasks are linked + by using PreviousContactID. There are no limits to the number of contacts that can be + linked by using RelatedContactId. - `"ScheduledTime"`: The timestamp, in Unix Epoch seconds format, at which to start running the inbound flow. The scheduled time cannot be in the past. It must be within up to 6 days in future. -- `"TaskTemplateId"`: A unique identifier for the task template. +- `"TaskTemplateId"`: A unique identifier for the task template. For more information about + task templates, see Create task templates in the Amazon Connect Administrator Guide. """ function start_task_contact( InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() @@ -7008,18 +9664,110 @@ function start_task_contact( ) end +""" + start_web_rtccontact(contact_flow_id, instance_id, participant_details) + start_web_rtccontact(contact_flow_id, instance_id, participant_details, params::Dict{String,<:Any}) + +Places an inbound in-app, web, or video call to a contact, and then initiates the flow. It +performs the actions in the flow that are specified (in ContactFlowId) and present in the +Amazon Connect instance (specified as InstanceId). + +# Arguments +- `contact_flow_id`: The identifier of the flow for the call. To see the ContactFlowId in + the Amazon Connect admin website, on the navigation menu go to Routing, Contact Flows. + Choose the flow. On the flow page, under the name of the flow, choose Show additional flow + information. The ContactFlowId is the last part of the ARN, shown here in bold: + arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact + -flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `participant_details`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowedCapabilities"`: Information about the video sharing capabilities of the + participants (customer, agent). +- `"Attributes"`: A custom key-value pair using an attribute map. The attributes are + standard Amazon Connect attributes, and can be accessed in flows just like any other + contact attributes. There can be up to 32,768 UTF-8 bytes across all key-value pairs per + contact. Attribute keys can include only alphanumeric, -, and _ characters. +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent + APIs. The token is valid for 7 days after creation. 
If a contact is already started, the + contact ID is returned. +- `"Description"`: A description of the task that is shown to an agent in the Contact + Control Panel (CCP). +- `"References"`: A formatted URL that is shown to an agent in the Contact Control Panel + (CCP). Tasks can have the following reference types at the time of creation: URL | NUMBER | + STRING | DATE | EMAIL. ATTACHMENT is not a supported reference type during task creation. +- `"RelatedContactId"`: The unique identifier for an Amazon Connect contact. This + identifier is related to the contact starting. +""" +function start_web_rtccontact( + ContactFlowId, + InstanceId, + ParticipantDetails; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/contact/webrtc", + Dict{String,Any}( + "ContactFlowId" => ContactFlowId, + "InstanceId" => InstanceId, + "ParticipantDetails" => ParticipantDetails, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_web_rtccontact( + ContactFlowId, + InstanceId, + ParticipantDetails, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/contact/webrtc", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ContactFlowId" => ContactFlowId, + "InstanceId" => InstanceId, + "ParticipantDetails" => ParticipantDetails, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_contact(contact_id, instance_id) stop_contact(contact_id, instance_id, params::Dict{String,<:Any}) -Ends the specified contact. This call does not work for the following initiation methods: -DISCONNECT TRANSFER QUEUE_TRANSFER +Ends the specified contact. Use this API to stop queued callbacks. It does not work for +voice contacts that use the following initiation methods: DISCONNECT TRANSFER +QUEUE_TRANSFER EXTERNAL_OUTBOUND MONITOR Chat and task contacts can be terminated in +any state, regardless of initiation method. # Arguments - `contact_id`: The ID of the contact. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DisconnectReason"`: The reason a contact can be disconnected. Only Amazon Connect + outbound campaigns can provide this field. """ function stop_contact( ContactId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -7222,11 +9970,13 @@ end suspend_contact_recording(contact_id, initial_contact_id, instance_id) suspend_contact_recording(contact_id, initial_contact_id, instance_id, params::Dict{String,<:Any}) -When a contact is being recorded, this API suspends recording the call. For example, you -might suspend the call recording while collecting sensitive information, such as a credit -card number. Then use ResumeContactRecording to restart recording. The period of time that -the recording is suspended is filled with silence in the final recording. Only voice -recordings are supported at this time. +When a contact is being recorded, this API suspends recording whatever is selected in the +flow configuration: call, screen, or both. If only call recording or only screen recording +is enabled, then it would be suspended. 
For example, you might suspend the screen recording +while collecting sensitive information, such as a credit card number. Then use +ResumeContactRecording to restart recording the screen. The period of time that the +recording is suspended is filled with silence in the final recording. Voice and screen +recordings are supported. # Arguments - `contact_id`: The identifier of the contact. @@ -7280,6 +10030,58 @@ function suspend_contact_recording( ) end +""" + tag_contact(contact_id, instance_id, tags) + tag_contact(contact_id, instance_id, tags, params::Dict{String,<:Any}) + +Adds the specified tags to the contact resource. For more information about this API is +used, see Set up granular billing for a detailed view of your Amazon Connect usage. + +# Arguments +- `contact_id`: The identifier of the contact in this instance of Amazon Connect. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `tags`: The tags to be assigned to the contact resource. For example, { \"Tags\": + {\"key1\":\"value1\", \"key2\":\"value2\"} }. Authorization is not supported by this tag. + +""" +function tag_contact( + ContactId, InstanceId, Tags; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/contact/tags", + Dict{String,Any}( + "ContactId" => ContactId, "InstanceId" => InstanceId, "Tags" => Tags + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_contact( + ContactId, + InstanceId, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contact/tags", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ContactId" => ContactId, "InstanceId" => InstanceId, "Tags" => Tags + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -7293,7 +10095,7 @@ Identity-Based Policy Examples in the Amazon Connect Administrator Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource. - `tags`: The tags used to organize, track, or control access for this resource. For - example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. + example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. """ function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -7346,7 +10148,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. - `"QueueId"`: The identifier for the queue. -- `"UserId"`: The identifier for the user. +- `"UserId"`: The identifier for the user. This can be the ID or the ARN of the user. 
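+
+# Example
+A minimal usage sketch (not from the AWS documentation): the identifiers are placeholders,
+the module name assumes AWS.jl's `@service` convention, and configured credentials are
+required.
+
+```julia
+using AWS
+@service Connect
+
+# Transfer an ongoing contact to a queue by running the given flow.
+Connect.transfer_contact(
+    "11111111-1111-1111-1111-111111111111",  # ContactFlowId (placeholder)
+    "22222222-2222-2222-2222-222222222222",  # ContactId (placeholder)
+    "33333333-3333-3333-3333-333333333333",  # InstanceId (placeholder)
+    Dict("QueueId" => "44444444-4444-4444-4444-444444444444"),
+)
+```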
""" function transfer_contact( ContactFlowId, ContactId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -7364,28 +10166,70 @@ function transfer_contact( feature_set=SERVICE_FEATURE_SET, ) end -function transfer_contact( - ContactFlowId, +function transfer_contact( + ContactFlowId, + ContactId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contact/transfer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ContactFlowId" => ContactFlowId, + "ContactId" => ContactId, + "InstanceId" => InstanceId, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_contact(contact_id, instance_id, tag_keys) + untag_contact(contact_id, instance_id, tag_keys, params::Dict{String,<:Any}) + +Removes the specified tags from the contact resource. For more information about this API +is used, see Set up granular billing for a detailed view of your Amazon Connect usage. + +# Arguments +- `contact_id`: The identifier of the contact in this instance of Amazon Connect. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `tag_keys`: A list of tag keys. Existing tags on the contact whose keys are members of + this list will be removed. + +""" +function untag_contact( + ContactId, InstanceId, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/contact/tags/$(InstanceId)/$(ContactId)", + Dict{String,Any}("TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_contact( ContactId, InstanceId, + TagKeys, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return connect( - "POST", - "/contact/transfer", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ContactFlowId" => ContactFlowId, - "ContactId" => ContactId, - "InstanceId" => InstanceId, - "ClientToken" => string(uuid4()), - ), - params, - ), - ); + "DELETE", + "/contact/tags/$(InstanceId)/$(ContactId)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("TagKeys" => TagKeys), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -7540,7 +10384,13 @@ Administrator Guide. - `attributes`: The Amazon Connect attributes. These attributes can be accessed in flows just like any other contact attributes. You can have up to 32,768 UTF-8 bytes across all attributes for a contact. Attribute keys can include only alphanumeric, dash, and - underscore characters. + underscore characters. When the attributes for a contact exceed 32 KB, the contact is + routed down the Error branch of the flow. As a mitigation, consider the following options: + Remove unnecessary attributes by setting their values to empty. If the attributes are + only used in one flow and don't need to be referred to outside of that flow (for example, + by a Lambda or another flow), then use flow attributes. This way you aren't needlessly + persisting the 32 KB of information from one flow to another. For more information, see + Flow block: Set contact attributes in the Amazon Connect Administrator Guide. - `initial_contact_id`: The identifier of the contact. This is the identifier of the contact associated with the first interaction with the contact center. - `instance_id`: The identifier of the Amazon Connect instance. 
You can find the instance @@ -7640,12 +10490,15 @@ end update_contact_flow_content(contact_flow_id, content, instance_id, params::Dict{String,<:Any}) Updates the specified flow. You can also create and update flows using the Amazon Connect -Flow language. +Flow language. Use the SAVED alias in the request to describe the SAVED content of a Flow. +For example, arn:aws:.../contact-flow/{id}:SAVED. Once a contact flow is published, SAVED +needs to be supplied to view saved content that has not been published. # Arguments - `contact_flow_id`: The identifier of the flow. -- `content`: The JSON string that represents flow's content. For an example, see Example - contact flow in Amazon Connect Flow language. +- `content`: The JSON string that represents the content of the flow. For an example, see + Example flow in Amazon Connect Flow language. Length Constraints: Minimum length of 1. + Maximum length of 256000. - `instance_id`: The identifier of the Amazon Connect instance. """ @@ -7722,11 +10575,15 @@ end update_contact_flow_module_content(contact_flow_module_id, content, instance_id) update_contact_flow_module_content(contact_flow_module_id, content, instance_id, params::Dict{String,<:Any}) -Updates specified flow module for the specified Amazon Connect instance. +Updates specified flow module for the specified Amazon Connect instance. Use the SAVED +alias in the request to describe the SAVED content of a Flow. For example, +arn:aws:.../contact-flow/{id}:SAVED. Once a contact flow is published, SAVED needs to be +supplied to view saved content that has not been published. # Arguments - `contact_flow_module_id`: The identifier of the flow module. -- `content`: The content of the flow module. +- `content`: The JSON string that represents the content of the flow. For an example, see + Example flow in Amazon Connect Flow language. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -7844,6 +10701,61 @@ function update_contact_flow_name( ) end +""" + update_contact_routing_data(contact_id, instance_id) + update_contact_routing_data(contact_id, instance_id, params::Dict{String,<:Any}) + +Updates routing priority and age on the contact (QueuePriority and +QueueTimeAdjustmentInSeconds). These properties can be used to change a customer's position +in the queue. For example, you can move a contact to the back of the queue by setting a +lower routing priority relative to other contacts in queue; or you can move a contact to +the front of the queue by increasing the routing age which will make the contact look +artificially older and therefore higher up in the first-in-first-out routing order. Note +that adjusting the routing age of a contact affects only its position in queue, and not its +actual queue wait time as reported through metrics. These properties can also be updated by +using the Set routing priority / age flow block. Either QueuePriority or +QueueTimeAdjustmentInSeconds should be provided within the request body, but not both. + +# Arguments +- `contact_id`: The identifier of the contact in this instance of Amazon Connect. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"QueuePriority"`: Priority of the contact in the queue. The default priority for new + contacts is 5. 
You can raise the priority of a contact compared to other contacts in the + queue by assigning them a higher priority, such as 1 or 2. +- `"QueueTimeAdjustmentSeconds"`: The number of seconds to add or subtract from the + contact's routing age. Contacts are routed to agents on a first-come, first-serve basis. + This means that changing their amount of time in queue compared to others also changes + their position in queue. +""" +function update_contact_routing_data( + ContactId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/contacts/$(InstanceId)/$(ContactId)/routing-data"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_contact_routing_data( + ContactId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/contacts/$(InstanceId)/$(ContactId)/routing-data", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_contact_schedule(contact_id, instance_id, scheduled_time) update_contact_schedule(contact_id, instance_id, scheduled_time, params::Dict{String,<:Any}) @@ -8174,8 +11086,8 @@ function update_participant_role_config( end """ - update_phone_number(phone_number_id, target_arn) - update_phone_number(phone_number_id, target_arn, params::Dict{String,<:Any}) + update_phone_number(phone_number_id) + update_phone_number(phone_number_id, params::Dict{String,<:Any}) Updates your claimed phone number from its current Amazon Connect instance or traffic distribution group to another Amazon Connect instance or traffic distribution group in the @@ -8188,29 +11100,32 @@ UpdatePhoneNumber operation. # Arguments - `phone_number_id`: A unique identifier for the phone number. -- `target_arn`: The Amazon Resource Name (ARN) for Amazon Connect instances or traffic - distribution groups that phone numbers are claimed to. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"InstanceId"`: The identifier of the Amazon Connect instance that phone numbers are + claimed to. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + You must enter InstanceId or TargetArn. +- `"TargetArn"`: The Amazon Resource Name (ARN) for Amazon Connect instances or traffic + distribution groups that phone number inbound traffic is routed through. You must enter + InstanceId or TargetArn. 
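+
+# Example
+A minimal usage sketch (illustrative only): identifiers are placeholders and the module
+name assumes AWS.jl's `@service` convention.
+
+```julia
+using AWS
+@service Connect
+
+# Point a claimed phone number at a different instance by passing InstanceId.
+Connect.update_phone_number(
+    "12345678-1234-1234-1234-123456789012",  # PhoneNumberId (placeholder)
+    Dict("InstanceId" => "33333333-3333-3333-3333-333333333333"),
+)
+```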
""" function update_phone_number( - PhoneNumberId, TargetArn; aws_config::AbstractAWSConfig=global_aws_config() + PhoneNumberId; aws_config::AbstractAWSConfig=global_aws_config() ) return connect( "PUT", "/phone-number/$(PhoneNumberId)", - Dict{String,Any}("TargetArn" => TargetArn, "ClientToken" => string(uuid4())); + Dict{String,Any}("ClientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function update_phone_number( PhoneNumberId, - TargetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -8218,19 +11133,97 @@ function update_phone_number( "PUT", "/phone-number/$(PhoneNumberId)", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "TargetArn" => TargetArn, "ClientToken" => string(uuid4()) - ), - params, - ), + mergewith(_merge, Dict{String,Any}("ClientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_phone_number_metadata(phone_number_id) + update_phone_number_metadata(phone_number_id, params::Dict{String,<:Any}) + +Updates a phone number’s metadata. To verify the status of a previous +UpdatePhoneNumberMetadata operation, call the DescribePhoneNumber API. + +# Arguments +- `phone_number_id`: The Amazon Resource Name (ARN) or resource ID of the phone number. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"PhoneNumberDescription"`: The description of the phone number. +""" +function update_phone_number_metadata( + PhoneNumberId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "PUT", + "/phone-number/$(PhoneNumberId)/metadata", + Dict{String,Any}("ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_phone_number_metadata( + PhoneNumberId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/phone-number/$(PhoneNumberId)/metadata", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClientToken" => string(uuid4())), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end +""" + update_predefined_attribute(instance_id, name) + update_predefined_attribute(instance_id, name, params::Dict{String,<:Any}) + +Updates a predefined attribute for the specified Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `name`: The name of the predefined attribute. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Values"`: The values of the predefined attribute. 
+""" +function update_predefined_attribute( + InstanceId, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/predefined-attributes/$(InstanceId)/$(Name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_predefined_attribute( + InstanceId, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/predefined-attributes/$(InstanceId)/$(Name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_prompt(instance_id, prompt_id) update_prompt(instance_id, prompt_id, params::Dict{String,<:Any}) @@ -8246,7 +11239,8 @@ Updates a prompt. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: A description of the prompt. - `"Name"`: The name of the prompt. -- `"S3Uri"`: The URI for the S3 bucket where the prompt is stored. +- `"S3Uri"`: The URI for the S3 bucket where the prompt is stored. You can provide S3 + pre-signed URLs returned by the GetPromptFile API instead of providing S3 URIs. """ function update_prompt( InstanceId, PromptId; aws_config::AbstractAWSConfig=global_aws_config() @@ -8410,15 +11404,19 @@ end update_queue_outbound_caller_config(instance_id, outbound_caller_config, queue_id, params::Dict{String,<:Any}) This API is in preview release for Amazon Connect and is subject to change. Updates the -outbound caller ID name, number, and outbound whisper flow for a specified queue. If the -number being used in the input is claimed to a traffic distribution group, and you are -calling this API using an instance in the Amazon Web Services Region where the traffic -distribution group was created, you can use either a full phone number ARN or UUID value -for the OutboundCallerIdNumberId value of the OutboundCallerConfig request body parameter. -However, if the number is claimed to a traffic distribution group and you are calling this -API using an instance in the alternate Amazon Web Services Region associated with the -traffic distribution group, you must provide a full phone number ARN. If a UUID is provided -in this scenario, you will receive a ResourceNotFoundException. +outbound caller ID name, number, and outbound whisper flow for a specified queue. If the +phone number is claimed to a traffic distribution group that was created in the same Region +as the Amazon Connect instance where you are calling this API, then you can use a full +phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is +claimed to a traffic distribution group that is in one Region, and you are calling this API +from an instance in another Amazon Web Services Region that is associated with the traffic +distribution group, you must provide a full phone number ARN. If a UUID is provided in this +scenario, you will receive a ResourceNotFoundException. Only use the phone number ARN +format that doesn't contain instance in the path, for example, +arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is +returned when you call the ListPhoneNumbersV2 API. If you plan to use IAM policies to +allow/deny access to this API for phone number resources claimed to a traffic distribution +group, see Allow or Deny queue API actions for phone numbers in a replica Region. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. 
You can find the instance @@ -8594,6 +11592,58 @@ function update_quick_connect_name( ) end +""" + update_routing_profile_agent_availability_timer(agent_availability_timer, instance_id, routing_profile_id) + update_routing_profile_agent_availability_timer(agent_availability_timer, instance_id, routing_profile_id, params::Dict{String,<:Any}) + +Whether agents with this routing profile will have their routing order calculated based on +time since their last inbound contact or longest idle time. + +# Arguments +- `agent_availability_timer`: Whether agents with this routing profile will have their + routing order calculated based on time since their last inbound contact or longest idle + time. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `routing_profile_id`: The identifier of the routing profile. + +""" +function update_routing_profile_agent_availability_timer( + AgentAvailabilityTimer, + InstanceId, + RoutingProfileId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/routing-profiles/$(InstanceId)/$(RoutingProfileId)/agent-availability-timer", + Dict{String,Any}("AgentAvailabilityTimer" => AgentAvailabilityTimer); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_routing_profile_agent_availability_timer( + AgentAvailabilityTimer, + InstanceId, + RoutingProfileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/routing-profiles/$(InstanceId)/$(RoutingProfileId)/agent-availability-timer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AgentAvailabilityTimer" => AgentAvailabilityTimer), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_routing_profile_concurrency(instance_id, media_concurrencies, routing_profile_id) update_routing_profile_concurrency(instance_id, media_concurrencies, routing_profile_id, params::Dict{String,<:Any}) @@ -8856,8 +11906,7 @@ end update_security_profile(instance_id, security_profile_id) update_security_profile(instance_id, security_profile_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Updates a -security profile. +Updates a security profile. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -8866,9 +11915,14 @@ security profile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowedAccessControlHierarchyGroupId"`: The identifier of the hierarchy group that a + security profile uses to restrict access to resources in Amazon Connect. - `"AllowedAccessControlTags"`: The list of tags that a security profile uses to restrict access to resources in Amazon Connect. +- `"Applications"`: A list of the third-party application's metadata. - `"Description"`: The description of the security profile. +- `"HierarchyRestrictedResources"`: The list of resources that a security profile applies + hierarchy restrictions to in Amazon Connect. Following are acceptable ResourceNames: User. - `"Permissions"`: The permissions granted to a security profile. For a list of valid permissions, see List of security profile permissions. 
- `"TagRestrictedResources"`: The list of resources that a security profile applies tag @@ -8955,9 +12009,13 @@ end update_traffic_distribution(id) update_traffic_distribution(id, params::Dict{String,<:Any}) -Updates the traffic distribution for a given traffic distribution group. For more -information about updating a traffic distribution group, see Update telephony traffic -distribution across Amazon Web Services Regions in the Amazon Connect Administrator Guide. +Updates the traffic distribution for a given traffic distribution group. The SignInConfig +distribution is available only on a default TrafficDistributionGroup (see the IsDefault +parameter in the TrafficDistributionGroup data type). If you call UpdateTrafficDistribution +with a modified SignInConfig and a non-default TrafficDistributionGroup, an +InvalidRequestException is returned. For more information about updating a traffic +distribution group, see Update telephony traffic distribution across Amazon Web Services +Regions in the Amazon Connect Administrator Guide. # Arguments - `id`: The identifier of the traffic distribution group. This can be the ID or the ARN if @@ -8966,6 +12024,9 @@ distribution across Amazon Web Services Regions in the Amazon Connect Administr # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AgentConfig"`: The distribution of agents between the instance and its replica(s). +- `"SignInConfig"`: The distribution that determines which Amazon Web Services Regions + should be used to sign in agents in to both the instance and its replica(s). - `"TelephonyConfig"`: The distribution of traffic between the instance and its replica(s). """ function update_traffic_distribution(Id; aws_config::AbstractAWSConfig=global_aws_config()) @@ -9199,6 +12260,51 @@ function update_user_phone_config( ) end +""" + update_user_proficiencies(instance_id, user_id, user_proficiencies) + update_user_proficiencies(instance_id, user_id, user_proficiencies, params::Dict{String,<:Any}) + +Updates the properties associated with the proficiencies of a user. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `user_id`: The identifier of the user account. +- `user_proficiencies`: The proficiencies to be updated for the user. Proficiencies must + first be associated to the user. You can do this using AssociateUserProficiencies API. 
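+
+# Example
+A minimal usage sketch (illustrative only): identifiers are placeholders, the module name
+assumes AWS.jl's `@service` convention, and the proficiency keys shown (AttributeName,
+AttributeValue, Level) are assumed to follow the Amazon Connect UserProficiency shape.
+
+```julia
+using AWS
+@service Connect
+
+# Change the level of a proficiency that is already associated with the user.
+Connect.update_user_proficiencies(
+    "33333333-3333-3333-3333-333333333333",  # InstanceId (placeholder)
+    "55555555-5555-5555-5555-555555555555",  # UserId (placeholder)
+    [Dict("AttributeName" => "Language", "AttributeValue" => "German", "Level" => 3.0)],
+)
+```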
+ +""" +function update_user_proficiencies( + InstanceId, UserId, UserProficiencies; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/users/$(InstanceId)/$(UserId)/proficiencies", + Dict{String,Any}("UserProficiencies" => UserProficiencies); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_user_proficiencies( + InstanceId, + UserId, + UserProficiencies, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/users/$(InstanceId)/$(UserId)/proficiencies", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("UserProficiencies" => UserProficiencies), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_user_routing_profile(instance_id, routing_profile_id, user_id) update_user_routing_profile(instance_id, routing_profile_id, user_id, params::Dict{String,<:Any}) @@ -9289,3 +12395,96 @@ function update_user_security_profiles( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_view_content(content, instance_id, status, view_id) + update_view_content(content, instance_id, status, view_id, params::Dict{String,<:Any}) + +Updates the view content of the given view identifier in the specified Amazon Connect +instance. It performs content validation if Status is set to SAVED and performs full +content validation if Status is PUBLISHED. Note that the SAVED alias' content will always +be updated, but the LATEST alias' content will only be updated if Status is PUBLISHED. + +# Arguments +- `content`: View content containing all content necessary to render a view except for + runtime input data and the runtime input schema, which is auto-generated by this operation. + The total uncompressed content has a maximum file size of 400kB. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. +- `status`: Indicates the view status as either SAVED or PUBLISHED. The PUBLISHED status + will initiate validation on the content. +- `view_id`: The identifier of the view. Both ViewArn and ViewId can be used. + +""" +function update_view_content( + Content, InstanceId, Status, ViewId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/views/$(InstanceId)/$(ViewId)", + Dict{String,Any}("Content" => Content, "Status" => Status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_view_content( + Content, + InstanceId, + Status, + ViewId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/views/$(InstanceId)/$(ViewId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("Content" => Content, "Status" => Status), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_view_metadata(instance_id, view_id) + update_view_metadata(instance_id, view_id, params::Dict{String,<:Any}) + +Updates the view metadata. Note that either Name or Description must be provided. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. +- `view_id`: The identifier of the view. Both ViewArn and ViewId can be used. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: The description of the view. +- `"Name"`: The name of the view. 
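+
+# Example
+A minimal usage sketch (illustrative only): identifiers are placeholders and the module
+name assumes AWS.jl's `@service` convention.
+
+```julia
+using AWS
+@service Connect
+
+# Rename a view; at least one of Name or Description must be supplied in the params dict.
+Connect.update_view_metadata(
+    "33333333-3333-3333-3333-333333333333",  # InstanceId (placeholder)
+    "66666666-6666-6666-6666-666666666666",  # ViewId (placeholder)
+    Dict("Name" => "AgentOrderLookup"),
+)
+```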
+"""
+function update_view_metadata(
+    InstanceId, ViewId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return connect(
+        "POST",
+        "/views/$(InstanceId)/$(ViewId)/metadata";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_view_metadata(
+    InstanceId,
+    ViewId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connect(
+        "POST",
+        "/views/$(InstanceId)/$(ViewId)/metadata",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
diff --git a/src/services/connectcases.jl b/src/services/connectcases.jl
index 72ed9e2d6d..02563a9cfc 100644
--- a/src/services/connectcases.jl
+++ b/src/services/connectcases.jl
@@ -84,13 +84,16 @@ end
     create_case(domain_id, fields, template_id)
     create_case(domain_id, fields, template_id, params::Dict{String,<:Any})
 
-Creates a case in the specified Cases domain. Case system and custom fields are taken as an
-array id/value pairs with a declared data types. The following fields are required when
-creating a case: <ul> <li> <p> <code>customer_id</code> -
-You must provide the full customer profile ARN in this format:
-<code>arn:aws:profile:your AWS Region:your AWS account ID:domains/profiles domain
-name/profiles/profile ID</code> </p> </li> <li> <p>
-<code>title</code> </p> </li> </ul> </note>
+If you provide a value for PerformedBy.UserArn you must also have connect:DescribeUser
+permission on the User ARN resource that you provide. Creates a case in the specified Cases
+domain. Case system and custom fields are taken as an array of id/value pairs with declared
+data types. The following fields are required when creating a case:   customer_id - You
+must provide the full customer profile ARN in this format:
+arn:aws:profile:your_AWS_Region:your_AWS_account
+ID:domains/your_profiles_domain_name/profiles/profile_ID    title
 
 # Arguments
 - `domain_id`: The unique identifier of the Cases domain.
@@ -103,6 +106,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the
   idempotency of the request. If not provided, the Amazon Web Services SDK populates this
   field. For more information about idempotency, see Making retries safe with idempotent APIs.
+- `"performedBy"`:
 """
 function create_case(
     domainId, fields, templateId; aws_config::AbstractAWSConfig=global_aws_config()
 )
@@ -151,7 +155,7 @@ Creates a domain, which is a container for all case data, such as cases, fields,
 and layouts. Each Amazon Connect instance can be associated with only one Cases domain.
 This will not associate your connect instance to Cases domain. Instead, use the Amazon
 Connect CreateIntegrationAssociation API. You need specific IAM permissions to successfully
-associate the Cases domain. For more information, see Onboard to Cases.
+associate the Cases domain. For more information, see Onboard to Cases.
 
 # Arguments
 - `name`: The name for your Cases domain. It must be unique for your Amazon Web Services
@@ -275,11 +279,13 @@ end
     create_related_item(case_id, content, domain_id, type)
     create_related_item(case_id, content, domain_id, type, params::Dict{String,<:Any})
 
 Creates a related item (comments, tasks, and contacts) and associates it with a case.
 A Related Item is a resource that is associated with a case. It may or may not have an
 external identifier linking it to an external resource (for example, a contactArn). All
 Related Items have their own internal identifier, the relatedItemArn. Examples of related
-items include comments and contacts.
+items include comments and contacts. If you provide a value for performedBy.userArn you
+must also have DescribeUser permission on the ARN of the user that you provide.
 
 # Arguments
 - `case_id`: A unique identifier of the case.
 - `content`: The content of a related item to be created.
 - `domain_id`: The unique identifier of the Cases domain.
 - `type`: The type of a related item.
 
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"performedBy"`: Represents the creator of the related item.
 """
 function create_related_item(
     caseId, content, domainId, type; aws_config::AbstractAWSConfig=global_aws_config()
 )
@@ -371,7 +380,11 @@ end
     delete_domain(domain_id)
     delete_domain(domain_id, params::Dict{String,<:Any})
 
-Deletes a domain.
+Deletes a Cases domain. After deleting your domain you must disassociate the deleted domain
+from your Amazon Connect instance with another API call before being able to use Cases
+again with this Amazon Connect instance. See DeleteIntegrationAssociation
+(https://docs.aws.amazon.com/connect/latest/APIReference/API_DeleteIntegrationAssociation.html).
 
 # Arguments
 - `domain_id`: The unique identifier of the Cases domain.
@@ -399,6 +412,140 @@ function delete_domain(
     )
 end
 
+"""
+    delete_field(domain_id, field_id)
+    delete_field(domain_id, field_id, params::Dict{String,<:Any})
+
+Deletes a field from a cases template. You can delete up to 100 fields per domain. After a
+field is deleted:   You can still retrieve the field by calling BatchGetField.   You cannot
+update a deleted field by calling UpdateField; it throws a ValidationException.   Deleted
+fields are not included in the ListFields response.   Calling CreateCase with a deleted
+field throws a ValidationException denoting which field IDs in the request have been
+deleted.   Calling GetCase with a deleted field ID returns the deleted field's value if one
+exists.   Calling UpdateCase with a deleted field ID throws a ValidationException if the
+case does not already contain a value for the deleted field. Otherwise it succeeds,
+allowing you to update or remove (using emptyValue: {}) the field's value from the case.
+GetTemplate does not return field IDs for deleted fields.   GetLayout does not return
+field IDs for deleted fields.   Calling SearchCases with the deleted field ID as a filter
+returns any cases that have a value for the deleted field that matches the filter criteria.
+Calling SearchCases with a searchTerm value that matches a deleted field's value on a
+case returns the case in the response.   Calling BatchPutFieldOptions with a deleted field
+ID throws a ValidationException.   Calling GetCaseEventConfiguration does not return field
+IDs for deleted fields.
+
+# Arguments
+- `domain_id`: The unique identifier of the Cases domain.
+- `field_id`: Unique identifier of the field.
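+
+# Example
+A minimal usage sketch (illustrative only): identifiers are placeholders and the module
+name assumes AWS.jl's `@service` convention for connectcases.jl.
+
+```julia
+using AWS
+@service Connectcases
+
+# Delete a field definition from the Cases domain.
+Connectcases.delete_field(
+    "77777777-7777-7777-7777-777777777777",  # domainId (placeholder)
+    "88888888-8888-8888-8888-888888888888",  # fieldId (placeholder)
+)
+```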
+
+"""
+function delete_field(domainId, fieldId; aws_config::AbstractAWSConfig=global_aws_config())
+    return connectcases(
+        "DELETE",
+        "/domains/$(domainId)/fields/$(fieldId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_field(
+    domainId,
+    fieldId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connectcases(
+        "DELETE",
+        "/domains/$(domainId)/fields/$(fieldId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_layout(domain_id, layout_id)
+    delete_layout(domain_id, layout_id, params::Dict{String,<:Any})
+
+Deletes a layout from a cases template. You can delete up to 100 layouts per domain. After
+a layout is deleted:   You can still retrieve the layout by calling GetLayout.   You cannot
+update a deleted layout by calling UpdateLayout; it throws a ValidationException.   Deleted
+layouts are not included in the ListLayouts response.
+
+# Arguments
+- `domain_id`: The unique identifier of the Cases domain.
+- `layout_id`: The unique identifier of the layout.
+
+"""
+function delete_layout(
+    domainId, layoutId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return connectcases(
+        "DELETE",
+        "/domains/$(domainId)/layouts/$(layoutId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_layout(
+    domainId,
+    layoutId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connectcases(
+        "DELETE",
+        "/domains/$(domainId)/layouts/$(layoutId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_template(domain_id, template_id)
+    delete_template(domain_id, template_id, params::Dict{String,<:Any})
+
+Deletes a cases template. You can delete up to 100 templates per domain. After a cases
+template is deleted:   You can still retrieve the template by calling GetTemplate.   You
+cannot update the template.   You cannot create a case by using the deleted template.
+Deleted templates are not included in the ListTemplates response.
+
+# Arguments
+- `domain_id`: The unique identifier of the Cases domain.
+- `template_id`: A unique identifier of a template.
+
+"""
+function delete_template(
+    domainId, templateId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return connectcases(
+        "DELETE",
+        "/domains/$(domainId)/templates/$(templateId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_template(
+    domainId,
+    templateId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connectcases(
+        "DELETE",
+        "/domains/$(domainId)/templates/$(templateId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_case(case_id, domain_id, fields)
     get_case(case_id, domain_id, fields, params::Dict{String,<:Any})
@@ -442,6 +589,48 @@ function get_case(
     )
 end
 
+"""
+    get_case_audit_events(case_id, domain_id)
+    get_case_audit_events(case_id, domain_id, params::Dict{String,<:Any})
+
+Returns the audit history about a specific case if it exists.
+
+# Arguments
+- `case_id`: A unique identifier of the case.
+- `domain_id`: The unique identifier of the Cases domain.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of audit events to return. The current maximum
+  supported value is 25. This is also the default when no other value is provided.
+- `"nextToken"`: The token for the next set of results. Use the value returned in the
+  previous response in the next request to retrieve the next set of results.
+"""
+function get_case_audit_events(
+    caseId, domainId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return connectcases(
+        "POST",
+        "/domains/$(domainId)/cases/$(caseId)/audit-history";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_case_audit_events(
+    caseId,
+    domainId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connectcases(
+        "POST",
+        "/domains/$(domainId)/cases/$(caseId)/audit-history",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_case_event_configuration(domain_id)
     get_case_event_configuration(domain_id, params::Dict{String,<:Any})
@@ -839,7 +1028,8 @@ end
     put_case_event_configuration(domain_id, event_bridge)
     put_case_event_configuration(domain_id, event_bridge, params::Dict{String,<:Any})
 
-API for adding case event publishing configuration
+Adds case event publishing configuration. For a complete list of fields you can add to the
+event message, see Create case fields in the Amazon Connect Administrator Guide.
 
 # Arguments
 - `domain_id`: The unique identifier of the Cases domain.
@@ -1039,9 +1229,11 @@ end
     update_case(case_id, domain_id, fields)
     update_case(case_id, domain_id, fields, params::Dict{String,<:Any})
 
-Updates the values of fields on a case. Fields to be updated are received as an array of
-id/value pairs identical to the CreateCase input . If the action is successful, the service
-sends back an HTTP 200 response with an empty HTTP body.
+If you provide a value for PerformedBy.UserArn you must also have connect:DescribeUser
+permission on the User ARN resource that you provide. Updates the values of fields on a
+case. Fields to be updated are received as an array of id/value pairs identical to the
+CreateCase input. If the action is successful, the service sends back an HTTP 200 response
+with an empty HTTP body.
 
 # Arguments
 - `case_id`: A unique identifier of the case.
 - `domain_id`: The unique identifier of the Cases domain.
 - `fields`: An array of objects with fieldId (matching ListFields/DescribeField) and value
   union data, structured identical to CreateCase.
 
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"performedBy"`:
 """
 function update_case(
     caseId, domainId, fields; aws_config::AbstractAWSConfig=global_aws_config()
@@ -1131,7 +1326,7 @@ layouts because they are not configurable.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"content"`: Information about which fields will be present in the layout, the order of
-  the fields, and a read-only attribute of the field.
+  the fields.
 - `"name"`: The name of the layout. It must be unique per domain.
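+
+# Example
+A minimal usage sketch (illustrative only): identifiers are placeholders, the module name
+assumes AWS.jl's `@service` convention, and the positional arguments are assumed to follow
+the package's usual (domainId, layoutId[, params]) pattern.
+
+```julia
+using AWS
+@service Connectcases
+
+# Rename a layout; updated layout content could be passed under the "content" key.
+Connectcases.update_layout(
+    "77777777-7777-7777-7777-777777777777",  # domainId (placeholder)
+    "99999999-9999-9999-9999-999999999999",  # layoutId (placeholder)
+    Dict("name" => "EscalationsLayout"),
+)
+```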
""" function update_layout( diff --git a/src/services/connectparticipant.jl b/src/services/connectparticipant.jl index aa5f2a2ea1..50df610420 100644 --- a/src/services/connectparticipant.jl +++ b/src/services/connectparticipant.jl @@ -9,8 +9,9 @@ using AWS.UUIDs complete_attachment_upload(attachment_ids, client_token, x-_amz-_bearer, params::Dict{String,<:Any}) Allows you to confirm that the attachment has been uploaded using the pre-signed URL -provided in StartAttachmentUpload API. ConnectionToken is used for invoking this API -instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use +provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment +with that identifier is already being uploaded. ConnectionToken is used for invoking this +API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. # Arguments @@ -95,8 +96,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ConnectParticipant"`: Amazon Connect Participant is used to mark the participant as connected for customer participant in message streaming, as well as for agent or manager participant in non-streaming chats. -- `"Type"`: Type of connection information required. This can be omitted if - ConnectParticipant is true. +- `"Type"`: Type of connection information required. If you need CONNECTION_CREDENTIALS + along with marking participant as connected, pass CONNECTION_CREDENTIALS in Type. """ function create_participant_connection( X_Amz_Bearer; aws_config::AbstractAWSConfig=global_aws_config() @@ -131,6 +132,52 @@ function create_participant_connection( ) end +""" + describe_view(view_token, x-_amz-_bearer) + describe_view(view_token, x-_amz-_bearer, params::Dict{String,<:Any}) + +Retrieves the view for the specified view token. + +# Arguments +- `view_token`: An encrypted token originating from the interactive message of a ShowView + block operation. Represents the desired view. +- `x-_amz-_bearer`: The connection token. + +""" +function describe_view( + ViewToken, X_Amz_Bearer; aws_config::AbstractAWSConfig=global_aws_config() +) + return connectparticipant( + "GET", + "/participant/views/$(ViewToken)", + Dict{String,Any}("headers" => Dict{String,Any}("X-Amz-Bearer" => X_Amz_Bearer)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_view( + ViewToken, + X_Amz_Bearer, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connectparticipant( + "GET", + "/participant/views/$(ViewToken)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("X-Amz-Bearer" => X_Amz_Bearer) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disconnect_participant(x-_amz-_bearer) disconnect_participant(x-_amz-_bearer, params::Dict{String,<:Any}) @@ -243,9 +290,16 @@ end Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable -persistent chat. ConnectionToken is used for invoking this API instead of -ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version -4 authentication. +persistent chat. 
If you have a process that consumes events in the transcript of an chat +that has ended, note that chat transcripts contain the following event content types if the +event has occurred during the chat session: +application/vnd.amazonaws.connect.event.participant.left +application/vnd.amazonaws.connect.event.participant.joined +application/vnd.amazonaws.connect.event.chat.ended +application/vnd.amazonaws.connect.event.transfer.succeeded +application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for +invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs +do not use Signature Version 4 authentication. # Arguments - `x-_amz-_bearer`: The authentication token associated with the participant's connection. @@ -297,15 +351,20 @@ end send_event(content_type, x-_amz-_bearer) send_event(content_type, x-_amz-_bearer, params::Dict{String,<:Any}) -Sends an event. ConnectionToken is used for invoking this API instead of + The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no +longer be supported starting December 31, 2024. This event has been migrated to the +CreateParticipantConnection API using the ConnectParticipant field. Sends an event. +Message receipts are not supported when there are more than two active participants in the +chat. Using the SendEvent API for message receipts when a supervisor is barged-in will +result in a conflict exception. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. # Arguments - `content_type`: The content type of the request. Supported types are: application/vnd.amazonaws.connect.event.typing - application/vnd.amazonaws.connect.event.connection.acknowledged - application/vnd.amazonaws.connect.event.message.delivered + application/vnd.amazonaws.connect.event.connection.acknowledged (will be deprecated on + December 31, 2024) application/vnd.amazonaws.connect.event.message.delivered application/vnd.amazonaws.connect.event.message.read - `x-_amz-_bearer`: The authentication token associated with the participant's connection. diff --git a/src/services/controlcatalog.jl b/src/services/controlcatalog.jl new file mode 100644 index 0000000000..78a7e27db8 --- /dev/null +++ b/src/services/controlcatalog.jl @@ -0,0 +1,95 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: controlcatalog +using AWS.Compat +using AWS.UUIDs + +""" + list_common_controls() + list_common_controls(params::Dict{String,<:Any}) + +Returns a paginated list of common controls from the Amazon Web Services Control Catalog. +You can apply an optional filter to see common controls that have a specific objective. If +you don’t provide a filter, the operation returns all common controls. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CommonControlFilter"`: An optional filter that narrows the results to a specific + objective. This filter allows you to specify one objective ARN at a time. Passing multiple + ARNs in the CommonControlFilter isn’t currently supported. +- `"maxResults"`: The maximum number of results on a page or for an API request call. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. 
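+
+# Example
+A minimal usage sketch (illustrative only): the module name assumes AWS.jl's `@service`
+convention for controlcatalog.jl.
+
+```julia
+using AWS
+@service Controlcatalog
+
+# List all common controls, or cap the page size via the params dict.
+Controlcatalog.list_common_controls()
+Controlcatalog.list_common_controls(Dict("maxResults" => 50))
+```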
+""" +function list_common_controls(; aws_config::AbstractAWSConfig=global_aws_config()) + return controlcatalog( + "POST", "/common-controls"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_common_controls( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controlcatalog( + "POST", + "/common-controls", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_domains() + list_domains(params::Dict{String,<:Any}) + +Returns a paginated list of domains from the Amazon Web Services Control Catalog. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results on a page or for an API request call. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_domains(; aws_config::AbstractAWSConfig=global_aws_config()) + return controlcatalog( + "POST", "/domains"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_domains( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controlcatalog( + "POST", "/domains", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_objectives() + list_objectives(params::Dict{String,<:Any}) + +Returns a paginated list of objectives from the Amazon Web Services Control Catalog. You +can apply an optional filter to see the objectives that belong to a specific domain. If you +don’t provide a filter, the operation returns all objectives. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ObjectiveFilter"`: An optional filter that narrows the results to a specific domain. + This filter allows you to specify one domain ARN at a time. Passing multiple ARNs in the + ObjectiveFilter isn’t currently supported. +- `"maxResults"`: The maximum number of results on a page or for an API request call. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_objectives(; aws_config::AbstractAWSConfig=global_aws_config()) + return controlcatalog( + "POST", "/objectives"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_objectives( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controlcatalog( + "POST", + "/objectives", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/controltower.jl b/src/services/controltower.jl index 2bab386560..d5f92b0b2e 100644 --- a/src/services/controltower.jl +++ b/src/services/controltower.jl @@ -4,18 +4,157 @@ using AWS.AWSServices: controltower using AWS.Compat using AWS.UUIDs +""" + create_landing_zone(manifest, version) + create_landing_zone(manifest, version, params::Dict{String,<:Any}) + +Creates a new landing zone. This API call starts an asynchronous operation that creates and +configures a landing zone, based on the parameters specified in the manifest JSON file. + +# Arguments +- `manifest`: The manifest JSON file is a text file that describes your Amazon Web Services + resources. For examples, review Launch your landing zone. +- `version`: The landing zone version, for example, 3.0. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: Tags to be applied to the landing zone. 
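+
+# Example
+A minimal usage sketch (illustrative only): the module name assumes AWS.jl's `@service`
+convention, the manifest key shown is a placeholder rather than a complete manifest, and
+the version string is only an example.
+
+```julia
+using AWS
+@service Controltower
+
+# Start an asynchronous landing-zone creation from a (placeholder) manifest document.
+manifest = Dict(
+    "governedRegions" => ["us-east-1", "us-west-2"],
+)
+Controltower.create_landing_zone(manifest, "3.3")
+```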
+""" +function create_landing_zone( + manifest, version; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/create-landingzone", + Dict{String,Any}("manifest" => manifest, "version" => version); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_landing_zone( + manifest, + version, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/create-landingzone", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("manifest" => manifest, "version" => version), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_landing_zone(landing_zone_identifier) + delete_landing_zone(landing_zone_identifier, params::Dict{String,<:Any}) + +Decommissions a landing zone. This API call starts an asynchronous operation that deletes +Amazon Web Services Control Tower resources deployed in accounts managed by Amazon Web +Services Control Tower. + +# Arguments +- `landing_zone_identifier`: The unique identifier of the landing zone. + +""" +function delete_landing_zone( + landingZoneIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/delete-landingzone", + Dict{String,Any}("landingZoneIdentifier" => landingZoneIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_landing_zone( + landingZoneIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/delete-landingzone", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("landingZoneIdentifier" => landingZoneIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disable_baseline(enabled_baseline_identifier) + disable_baseline(enabled_baseline_identifier, params::Dict{String,<:Any}) + +Disable an EnabledBaseline resource on the specified Target. This API starts an +asynchronous operation to remove all resources deployed as part of the baseline enablement. +The resource will vary depending on the enabled baseline. For usage examples, see the +Amazon Web Services Control Tower User Guide . + +# Arguments +- `enabled_baseline_identifier`: Identifier of the EnabledBaseline resource to be + deactivated, in ARN format. + +""" +function disable_baseline( + enabledBaselineIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/disable-baseline", + Dict{String,Any}("enabledBaselineIdentifier" => enabledBaselineIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disable_baseline( + enabledBaselineIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/disable-baseline", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("enabledBaselineIdentifier" => enabledBaselineIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disable_control(control_identifier, target_identifier) disable_control(control_identifier, target_identifier, params::Dict{String,<:Any}) This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources -will vary according to the control that you specify. 
+will vary according to the control that you specify. For usage examples, see the Amazon +Web Services Control Tower User Guide . # Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective - controls are permitted, with the exception of the Region deny guardrail. -- `target_identifier`: The ARN of the organizational unit. + controls are permitted, with the exception of the Region deny control. For information on + how to find the controlIdentifier, see the overview page. +- `target_identifier`: The ARN of the organizational unit. For information on how to find + the targetIdentifier, see the overview page. """ function disable_control( @@ -55,19 +194,91 @@ function disable_control( ) end +""" + enable_baseline(baseline_identifier, baseline_version, target_identifier) + enable_baseline(baseline_identifier, baseline_version, target_identifier, params::Dict{String,<:Any}) + +Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy +resources specified by the Baseline to the specified Target. For usage examples, see the +Amazon Web Services Control Tower User Guide . + +# Arguments +- `baseline_identifier`: The ARN of the baseline to be enabled. +- `baseline_version`: The specific version to be enabled of the specified baseline. +- `target_identifier`: The ARN of the target on which the baseline will be enabled. Only + OUs are supported as targets. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"parameters"`: A list of key-value objects that specify enablement parameters, where key + is a string and value is a document of any type. +- `"tags"`: Tags associated with input to EnableBaseline. +""" +function enable_baseline( + baselineIdentifier, + baselineVersion, + targetIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/enable-baseline", + Dict{String,Any}( + "baselineIdentifier" => baselineIdentifier, + "baselineVersion" => baselineVersion, + "targetIdentifier" => targetIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function enable_baseline( + baselineIdentifier, + baselineVersion, + targetIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/enable-baseline", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "baselineIdentifier" => baselineIdentifier, + "baselineVersion" => baselineVersion, + "targetIdentifier" => targetIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ enable_control(control_identifier, target_identifier) enable_control(control_identifier, target_identifier, params::Dict{String,<:Any}) -This API call activates a control. It starts an asynchronous operation that creates AWS -resources on the specified organizational unit and the accounts it contains. The resources -created will vary according to the control that you specify. +This API call activates a control. It starts an asynchronous operation that creates Amazon +Web Services resources on the specified organizational unit and the accounts it contains. +The resources created will vary according to the control that you specify. For usage +examples, see the Amazon Web Services Control Tower User Guide . # Arguments - `control_identifier`: The ARN of the control. 
Only Strongly recommended and Elective - controls are permitted, with the exception of the Region deny guardrail. -- `target_identifier`: The ARN of the organizational unit. + controls are permitted, with the exception of the Region deny control. For information on + how to find the controlIdentifier, see the overview page. +- `target_identifier`: The ARN of the organizational unit. For information on how to find + the targetIdentifier, see the overview page. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"parameters"`: A list of input parameter values, which are specified to configure the + control when you enable it. +- `"tags"`: Tags to be applied to the EnabledControl resource. """ function enable_control( controlIdentifier, targetIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -106,12 +317,96 @@ function enable_control( ) end +""" + get_baseline(baseline_identifier) + get_baseline(baseline_identifier, params::Dict{String,<:Any}) + +Retrieve details about an existing Baseline resource by specifying its identifier. For +usage examples, see the Amazon Web Services Control Tower User Guide . + +# Arguments +- `baseline_identifier`: The ARN of the Baseline resource to be retrieved. + +""" +function get_baseline(baselineIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/get-baseline", + Dict{String,Any}("baselineIdentifier" => baselineIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_baseline( + baselineIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/get-baseline", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("baselineIdentifier" => baselineIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_baseline_operation(operation_identifier) + get_baseline_operation(operation_identifier, params::Dict{String,<:Any}) + +Returns the details of an asynchronous baseline operation, as initiated by any of these +APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A +status message is displayed in case of operation failure. For usage examples, see the +Amazon Web Services Control Tower User Guide . + +# Arguments +- `operation_identifier`: The operation ID returned from mutating asynchronous APIs + (Enable, Disable, Update, Reset). + +""" +function get_baseline_operation( + operationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/get-baseline-operation", + Dict{String,Any}("operationIdentifier" => operationIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_baseline_operation( + operationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/get-baseline-operation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("operationIdentifier" => operationIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_control_operation(operation_identifier) get_control_operation(operation_identifier, params::Dict{String,<:Any}) Returns the status of a particular EnableControl or DisableControl operation. Displays a -message in case of error. Details for an operation are available for 90 days. 
+message in case of error. Details for an operation are available for 90 days. For usage +examples, see the Amazon Web Services Control Tower User Guide . # Arguments - `operation_identifier`: The ID of the asynchronous operation, which is used to track @@ -150,43 +445,698 @@ function get_control_operation( end """ - list_enabled_controls(target_identifier) - list_enabled_controls(target_identifier, params::Dict{String,<:Any}) + get_enabled_baseline(enabled_baseline_identifier) + get_enabled_baseline(enabled_baseline_identifier, params::Dict{String,<:Any}) -Lists the controls enabled by AWS Control Tower on the specified organizational unit and -the accounts it contains. +Retrieve details of an EnabledBaseline resource by specifying its identifier. # Arguments -- `target_identifier`: The ARN of the organizational unit. +- `enabled_baseline_identifier`: Identifier of the EnabledBaseline resource to be + retrieved, in ARN format. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: How many results to return per API call. -- `"nextToken"`: The token to continue the list from a previous API call with the same - parameters. """ -function list_enabled_controls( - targetIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +function get_enabled_baseline( + enabledBaselineIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return controltower( "POST", - "/list-enabled-controls", - Dict{String,Any}("targetIdentifier" => targetIdentifier); + "/get-enabled-baseline", + Dict{String,Any}("enabledBaselineIdentifier" => enabledBaselineIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_enabled_controls( - targetIdentifier, +function get_enabled_baseline( + enabledBaselineIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return controltower( "POST", - "/list-enabled-controls", + "/get-enabled-baseline", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("enabledBaselineIdentifier" => enabledBaselineIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_enabled_control(enabled_control_identifier) + get_enabled_control(enabled_control_identifier, params::Dict{String,<:Any}) + +Retrieves details about an enabled control. For usage examples, see the Amazon Web +Services Control Tower User Guide . + +# Arguments +- `enabled_control_identifier`: The controlIdentifier of the enabled control. + +""" +function get_enabled_control( + enabledControlIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/get-enabled-control", + Dict{String,Any}("enabledControlIdentifier" => enabledControlIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_enabled_control( + enabledControlIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/get-enabled-control", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("targetIdentifier" => targetIdentifier), params + _merge, + Dict{String,Any}("enabledControlIdentifier" => enabledControlIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_landing_zone(landing_zone_identifier) + get_landing_zone(landing_zone_identifier, params::Dict{String,<:Any}) + +Returns details about the landing zone. 
Displays a message in case of error. + +# Arguments +- `landing_zone_identifier`: The unique identifier of the landing zone. + +""" +function get_landing_zone( + landingZoneIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/get-landingzone", + Dict{String,Any}("landingZoneIdentifier" => landingZoneIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_landing_zone( + landingZoneIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/get-landingzone", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("landingZoneIdentifier" => landingZoneIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_landing_zone_operation(operation_identifier) + get_landing_zone_operation(operation_identifier, params::Dict{String,<:Any}) + +Returns the status of the specified landing zone operation. Details for an operation are +available for 90 days. + +# Arguments +- `operation_identifier`: A unique identifier assigned to a landing zone operation. + +""" +function get_landing_zone_operation( + operationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/get-landingzone-operation", + Dict{String,Any}("operationIdentifier" => operationIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_landing_zone_operation( + operationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/get-landingzone-operation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("operationIdentifier" => operationIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_baselines() + list_baselines(params::Dict{String,<:Any}) + +Returns a summary list of all available baselines. For usage examples, see the Amazon Web +Services Control Tower User Guide . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to be shown. +- `"nextToken"`: A pagination token. +""" +function list_baselines(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", "/list-baselines"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_baselines( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-baselines", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_control_operations() + list_control_operations(params::Dict{String,<:Any}) + +Provides a list of operations in progress or queued. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: An input filter for the ListControlOperations API that lets you select the + types of control operations to view. +- `"maxResults"`: The maximum number of results to be shown. +- `"nextToken"`: A pagination token. 
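+
+# Example
+A hedged sketch of paging through control operations with AWS.jl's `@service` interface.
+It uses only the request keys documented above; treating the response as a parsed `Dict`
+with a `"nextToken"` field is an assumption about the default feature set.
+
+```julia
+using AWS
+@service ControlTower
+
+# First page, at most 10 operations.
+page1 = ControlTower.list_control_operations(Dict("maxResults" => 10))
+
+# If a pagination token came back, request the next page with the same parameters.
+if haskey(page1, "nextToken")
+    page2 = ControlTower.list_control_operations(
+        Dict("maxResults" => 10, "nextToken" => page1["nextToken"])
+    )
+end
+```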
+""" +function list_control_operations(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/list-control-operations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_control_operations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-control-operations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_enabled_baselines() + list_enabled_baselines(params::Dict{String,<:Any}) + +Returns a list of summaries describing EnabledBaseline resources. You can filter the list +by the corresponding Baseline or Target of the EnabledBaseline resources. For usage +examples, see the Amazon Web Services Control Tower User Guide . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: A filter applied on the ListEnabledBaseline operation. Allowed filters are + baselineIdentifiers and targetIdentifiers. The filter can be applied for either, or both. +- `"maxResults"`: The maximum number of results to be shown. +- `"nextToken"`: A pagination token. +""" +function list_enabled_baselines(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/list-enabled-baselines"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_enabled_baselines( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-enabled-baselines", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_enabled_controls() + list_enabled_controls(params::Dict{String,<:Any}) + +Lists the controls enabled by Amazon Web Services Control Tower on the specified +organizational unit and the accounts it contains. For usage examples, see the Amazon Web +Services Control Tower User Guide . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: An input filter for the ListCEnabledControls API that lets you select the + types of control operations to view. +- `"maxResults"`: How many results to return per API call. +- `"nextToken"`: The token to continue the list from a previous API call with the same + parameters. +- `"targetIdentifier"`: The ARN of the organizational unit. For information on how to find + the targetIdentifier, see the overview page. +""" +function list_enabled_controls(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/list-enabled-controls"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_enabled_controls( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-enabled-controls", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_landing_zones() + list_landing_zones(params::Dict{String,<:Any}) + +Returns the landing zone ARN for the landing zone deployed in your managed account. This +API also creates an ARN for existing accounts that do not yet have a landing zone ARN. +Returns one landing zone ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of returned landing zone ARNs, which is one. 
+- `"nextToken"`: The token to continue the list from a previous API call with the same + parameters. +""" +function list_landing_zones(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", "/list-landingzones"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_landing_zones( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-landingzones", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns a list of tags associated with the resource. For usage examples, see the Amazon +Web Services Control Tower User Guide . + +# Arguments +- `resource_arn`: The ARN of the resource. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + reset_enabled_baseline(enabled_baseline_identifier) + reset_enabled_baseline(enabled_baseline_identifier, params::Dict{String,<:Any}) + +Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing +Baseline after a new member account is moved to the target OU. For usage examples, see the +Amazon Web Services Control Tower User Guide . + +# Arguments +- `enabled_baseline_identifier`: Specifies the ID of the EnabledBaseline resource to be + re-enabled, in ARN format. + +""" +function reset_enabled_baseline( + enabledBaselineIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/reset-enabled-baseline", + Dict{String,Any}("enabledBaselineIdentifier" => enabledBaselineIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reset_enabled_baseline( + enabledBaselineIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/reset-enabled-baseline", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("enabledBaselineIdentifier" => enabledBaselineIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + reset_landing_zone(landing_zone_identifier) + reset_landing_zone(landing_zone_identifier, params::Dict{String,<:Any}) + +This API call resets a landing zone. It starts an asynchronous operation that resets the +landing zone to the parameters specified in its original configuration. + +# Arguments +- `landing_zone_identifier`: The unique identifier of the landing zone. 
+ +""" +function reset_landing_zone( + landingZoneIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/reset-landingzone", + Dict{String,Any}("landingZoneIdentifier" => landingZoneIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reset_landing_zone( + landingZoneIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/reset-landingzone", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("landingZoneIdentifier" => landingZoneIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Applies tags to a resource. For usage examples, see the Amazon Web Services Control Tower +User Guide . + +# Arguments +- `resource_arn`: The ARN of the resource to be tagged. +- `tags`: Tags to be applied to the resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags from a resource. For usage examples, see the Amazon Web Services Control +Tower User Guide . + +# Arguments +- `resource_arn`: The ARN of the resource. +- `tag_keys`: Tag keys to be removed from the resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_enabled_baseline(baseline_version, enabled_baseline_identifier) + update_enabled_baseline(baseline_version, enabled_baseline_identifier, params::Dict{String,<:Any}) + +Updates an EnabledBaseline resource's applied parameters or version. For usage examples, +see the Amazon Web Services Control Tower User Guide . + +# Arguments +- `baseline_version`: Specifies the new Baseline version, to which the EnabledBaseline + should be updated. +- `enabled_baseline_identifier`: Specifies the EnabledBaseline resource to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"parameters"`: Parameters to apply when making an update. 
+""" +function update_enabled_baseline( + baselineVersion, + enabledBaselineIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/update-enabled-baseline", + Dict{String,Any}( + "baselineVersion" => baselineVersion, + "enabledBaselineIdentifier" => enabledBaselineIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_enabled_baseline( + baselineVersion, + enabledBaselineIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/update-enabled-baseline", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "baselineVersion" => baselineVersion, + "enabledBaselineIdentifier" => enabledBaselineIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_enabled_control(enabled_control_identifier, parameters) + update_enabled_control(enabled_control_identifier, parameters, params::Dict{String,<:Any}) + + Updates the configuration of an already enabled control. If the enabled control shows an +EnablementStatus of SUCCEEDED, supply parameters that are different from the currently +configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the +request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services +Control Tower will update the control to match any valid parameters that you supply. If the +DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, +you can update the control by calling DisableControl and again calling EnableControl, or +you can run an extending governance operation. For usage examples, see the Amazon Web +Services Control Tower User Guide + +# Arguments +- `enabled_control_identifier`: The ARN of the enabled control that will be updated. +- `parameters`: A key/value pair, where Key is of type String and Value is of type Document. + +""" +function update_enabled_control( + enabledControlIdentifier, parameters; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/update-enabled-control", + Dict{String,Any}( + "enabledControlIdentifier" => enabledControlIdentifier, + "parameters" => parameters, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_enabled_control( + enabledControlIdentifier, + parameters, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/update-enabled-control", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "enabledControlIdentifier" => enabledControlIdentifier, + "parameters" => parameters, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_landing_zone(landing_zone_identifier, manifest, version) + update_landing_zone(landing_zone_identifier, manifest, version, params::Dict{String,<:Any}) + +This API call updates the landing zone. It starts an asynchronous operation that updates +the landing zone based on the new landing zone version, or on the changed parameters +specified in the updated manifest file. + +# Arguments +- `landing_zone_identifier`: The unique identifier of the landing zone. +- `manifest`: The manifest JSON file is a text file that describes your Amazon Web Services + resources. For examples, review Launch your landing zone. +- `version`: The landing zone version, for example, 3.2. 
+ +""" +function update_landing_zone( + landingZoneIdentifier, + manifest, + version; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/update-landingzone", + Dict{String,Any}( + "landingZoneIdentifier" => landingZoneIdentifier, + "manifest" => manifest, + "version" => version, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_landing_zone( + landingZoneIdentifier, + manifest, + version, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controltower( + "POST", + "/update-landingzone", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "landingZoneIdentifier" => landingZoneIdentifier, + "manifest" => manifest, + "version" => version, + ), + params, ), ); aws_config=aws_config, diff --git a/src/services/cost_and_usage_report_service.jl b/src/services/cost_and_usage_report_service.jl index 6be4cc8b91..6cd6cc16c5 100644 --- a/src/services/cost_and_usage_report_service.jl +++ b/src/services/cost_and_usage_report_service.jl @@ -5,27 +5,36 @@ using AWS.Compat using AWS.UUIDs """ - delete_report_definition() - delete_report_definition(params::Dict{String,<:Any}) + delete_report_definition(report_name) + delete_report_definition(report_name, params::Dict{String,<:Any}) -Deletes the specified report. +Deletes the specified report. Any tags associated with the report are also deleted. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ReportName"`: The name of the report that you want to delete. The name must be unique, +# Arguments +- `report_name`: The name of the report that you want to delete. The name must be unique, is case sensitive, and can't include spaces. + """ -function delete_report_definition(; aws_config::AbstractAWSConfig=global_aws_config()) +function delete_report_definition( + ReportName; aws_config::AbstractAWSConfig=global_aws_config() +) return cost_and_usage_report_service( - "DeleteReportDefinition"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DeleteReportDefinition", + Dict{String,Any}("ReportName" => ReportName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function delete_report_definition( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + ReportName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return cost_and_usage_report_service( "DeleteReportDefinition", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ReportName" => ReportName), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -35,7 +44,7 @@ end describe_report_definitions() describe_report_definitions(params::Dict{String,<:Any}) -Lists the AWS Cost and Usage reports available to this account. +Lists the Amazon Web Services Cost and Usage Report available to this account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -58,11 +67,46 @@ function describe_report_definitions( ) end +""" + list_tags_for_resource(report_name) + list_tags_for_resource(report_name, params::Dict{String,<:Any}) + +Lists the tags associated with the specified report definition. + +# Arguments +- `report_name`: The report name of the report definition that tags are to be returned for. 
+ +""" +function list_tags_for_resource( + ReportName; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_and_usage_report_service( + "ListTagsForResource", + Dict{String,Any}("ReportName" => ReportName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ReportName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_and_usage_report_service( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ReportName" => ReportName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_report_definition(report_definition, report_name) modify_report_definition(report_definition, report_name, params::Dict{String,<:Any}) -Allows you to programatically update your report preferences. +Allows you to programmatically update your report preferences. # Arguments - `report_definition`: @@ -113,6 +157,9 @@ Creates a new report using the description that you provide. - `report_definition`: Represents the output of the PutReportDefinition operation. The content consists of the detailed metadata and data file information. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The tags to be assigned to the report definition resource. """ function put_report_definition( ReportDefinition; aws_config::AbstractAWSConfig=global_aws_config() @@ -140,3 +187,83 @@ function put_report_definition( feature_set=SERVICE_FEATURE_SET, ) end + +""" + tag_resource(report_name, tags) + tag_resource(report_name, tags, params::Dict{String,<:Any}) + +Associates a set of tags with a report definition. + +# Arguments +- `report_name`: The report name of the report definition that tags are to be associated + with. +- `tags`: The tags to be assigned to the report definition resource. + +""" +function tag_resource(ReportName, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return cost_and_usage_report_service( + "TagResource", + Dict{String,Any}("ReportName" => ReportName, "Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ReportName, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_and_usage_report_service( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ReportName" => ReportName, "Tags" => Tags), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(report_name, tag_keys) + untag_resource(report_name, tag_keys, params::Dict{String,<:Any}) + +Disassociates a set of tags from a report definition. + +# Arguments +- `report_name`: The report name of the report definition that tags are to be disassociated + from. +- `tag_keys`: The tags to be disassociated from the report definition resource. 
+ +""" +function untag_resource( + ReportName, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_and_usage_report_service( + "UntagResource", + Dict{String,Any}("ReportName" => ReportName, "TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ReportName, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_and_usage_report_service( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReportName" => ReportName, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/cost_explorer.jl b/src/services/cost_explorer.jl index 1d3ba7ccfe..2bdddf4504 100644 --- a/src/services/cost_explorer.jl +++ b/src/services/cost_explorer.jl @@ -433,6 +433,60 @@ function get_anomaly_subscriptions( ) end +""" + get_approximate_usage_records(approximation_dimension, granularity) + get_approximate_usage_records(approximation_dimension, granularity, params::Dict{String,<:Any}) + +Retrieves estimated usage records for hourly granularity or resource-level data at daily +granularity. + +# Arguments +- `approximation_dimension`: The service to evaluate for the usage records. You can choose + resource-level data at daily granularity, or hourly granularity with or without + resource-level data. +- `granularity`: How granular you want the data to be. You can enable data at hourly or + daily granularity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Services"`: The service metadata for the service or services you want to query. If not + specified, all elements are returned. +""" +function get_approximate_usage_records( + ApproximationDimension, Granularity; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_explorer( + "GetApproximateUsageRecords", + Dict{String,Any}( + "ApproximationDimension" => ApproximationDimension, "Granularity" => Granularity + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_approximate_usage_records( + ApproximationDimension, + Granularity, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_explorer( + "GetApproximateUsageRecords", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApproximationDimension" => ApproximationDimension, + "Granularity" => Granularity, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_cost_and_usage(granularity, metrics, time_period) get_cost_and_usage(granularity, metrics, time_period, params::Dict{String,<:Any}) @@ -528,11 +582,11 @@ cost and usage-related metric, such as BlendedCosts or UsageQuantity, that you w request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Management account in an organization in Organizations have -access to all member accounts. This API is currently available for the Amazon Elastic -Compute Cloud – Compute service only. This is an opt-in only feature. You can enable -this feature from the Cost Explorer Settings page. For information about how to access the -Settings page, see Controlling Access for Cost Explorer in the Billing and Cost Management -User Guide. +access to all member accounts. 
Hourly granularity is only available for EC2-Instances +(Elastic Compute Cloud) resource-level data. All other resource-level data is available at +daily granularity. This is an opt-in only feature. You can enable this feature from the +Cost Explorer Settings page. For information about how to access the Settings page, see +Controlling Access for Cost Explorer in the Billing and Cost Management User Guide. # Arguments - `filter`: Filters Amazon Web Services costs by different dimensions. For example, you can @@ -1111,6 +1165,47 @@ function get_rightsizing_recommendation( ) end +""" + get_savings_plan_purchase_recommendation_details(recommendation_detail_id) + get_savings_plan_purchase_recommendation_details(recommendation_detail_id, params::Dict{String,<:Any}) + +Retrieves the details for a Savings Plan recommendation. These details include the hourly +data-points that construct the cost, coverage, and utilization charts. + +# Arguments +- `recommendation_detail_id`: The ID that is associated with the Savings Plan + recommendation. + +""" +function get_savings_plan_purchase_recommendation_details( + RecommendationDetailId; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_explorer( + "GetSavingsPlanPurchaseRecommendationDetails", + Dict{String,Any}("RecommendationDetailId" => RecommendationDetailId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_savings_plan_purchase_recommendation_details( + RecommendationDetailId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_explorer( + "GetSavingsPlanPurchaseRecommendationDetails", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("RecommendationDetailId" => RecommendationDetailId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_savings_plans_coverage(time_period) get_savings_plans_coverage(time_period, params::Dict{String,<:Any}) @@ -1498,6 +1593,39 @@ function get_usage_forecast( ) end +""" + list_cost_allocation_tag_backfill_history() + list_cost_allocation_tag_backfill_history(params::Dict{String,<:Any}) + + Retrieves a list of your historical cost allocation tag backfill requests. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of objects that are returned for this request. +- `"NextToken"`: The token to retrieve the next set of results. Amazon Web Services + provides the token when the response from a previous call has more results than the maximum + page size. +""" +function list_cost_allocation_tag_backfill_history(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_explorer( + "ListCostAllocationTagBackfillHistory"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cost_allocation_tag_backfill_history( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_explorer( + "ListCostAllocationTagBackfillHistory", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_cost_allocation_tags() list_cost_allocation_tags(params::Dict{String,<:Any}) @@ -1686,6 +1814,45 @@ function provide_anomaly_feedback( ) end +""" + start_cost_allocation_tag_backfill(backfill_from) + start_cost_allocation_tag_backfill(backfill_from, params::Dict{String,<:Any}) + + Request a cost allocation tag backfill. 
This will backfill the activation status (either +active or inactive) for all tag keys from para:BackfillFrom up to the when this request is +made. You can request a backfill once every 24 hours. + +# Arguments +- `backfill_from`: The date you want the backfill to start from. The date can only be a + first day of the month (a billing start date). Dates can't precede the previous twelve + months, or in the future. + +""" +function start_cost_allocation_tag_backfill( + BackfillFrom; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_explorer( + "StartCostAllocationTagBackfill", + Dict{String,Any}("BackfillFrom" => BackfillFrom); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_cost_allocation_tag_backfill( + BackfillFrom, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_explorer( + "StartCostAllocationTagBackfill", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("BackfillFrom" => BackfillFrom), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_savings_plans_purchase_recommendation_generation() start_savings_plans_purchase_recommendation_generation(params::Dict{String,<:Any}) @@ -1869,7 +2036,9 @@ end update_anomaly_subscription(subscription_arn) update_anomaly_subscription(subscription_arn, params::Dict{String,<:Any}) -Updates an existing cost anomaly monitor subscription. +Updates an existing cost anomaly subscription. Specify the fields that you want to update. +Omitted fields are unchanged. The JSON below describes the generic construct for each +type. See Request Parameters for possible values as they apply to AnomalySubscription. # Arguments - `subscription_arn`: A cost anomaly subscription Amazon Resource Name (ARN). @@ -1883,20 +2052,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Threshold"`: (deprecated) The update to the threshold value for receiving notifications. This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a - ThresholdExpression. + ThresholdExpression. You can specify either Threshold or ThresholdExpression, but not both. - `"ThresholdExpression"`: The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and - ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The - match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and - 10,000,000,000. The following are examples of valid ThresholdExpressions: Absolute - threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", - \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } Percentage - threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", - \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } AND two - thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": + ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and + TotalImpactPercentage, respectively (see Impact for more details). The supported nested + expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values + must be numbers between 0 and 10,000,000,000 in string format. 
You can specify either + Threshold or ThresholdExpression, but not both. The following are examples of valid + ThresholdExpressions: Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], - \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": + \"Values\": [ \"100\" ] } } Percentage threshold: { \"Dimensions\": { \"Key\": + \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], + \"Values\": [ \"100\" ] } } AND two thresholds together: { \"And\": [ { \"Dimensions\": + { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" + ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] } OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ diff --git a/src/services/cost_optimization_hub.jl b/src/services/cost_optimization_hub.jl new file mode 100644 index 0000000000..ea3d0ddd6a --- /dev/null +++ b/src/services/cost_optimization_hub.jl @@ -0,0 +1,233 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: cost_optimization_hub +using AWS.Compat +using AWS.UUIDs + +""" + get_preferences() + get_preferences(params::Dict{String,<:Any}) + +Returns a set of preferences for an account in order to add account-specific preferences +into the service. These preferences impact how the savings associated with recommendations +are presented—estimated savings after discounts or estimated savings before discounts, +for example. + +""" +function get_preferences(; aws_config::AbstractAWSConfig=global_aws_config()) + return cost_optimization_hub( + "GetPreferences"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_preferences( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "GetPreferences", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + get_recommendation(recommendation_id) + get_recommendation(recommendation_id, params::Dict{String,<:Any}) + +Returns both the current and recommended resource configuration and the estimated cost +impact for a recommendation. The recommendationId is only valid for up to a maximum of 24 +hours as recommendations are refreshed daily. To retrieve the recommendationId, use the +ListRecommendations API. + +# Arguments +- `recommendation_id`: The ID for the recommendation. + +""" +function get_recommendation( + recommendationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "GetRecommendation", + Dict{String,Any}("recommendationId" => recommendationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_recommendation( + recommendationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_optimization_hub( + "GetRecommendation", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("recommendationId" => recommendationId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_enrollment_statuses() + list_enrollment_statuses(params::Dict{String,<:Any}) + +Retrieves the enrollment status for an account. It can also return the list of accounts +that are enrolled under the organization. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountId"`: The account ID of a member account in the organization. +- `"includeOrganizationInfo"`: Indicates whether to return the enrollment status for the + organization. +- `"maxResults"`: The maximum number of objects that are returned for the request. +- `"nextToken"`: The token to retrieve the next set of results. +""" +function list_enrollment_statuses(; aws_config::AbstractAWSConfig=global_aws_config()) + return cost_optimization_hub( + "ListEnrollmentStatuses"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_enrollment_statuses( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "ListEnrollmentStatuses", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_recommendation_summaries(group_by) + list_recommendation_summaries(group_by, params::Dict{String,<:Any}) + +Returns a concise representation of savings estimates for resources. Also returns de-duped +savings across different types of recommendations. The following filters are not supported +for this API: recommendationIds, resourceArns, and resourceIds. + +# Arguments +- `group_by`: The grouping of recommendations by a dimension. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: +- `"maxResults"`: The maximum number of recommendations that are returned for the request. +- `"nextToken"`: The token to retrieve the next set of results. +""" +function list_recommendation_summaries( + groupBy; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "ListRecommendationSummaries", + Dict{String,Any}("groupBy" => groupBy); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_recommendation_summaries( + groupBy, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "ListRecommendationSummaries", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("groupBy" => groupBy), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_recommendations() + list_recommendations(params::Dict{String,<:Any}) + +Returns a list of recommendations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: The constraints that you want all returned recommendations to match. +- `"includeAllRecommendations"`: List of all recommendations for a resource, or a single + recommendation if de-duped by resourceId. +- `"maxResults"`: The maximum number of recommendations that are returned for the request. +- `"nextToken"`: The token to retrieve the next set of results. +- `"orderBy"`: The ordering of recommendations by a dimension. 
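+
+# Example
+A minimal sketch using only the request keys documented above, via AWS.jl's `@service`
+pattern; the values are illustrative.
+
+```julia
+using AWS
+@service Cost_Optimization_Hub
+
+# Return up to 20 recommendations, including all recommendations per resource.
+resp = Cost_Optimization_Hub.list_recommendations(
+    Dict("maxResults" => 20, "includeAllRecommendations" => true)
+)
+```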
+""" +function list_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return cost_optimization_hub( + "ListRecommendations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "ListRecommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_enrollment_status(status) + update_enrollment_status(status, params::Dict{String,<:Any}) + +Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization +Hub service. If the account is a management account of an organization, this action can +also be used to enroll member accounts of the organization. You must have the appropriate +permissions to opt in to Cost Optimization Hub and to view its recommendations. When you +opt in, Cost Optimization Hub automatically creates a service-linked role in your account +to access its data. + +# Arguments +- `status`: Sets the account status. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includeMemberAccounts"`: Indicates whether to enroll member accounts of the + organization if the account is the management account. +""" +function update_enrollment_status(status; aws_config::AbstractAWSConfig=global_aws_config()) + return cost_optimization_hub( + "UpdateEnrollmentStatus", + Dict{String,Any}("status" => status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_enrollment_status( + status, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "UpdateEnrollmentStatus", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_preferences() + update_preferences(params::Dict{String,<:Any}) + +Updates a set of preferences for an account in order to add account-specific preferences +into the service. These preferences impact how the savings associated with recommendations +are presented. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"memberAccountDiscountVisibility"`: Sets the \"member account discount visibility\" + preference. +- `"savingsEstimationMode"`: Sets the \"savings estimation mode\" preference. +""" +function update_preferences(; aws_config::AbstractAWSConfig=global_aws_config()) + return cost_optimization_hub( + "UpdatePreferences"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_preferences( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_optimization_hub( + "UpdatePreferences", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end diff --git a/src/services/customer_profiles.jl b/src/services/customer_profiles.jl index f01d0f21bd..5ea429cff8 100644 --- a/src/services/customer_profiles.jl +++ b/src/services/customer_profiles.jl @@ -140,8 +140,13 @@ attributes, object types, profile keys, and encryption keys. You can create mult domains, and each domain can have multiple third-party integrations. Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain. 
Use this API or UpdateDomain to enable identity resolution: set -Matching to true. To prevent cross-service impersonation when you call this API, see -Cross-service confused deputy prevention for sample policies that you should apply. +Matching to true. To prevent cross-service impersonation when you call this API, see +Cross-service confused deputy prevention for sample policies that you should apply. It is +not possible to associate a Customer Profiles domain with an Amazon Connect Instance +directly from the API. If you would like to create a domain and associate a Customer +Profiles domain, use the Amazon Connect admin website. For more information, see Enable +Customer Profiles. Each Amazon Connect instance can be associated with only one domain. +Multiple Amazon Connect instances can be associated with one domain. # Arguments - `default_expiration_days`: The default number of days until the data within the domain @@ -164,6 +169,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3. +- `"RuleBasedMatching"`: The process of matching duplicate profiles using the Rule-Based + matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match + and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the + results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you + can download the results from S3. - `"Tags"`: The tags used to organize, track, or control access for this resource. """ function create_domain( @@ -326,7 +337,7 @@ customer profile in a domain. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AccountNumber"`: A unique account number that you have given to the customer. +- `"AccountNumber"`: An account number that you have given to the customer. - `"AdditionalInformation"`: Any additional information relevant to the customer’s profile. - `"Address"`: A generic address associated with the customer that is not mailing, @@ -751,6 +762,43 @@ function delete_workflow( ) end +""" + detect_profile_object_type(domain_name, objects) + detect_profile_object_type(domain_name, objects, params::Dict{String,<:Any}) + +The process of detecting profile object type mapping by using given objects. + +# Arguments +- `domain_name`: The unique name of the domain. +- `objects`: A string that is serialized from a JSON object. 
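+
+# Example
+A hedged sketch, assuming the usual AWS.jl `@service` pattern and that `objects` is passed
+as a list of serialized JSON strings. The domain name and sample record are placeholders.
+
+```julia
+using AWS
+@service Customer_Profiles
+
+domain = "example-domain"  # placeholder domain name
+
+# Each entry is a JSON-serialized sample record for the service to inspect.
+objects = ["{\"FirstName\": \"Jane\", \"AccountNumber\": \"12345\"}"]
+resp = Customer_Profiles.detect_profile_object_type(domain, objects)
+```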
+ +""" +function detect_profile_object_type( + DomainName, Objects; aws_config::AbstractAWSConfig=global_aws_config() +) + return customer_profiles( + "POST", + "/domains/$(DomainName)/detect/object-types", + Dict{String,Any}("Objects" => Objects); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function detect_profile_object_type( + DomainName, + Objects, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return customer_profiles( + "POST", + "/domains/$(DomainName)/detect/object-types", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Objects" => Objects), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_auto_merging_preview(conflict_resolution, consolidation, domain_name) get_auto_merging_preview(conflict_resolution, consolidation, domain_name, params::Dict{String,<:Any}) @@ -1161,6 +1209,69 @@ function get_profile_object_type_template( ) end +""" + get_similar_profiles(domain_name, match_type, search_key, search_value) + get_similar_profiles(domain_name, match_type, search_key, search_value, params::Dict{String,<:Any}) + +Returns a set of profiles that belong to the same matching group using the matchId or +profileId. You can also specify the type of matching that you want for finding similar +profiles using either RULE_BASED_MATCHING or ML_BASED_MATCHING. + +# Arguments +- `domain_name`: The unique name of the domain. +- `match_type`: Specify the type of matching to get similar profiles for. +- `search_key`: The string indicating the search key to be used. +- `search_value`: The string based on SearchKey to be searched for similar profiles. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of objects returned per page. +- `"next-token"`: The pagination token from the previous GetSimilarProfiles API call. +""" +function get_similar_profiles( + DomainName, + MatchType, + SearchKey, + SearchValue; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return customer_profiles( + "POST", + "/domains/$(DomainName)/matches", + Dict{String,Any}( + "MatchType" => MatchType, "SearchKey" => SearchKey, "SearchValue" => SearchValue + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_similar_profiles( + DomainName, + MatchType, + SearchKey, + SearchValue, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return customer_profiles( + "POST", + "/domains/$(DomainName)/matches", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MatchType" => MatchType, + "SearchKey" => SearchKey, + "SearchValue" => SearchValue, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_workflow(domain_name, workflow_id) get_workflow(domain_name, workflow_id, params::Dict{String,<:Any}) @@ -1610,6 +1721,44 @@ function list_profile_objects( ) end +""" + list_rule_based_matches(domain_name) + list_rule_based_matches(domain_name, params::Dict{String,<:Any}) + +Returns a set of MatchIds that belong to the given domain. + +# Arguments +- `domain_name`: The unique name of the domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of MatchIds returned per page. +- `"next-token"`: The pagination token from the previous ListRuleBasedMatches API call. 
+""" +function list_rule_based_matches( + DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return customer_profiles( + "GET", + "/domains/$(DomainName)/profiles/ruleBasedMatches"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_rule_based_matches( + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return customer_profiles( + "GET", + "/domains/$(DomainName)/profiles/ruleBasedMatches", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1878,6 +2027,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ExpirationDays"`: The number of days until the data in the object expires. - `"Fields"`: A map of the name and ObjectType field. - `"Keys"`: A list of unique keys that can be used to map data to the profile. +- `"MaxProfileObjectCount"`: The amount of profile object max count assigned to the object + type - `"SourceLastUpdatedTimestampFormat"`: The format of your sourceLastUpdatedTimestamp that was previously set up. - `"Tags"`: The tags used to organize, track, or control access for this resource. @@ -2121,7 +2272,7 @@ end Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key. After a domain is created, the name can’t be changed. Use this API or -CreateDomain to enable identity resolution: set Matching to true. To prevent cross-service +CreateDomain to enable identity resolution: set Matching to true. To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply. To add or remove tags on an existing Domain, see TagResource/UntagResource. @@ -2149,6 +2300,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3. +- `"RuleBasedMatching"`: The process of matching duplicate profiles using the rule-Based + matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match + and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the + results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you + can download the results from S3. - `"Tags"`: The tags used to organize, track, or control access for this resource. """ function update_domain(DomainName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2188,7 +2345,7 @@ already there will be kept. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AccountNumber"`: A unique account number that you have given to the customer. +- `"AccountNumber"`: An account number that you have given to the customer. - `"AdditionalInformation"`: Any additional information relevant to the customer’s profile. 
- `"Address"`: A generic address associated with the customer that is not mailing, diff --git a/src/services/database_migration_service.jl b/src/services/database_migration_service.jl index c1b7248ec3..580f204c2b 100644 --- a/src/services/database_migration_service.jl +++ b/src/services/database_migration_service.jl @@ -187,6 +187,54 @@ function cancel_replication_task_assessment_run( ) end +""" + create_data_provider(engine, settings) + create_data_provider(engine, settings, params::Dict{String,<:Any}) + +Creates a data provider using the provided settings. A data provider stores a data store +type and location information about your database. + +# Arguments +- `engine`: The type of database engine for the data provider. Valid values include + \"aurora\", \"aurora-postgresql\", \"mysql\", \"oracle\", \"postgres\", \"sqlserver\", + redshift, mariadb, mongodb, and docdb. A value of \"aurora\" represents Amazon Aurora + MySQL-Compatible Edition. +- `settings`: The settings in JSON format for a data provider. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DataProviderName"`: A user-friendly name for the data provider. +- `"Description"`: A user-friendly description of the data provider. +- `"Tags"`: One or more tags to be assigned to the data provider. +""" +function create_data_provider( + Engine, Settings; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "CreateDataProvider", + Dict{String,Any}("Engine" => Engine, "Settings" => Settings); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_provider( + Engine, + Settings, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "CreateDataProvider", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("Engine" => Engine, "Settings" => Settings), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_endpoint(endpoint_identifier, endpoint_type, engine_name) create_endpoint(endpoint_identifier, endpoint_type, engine_name, params::Dict{String,<:Any}) @@ -299,6 +347,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide. - `"Tags"`: One or more tags to be assigned to the endpoint. +- `"TimestreamSettings"`: Settings in JSON format for the target Amazon Timestream endpoint. - `"Username"`: The user name to be used to log in to the endpoint database. """ function create_endpoint( @@ -477,6 +526,220 @@ function create_fleet_advisor_collector( ) end +""" + create_instance_profile() + create_instance_profile(params::Dict{String,<:Any}) + +Creates the instance profile using the specified parameters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AvailabilityZone"`: The Availability Zone where the instance profile will be created. + The default value is a random, system-chosen Availability Zone in the Amazon Web Services + Region where your data provider is created, for examplem us-east-1d. +- `"Description"`: A user-friendly description of the instance profile. +- `"InstanceProfileName"`: A user-friendly name for the instance profile. 
+- `"KmsKeyArn"`: The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the + connection parameters for the instance profile. If you don't specify a value for the + KmsKeyArn parameter, then DMS uses your default encryption key. KMS creates the default + encryption key for your Amazon Web Services account. Your Amazon Web Services account has a + different default encryption key for each Amazon Web Services Region. +- `"NetworkType"`: Specifies the network type for the instance profile. A value of IPV4 + represents an instance profile with IPv4 network type and only supports IPv4 addressing. A + value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 + addressing. A value of DUAL represents an instance profile with dual network type that + supports IPv4 and IPv6 addressing. +- `"PubliclyAccessible"`: Specifies the accessibility options for the instance profile. A + value of true represents an instance profile with a public IP address. A value of false + represents an instance profile with a private IP address. The default value is true. +- `"SubnetGroupIdentifier"`: A subnet group to associate with the instance profile. +- `"Tags"`: One or more tags to be assigned to the instance profile. +- `"VpcSecurityGroups"`: Specifies the VPC security group names to be used with the + instance profile. The VPC security group must work with the VPC containing the instance + profile. +""" +function create_instance_profile(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "CreateInstanceProfile"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function create_instance_profile( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "CreateInstanceProfile", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_migration_project(instance_profile_identifier, source_data_provider_descriptors, target_data_provider_descriptors) + create_migration_project(instance_profile_identifier, source_data_provider_descriptors, target_data_provider_descriptors, params::Dict{String,<:Any}) + +Creates the migration project using the specified parameters. You can run this action only +after you create an instance profile and data providers using CreateInstanceProfile and +CreateDataProvider. + +# Arguments +- `instance_profile_identifier`: The identifier of the associated instance profile. + Identifiers must begin with a letter and must contain only ASCII letters, digits, and + hyphens. They can't end with a hyphen, or contain two consecutive hyphens. +- `source_data_provider_descriptors`: Information about the source data provider, including + the name, ARN, and Secrets Manager parameters. +- `target_data_provider_descriptors`: Information about the target data provider, including + the name, ARN, and Amazon Web Services Secrets Manager parameters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A user-friendly description of the migration project. +- `"MigrationProjectName"`: A user-friendly name for the migration project. +- `"SchemaConversionApplicationAttributes"`: The schema conversion application attributes, + including the Amazon S3 bucket name and Amazon S3 role ARN. +- `"Tags"`: One or more tags to be assigned to the migration project. +- `"TransformationRules"`: The settings in JSON format for migration rules. 
Migration rules + make it possible for you to change the object names according to the rules that you + specify. For example, you can change an object name to lowercase or uppercase, add or + remove a prefix or suffix, or rename objects. +""" +function create_migration_project( + InstanceProfileIdentifier, + SourceDataProviderDescriptors, + TargetDataProviderDescriptors; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "CreateMigrationProject", + Dict{String,Any}( + "InstanceProfileIdentifier" => InstanceProfileIdentifier, + "SourceDataProviderDescriptors" => SourceDataProviderDescriptors, + "TargetDataProviderDescriptors" => TargetDataProviderDescriptors, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_migration_project( + InstanceProfileIdentifier, + SourceDataProviderDescriptors, + TargetDataProviderDescriptors, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "CreateMigrationProject", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceProfileIdentifier" => InstanceProfileIdentifier, + "SourceDataProviderDescriptors" => SourceDataProviderDescriptors, + "TargetDataProviderDescriptors" => TargetDataProviderDescriptors, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_replication_config(compute_config, replication_config_identifier, replication_type, source_endpoint_arn, table_mappings, target_endpoint_arn) + create_replication_config(compute_config, replication_config_identifier, replication_type, source_endpoint_arn, table_mappings, target_endpoint_arn, params::Dict{String,<:Any}) + +Creates a configuration that you can later provide to configure and start an DMS Serverless +replication. You can also provide options to validate the configuration inputs before you +start the replication. + +# Arguments +- `compute_config`: Configuration parameters for provisioning an DMS Serverless replication. +- `replication_config_identifier`: A unique identifier that you want to use to create a + ReplicationConfigArn that is returned as part of the output from this action. You can then + pass this output ReplicationConfigArn as the value of the ReplicationConfigArn option for + other actions to identify both DMS Serverless replications and replication configurations + that you want those actions to operate on. For some actions, you can also use either this + unique identifier or a corresponding ARN in action filters to identify the specific + replication and replication configuration to operate on. +- `replication_type`: The type of DMS Serverless replication to provision using this + replication configuration. Possible values: \"full-load\" \"cdc\" + \"full-load-and-cdc\" +- `source_endpoint_arn`: The Amazon Resource Name (ARN) of the source endpoint for this DMS + Serverless replication configuration. +- `table_mappings`: JSON table mappings for DMS Serverless replications that are + provisioned using this replication configuration. For more information, see Specifying + table selection and transformations rules using JSON. +- `target_endpoint_arn`: The Amazon Resource Name (ARN) of the target endpoint for this DMS + serverless replication configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ReplicationSettings"`: Optional JSON settings for DMS Serverless replications that are + provisioned using this replication configuration. For example, see Change processing + tuning settings. +- `"ResourceIdentifier"`: Optional unique value or name that you set for a given resource + that can be used to construct an Amazon Resource Name (ARN) for that resource. For more + information, see Fine-grained access control using resource names and tags. +- `"SupplementalSettings"`: Optional JSON settings for specifying supplemental data. For + more information, see Specifying supplemental data for task settings. +- `"Tags"`: One or more optional tags associated with resources used by the DMS Serverless + replication. For more information, see Tagging resources in Database Migration Service. +""" +function create_replication_config( + ComputeConfig, + ReplicationConfigIdentifier, + ReplicationType, + SourceEndpointArn, + TableMappings, + TargetEndpointArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "CreateReplicationConfig", + Dict{String,Any}( + "ComputeConfig" => ComputeConfig, + "ReplicationConfigIdentifier" => ReplicationConfigIdentifier, + "ReplicationType" => ReplicationType, + "SourceEndpointArn" => SourceEndpointArn, + "TableMappings" => TableMappings, + "TargetEndpointArn" => TargetEndpointArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_replication_config( + ComputeConfig, + ReplicationConfigIdentifier, + ReplicationType, + SourceEndpointArn, + TableMappings, + TargetEndpointArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "CreateReplicationConfig", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComputeConfig" => ComputeConfig, + "ReplicationConfigIdentifier" => ReplicationConfigIdentifier, + "ReplicationType" => ReplicationType, + "SourceEndpointArn" => SourceEndpointArn, + "TableMappings" => TableMappings, + "TargetEndpointArn" => TargetEndpointArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_replication_instance(replication_instance_class, replication_instance_identifier) create_replication_instance(replication_instance_class, replication_instance_identifier, params::Dict{String,<:Any}) @@ -485,14 +748,17 @@ Creates the replication instance using the specified parameters. DMS requires th account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the CLI and DMS API. For information on the required permissions, see IAM Permissions Needed to -Use DMS. +Use DMS. If you don't specify a version when creating a replication instance, DMS will +create the instance using the default engine version. For information about the default +engine version, see Release Notes. # Arguments - `replication_instance_class`: The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\". For more information on the settings and capacities for the available replication instance classes, see - Selecting the right DMS replication instance for your migration. + Choosing the right DMS replication instance; and, Selecting the best size for a replication + instance. 
- `replication_instance_identifier`: The replication instance identifier. This parameter is stored as a lowercase string. Constraints: Must contain 1-63 alphanumeric characters or hyphens. First character must be a letter. Can't end with a hyphen or contain two @@ -504,15 +770,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the replication instance. - `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance window. This - parameter defaults to true. Default: true When AutoMinorVersionUpgrade is enabled, DMS - uses the current default engine version when you create a replication instance. For - example, if you set EngineVersion to a lower version number than the current default - version, DMS uses the default version. If AutoMinorVersionUpgrade isn’t enabled when you - create a replication instance, DMS uses the engine version specified by the EngineVersion - parameter. + parameter defaults to true. Default: true - `"AvailabilityZone"`: The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's - Amazon Web Services Region, for example: us-east-1d + Amazon Web Services Region, for example: us-east-1d. - `"DnsNameServers"`: A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a @@ -599,7 +860,11 @@ end Creates a replication subnet group given a list of the subnet IDs in a VPC. The VPC needs to have at least one subnet in at least two availability zones in the Amazon Web Services Region, otherwise the service will throw a ReplicationSubnetGroupDoesNotCoverEnoughAZs -exception. +exception. If a replication subnet group exists in your Amazon Web Services account, the +CreateReplicationSubnetGroup action returns the following error message: The Replication +Subnet Group already exists. In this case, delete the existing replication subnet group. To +do so, use the DeleteReplicationSubnetGroup action. Optionally, choose Subnet groups in the +DMS console, then choose your subnet group. Next, choose Delete from Actions. # Arguments - `replication_subnet_group_description`: The description for the subnet group. @@ -607,7 +872,7 @@ exception. value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be \"default\". Example: mySubnetgroup -- `subnet_ids`: One or more subnet IDs to be assigned to the subnet group. +- `subnet_ids`: Two or more subnet IDs to be assigned to the subnet group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -697,7 +962,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CdcStopPosition"`: Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time. Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” Commit time example: - --cdc-stop-position “commit_time: 2018-02-09T12:12:12“ + --cdc-stop-position “commit_time:2018-02-09T12:12:12“ - `"ReplicationTaskSettings"`: Overall settings for the task, in JSON format. 
For more information, see Specifying Task Settings for Database Migration Service Tasks in the Database Migration Service User Guide. @@ -850,6 +1115,46 @@ function delete_connection( ) end +""" + delete_data_provider(data_provider_identifier) + delete_data_provider(data_provider_identifier, params::Dict{String,<:Any}) + +Deletes the specified data provider. All migration projects associated with the data +provider must be deleted or modified before you can delete the data provider. + +# Arguments +- `data_provider_identifier`: The identifier of the data provider to delete. + +""" +function delete_data_provider( + DataProviderIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DeleteDataProvider", + Dict{String,Any}("DataProviderIdentifier" => DataProviderIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_data_provider( + DataProviderIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DeleteDataProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DataProviderIdentifier" => DataProviderIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_endpoint(endpoint_arn) delete_endpoint(endpoint_arn, params::Dict{String,<:Any}) @@ -996,6 +1301,131 @@ function delete_fleet_advisor_databases( ) end +""" + delete_instance_profile(instance_profile_identifier) + delete_instance_profile(instance_profile_identifier, params::Dict{String,<:Any}) + +Deletes the specified instance profile. All migration projects associated with the +instance profile must be deleted or modified before you can delete the instance profile. + +# Arguments +- `instance_profile_identifier`: The identifier of the instance profile to delete. + +""" +function delete_instance_profile( + InstanceProfileIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DeleteInstanceProfile", + Dict{String,Any}("InstanceProfileIdentifier" => InstanceProfileIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_instance_profile( + InstanceProfileIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DeleteInstanceProfile", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InstanceProfileIdentifier" => InstanceProfileIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_migration_project(migration_project_identifier) + delete_migration_project(migration_project_identifier, params::Dict{String,<:Any}) + +Deletes the specified migration project. The migration project must be closed before you +can delete it. + +# Arguments +- `migration_project_identifier`: The name or Amazon Resource Name (ARN) of the migration + project to delete. 
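# Example

A sketch; the module name and project identifier are placeholders, and the project must
already be closed as noted above:

    using AWS: @service
    @service Database_Migration_Service

    # Accepts either the migration project name or its ARN.
    Database_Migration_Service.delete_migration_project("my-migration-project")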
+ +""" +function delete_migration_project( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DeleteMigrationProject", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_migration_project( + MigrationProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DeleteMigrationProject", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_replication_config(replication_config_arn) + delete_replication_config(replication_config_arn, params::Dict{String,<:Any}) + +Deletes an DMS Serverless replication configuration. This effectively deprovisions any and +all replications that use this configuration. You can't delete the configuration for an DMS +Serverless replication that is ongoing. You can delete the configuration when the +replication is in a non-RUNNING and non-STARTING state. + +# Arguments +- `replication_config_arn`: The replication config to delete. + +""" +function delete_replication_config( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DeleteReplicationConfig", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_replication_config( + ReplicationConfigArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DeleteReplicationConfig", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_replication_instance(replication_instance_arn) delete_replication_instance(replication_instance_arn, params::Dict{String,<:Any}) @@ -1321,46 +1751,124 @@ function describe_connections( end """ - describe_endpoint_settings(engine_name) - describe_endpoint_settings(engine_name, params::Dict{String,<:Any}) + describe_conversion_configuration(migration_project_identifier) + describe_conversion_configuration(migration_project_identifier, params::Dict{String,<:Any}) -Returns information about the possible endpoint settings available when you create an -endpoint for a specific database engine. +Returns configuration parameters for a schema conversion project. # Arguments -- `engine_name`: The databse engine used for your source or target endpoint. +- `migration_project_identifier`: The name or Amazon Resource Name (ARN) for the schema + conversion project to describe. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Marker"`: An optional pagination token provided by a previous request. If this - parameter is specified, the response includes only records beyond the marker, up to the - value specified by MaxRecords. -- `"MaxRecords"`: The maximum number of records to include in the response. If more records - exist than the specified MaxRecords value, a pagination token called a marker is included - in the response so that the remaining results can be retrieved. 
""" -function describe_endpoint_settings( - EngineName; aws_config::AbstractAWSConfig=global_aws_config() +function describe_conversion_configuration( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeEndpointSettings", - Dict{String,Any}("EngineName" => EngineName); + "DescribeConversionConfiguration", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_endpoint_settings( - EngineName, +function describe_conversion_configuration( + MigrationProjectIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "DescribeEndpointSettings", + "DescribeConversionConfiguration", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("EngineName" => EngineName), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_data_providers() + describe_data_providers(params::Dict{String,<:Any}) + +Returns a paginated list of data providers for your account in the current region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the data providers described in the form of key-value + pairs. Valid filter names: data-provider-identifier +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. +""" +function describe_data_providers(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeDataProviders"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_data_providers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeDataProviders", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_endpoint_settings(engine_name) + describe_endpoint_settings(engine_name, params::Dict{String,<:Any}) + +Returns information about the possible endpoint settings available when you create an +endpoint for a specific database engine. + +# Arguments +- `engine_name`: The database engine used for your source or target endpoint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. 
+- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_endpoint_settings( + EngineName; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeEndpointSettings", + Dict{String,Any}("EngineName" => EngineName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_endpoint_settings( + EngineName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DescribeEndpointSettings", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("EngineName" => EngineName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end @@ -1429,6 +1937,37 @@ function describe_endpoints( ) end +""" + describe_engine_versions() + describe_engine_versions(params::Dict{String,<:Any}) + +Returns information about the replication instance versions used in the project. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_engine_versions(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeEngineVersions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_engine_versions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeEngineVersions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_event_categories() describe_event_categories(params::Dict{String,<:Any}) @@ -1536,6 +2075,63 @@ function describe_events( ) end +""" + describe_extension_pack_associations(migration_project_identifier) + describe_extension_pack_associations(migration_project_identifier, params::Dict{String,<:Any}) + +Returns a paginated list of extension pack associations for the specified migration +project. An extension pack is an add-on module that emulates functions present in a source +database that are required when converting objects to the target database. + +# Arguments +- `migration_project_identifier`: The name or Amazon Resource Name (ARN) for the migration + project. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the extension pack associations described in the form of + key-value pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. 
To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. +""" +function describe_extension_pack_associations( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeExtensionPackAssociations", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_extension_pack_associations( + MigrationProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DescribeExtensionPackAssociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_fleet_advisor_collectors() describe_fleet_advisor_collectors(params::Dict{String,<:Any}) @@ -1733,36 +2329,35 @@ function describe_fleet_advisor_schemas( end """ - describe_orderable_replication_instances() - describe_orderable_replication_instances(params::Dict{String,<:Any}) + describe_instance_profiles() + describe_instance_profiles(params::Dict{String,<:Any}) -Returns information about the replication instance types that can be created in the -specified region. +Returns a paginated list of instance profiles for your account in the current region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Marker"`: An optional pagination token provided by a previous request. If this - parameter is specified, the response includes only records beyond the marker, up to the - value specified by MaxRecords. -- `"MaxRecords"`: The maximum number of records to include in the response. If more - records exist than the specified MaxRecords value, a pagination token called a marker is - included in the response so that the remaining results can be retrieved. Default: 100 - Constraints: Minimum 20, maximum 100. +- `"Filters"`: Filters applied to the instance profiles described in the form of key-value + pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. 
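# Example

A sketch of requesting a single page of instance profiles; the module name and page size
are placeholders, and the `Marker` response field is an assumption based on the
pagination behaviour described above:

    using AWS: @service
    @service Database_Migration_Service

    resp = Database_Migration_Service.describe_instance_profiles(
        Dict{String,Any}("MaxRecords" => 20)
    )
    # If more results exist, resend the call with `Marker` set from the response.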
""" -function describe_orderable_replication_instances(; - aws_config::AbstractAWSConfig=global_aws_config() -) +function describe_instance_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) return database_migration_service( - "DescribeOrderableReplicationInstances"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "DescribeInstanceProfiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end -function describe_orderable_replication_instances( +function describe_instance_profiles( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeOrderableReplicationInstances", + "DescribeInstanceProfiles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1770,149 +2365,217 @@ function describe_orderable_replication_instances( end """ - describe_pending_maintenance_actions() - describe_pending_maintenance_actions(params::Dict{String,<:Any}) + describe_metadata_model_assessments(migration_project_identifier) + describe_metadata_model_assessments(migration_project_identifier, params::Dict{String,<:Any}) -For internal use only +Returns a paginated list of metadata model assessments for your account in the current +region. + +# Arguments +- `migration_project_identifier`: The name or Amazon Resource Name (ARN) of the migration + project. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: -- `"Marker"`: An optional pagination token provided by a previous request. If this - parameter is specified, the response includes only records beyond the marker, up to the - value specified by MaxRecords. -- `"MaxRecords"`: The maximum number of records to include in the response. If more - records exist than the specified MaxRecords value, a pagination token called a marker is - included in the response so that the remaining results can be retrieved. Default: 100 - Constraints: Minimum 20, maximum 100. -- `"ReplicationInstanceArn"`: The Amazon Resource Name (ARN) of the replication instance. +- `"Filters"`: Filters applied to the metadata model assessments described in the form of + key-value pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. 
""" -function describe_pending_maintenance_actions(; - aws_config::AbstractAWSConfig=global_aws_config() +function describe_metadata_model_assessments( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribePendingMaintenanceActions"; + "DescribeMetadataModelAssessments", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_pending_maintenance_actions( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function describe_metadata_model_assessments( + MigrationProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "DescribePendingMaintenanceActions", - params; + "DescribeMetadataModelAssessments", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - describe_recommendation_limitations() - describe_recommendation_limitations(params::Dict{String,<:Any}) + describe_metadata_model_conversions(migration_project_identifier) + describe_metadata_model_conversions(migration_project_identifier, params::Dict{String,<:Any}) -Returns a paginated list of limitations for recommendations of target Amazon Web Services -engines. +Returns a paginated list of metadata model conversions for a migration project. + +# Arguments +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to the limitations described in the form of key-value pairs. +- `"Filters"`: Filters applied to the metadata model conversions described in the form of + key-value pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. - `"MaxRecords"`: The maximum number of records to include in the response. If more records - exist than the specified MaxRecords value, Fleet Advisor includes a pagination token in the - response so that you can retrieve the remaining results. -- `"NextToken"`: Specifies the unique pagination token that makes it possible to display - the next page of results. If this parameter is specified, the response includes only - records beyond the marker, up to the value specified by MaxRecords. If NextToken is - returned by a previous response, there are more results available. The value of NextToken - is a unique pagination token for each page. Make the call again using the returned token to - retrieve the next page. Keep all other arguments unchanged. + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. 
""" -function describe_recommendation_limitations(; - aws_config::AbstractAWSConfig=global_aws_config() +function describe_metadata_model_conversions( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeRecommendationLimitations"; + "DescribeMetadataModelConversions", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_recommendation_limitations( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function describe_metadata_model_conversions( + MigrationProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "DescribeRecommendationLimitations", - params; + "DescribeMetadataModelConversions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - describe_recommendations() - describe_recommendations(params::Dict{String,<:Any}) + describe_metadata_model_exports_as_script(migration_project_identifier) + describe_metadata_model_exports_as_script(migration_project_identifier, params::Dict{String,<:Any}) -Returns a paginated list of target engine recommendations for your source databases. +Returns a paginated list of metadata model exports. + +# Arguments +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to the target engine recommendations described in the form - of key-value pairs. +- `"Filters"`: Filters applied to the metadata model exports described in the form of + key-value pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. - `"MaxRecords"`: The maximum number of records to include in the response. If more records - exist than the specified MaxRecords value, Fleet Advisor includes a pagination token in the - response so that you can retrieve the remaining results. -- `"NextToken"`: Specifies the unique pagination token that makes it possible to display - the next page of results. If this parameter is specified, the response includes only - records beyond the marker, up to the value specified by MaxRecords. If NextToken is - returned by a previous response, there are more results available. The value of NextToken - is a unique pagination token for each page. Make the call again using the returned token to - retrieve the next page. Keep all other arguments unchanged. + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. 
""" -function describe_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_metadata_model_exports_as_script( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) return database_migration_service( - "DescribeRecommendations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DescribeMetadataModelExportsAsScript", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function describe_recommendations( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function describe_metadata_model_exports_as_script( + MigrationProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "DescribeRecommendations", - params; + "DescribeMetadataModelExportsAsScript", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - describe_refresh_schemas_status(endpoint_arn) - describe_refresh_schemas_status(endpoint_arn, params::Dict{String,<:Any}) + describe_metadata_model_exports_to_target(migration_project_identifier) + describe_metadata_model_exports_to_target(migration_project_identifier, params::Dict{String,<:Any}) -Returns the status of the RefreshSchemas operation. +Returns a paginated list of metadata model exports. # Arguments -- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the - endpoint. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the metadata model exports described in the form of + key-value pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. 
""" -function describe_refresh_schemas_status( - EndpointArn; aws_config::AbstractAWSConfig=global_aws_config() +function describe_metadata_model_exports_to_target( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeRefreshSchemasStatus", - Dict{String,Any}("EndpointArn" => EndpointArn); + "DescribeMetadataModelExportsToTarget", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_refresh_schemas_status( - EndpointArn, +function describe_metadata_model_exports_to_target( + MigrationProjectIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "DescribeRefreshSchemasStatus", + "DescribeMetadataModelExportsToTarget", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("EndpointArn" => EndpointArn), params) + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1920,45 +2583,49 @@ function describe_refresh_schemas_status( end """ - describe_replication_instance_task_logs(replication_instance_arn) - describe_replication_instance_task_logs(replication_instance_arn, params::Dict{String,<:Any}) + describe_metadata_model_imports(migration_project_identifier) + describe_metadata_model_imports(migration_project_identifier, params::Dict{String,<:Any}) -Returns information about the task logs for the specified task. +Returns a paginated list of metadata model imports. # Arguments -- `replication_instance_arn`: The Amazon Resource Name (ARN) of the replication instance. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Marker"`: An optional pagination token provided by a previous request. If this - parameter is specified, the response includes only records beyond the marker, up to the - value specified by MaxRecords. -- `"MaxRecords"`: The maximum number of records to include in the response. If more - records exist than the specified MaxRecords value, a pagination token called a marker is - included in the response so that the remaining results can be retrieved. Default: 100 - Constraints: Minimum 20, maximum 100. -""" -function describe_replication_instance_task_logs( - ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +- `"Filters"`: Filters applied to the metadata model imports described in the form of + key-value pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. +- `"MaxRecords"`: A paginated list of metadata model imports. 
+""" +function describe_metadata_model_imports( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationInstanceTaskLogs", - Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn); + "DescribeMetadataModelImports", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_replication_instance_task_logs( - ReplicationInstanceArn, +function describe_metadata_model_imports( + MigrationProjectIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "DescribeReplicationInstanceTaskLogs", + "DescribeMetadataModelImports", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn), + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier + ), params, ), ); @@ -1968,36 +2635,35 @@ function describe_replication_instance_task_logs( end """ - describe_replication_instances() - describe_replication_instances(params::Dict{String,<:Any}) + describe_migration_projects() + describe_migration_projects(params::Dict{String,<:Any}) -Returns information about replication instances for your account in the current region. +Returns a paginated list of migration projects for your account in the current region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to replication instances. Valid filter names: - replication-instance-arn | replication-instance-id | replication-instance-class | - engine-version -- `"Marker"`: An optional pagination token provided by a previous request. If this - parameter is specified, the response includes only records beyond the marker, up to the - value specified by MaxRecords. -- `"MaxRecords"`: The maximum number of records to include in the response. If more - records exist than the specified MaxRecords value, a pagination token called a marker is - included in the response so that the remaining results can be retrieved. Default: 100 - Constraints: Minimum 20, maximum 100. +- `"Filters"`: Filters applied to the migration projects described in the form of key-value + pairs. +- `"Marker"`: Specifies the unique pagination token that makes it possible to display the + next page of results. If this parameter is specified, the response includes only records + beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a + previous response, there are more results available. The value of Marker is a unique + pagination token for each page. To retrieve the next page, make the call again using the + returned token and keeping all other arguments unchanged. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, DMS includes a pagination token in the response + so that you can retrieve the remaining results. 
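# Example

A sketch of walking every page of results. The module name is a placeholder, and the
`Marker` and `MigrationProjects` response fields are assumptions based on the pagination
behaviour described above (the parsed response is treated as a Dict):

    using AWS: @service
    @service Database_Migration_Service

    params = Dict{String,Any}("MaxRecords" => 20)
    projects = Any[]
    while true
        resp = Database_Migration_Service.describe_migration_projects(params)
        append!(projects, get(resp, "MigrationProjects", []))
        haskey(resp, "Marker") || break
        params["Marker"] = resp["Marker"]
    end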
""" -function describe_replication_instances(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_migration_projects(; aws_config::AbstractAWSConfig=global_aws_config()) return database_migration_service( - "DescribeReplicationInstances"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "DescribeMigrationProjects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end -function describe_replication_instances( +function describe_migration_projects( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationInstances", + "DescribeMigrationProjects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2005,15 +2671,14 @@ function describe_replication_instances( end """ - describe_replication_subnet_groups() - describe_replication_subnet_groups(params::Dict{String,<:Any}) + describe_orderable_replication_instances() + describe_orderable_replication_instances(params::Dict{String,<:Any}) -Returns information about the replication subnet groups. +Returns information about the replication instance types that can be created in the +specified region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to replication subnet groups. Valid filter names: - replication-subnet-group-id - `"Marker"`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -2022,20 +2687,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100. """ -function describe_replication_subnet_groups(; +function describe_orderable_replication_instances(; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationSubnetGroups"; + "DescribeOrderableReplicationInstances"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_replication_subnet_groups( +function describe_orderable_replication_instances( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationSubnetGroups", + "DescribeOrderableReplicationInstances", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2043,16 +2708,14 @@ function describe_replication_subnet_groups( end """ - describe_replication_task_assessment_results() - describe_replication_task_assessment_results(params::Dict{String,<:Any}) + describe_pending_maintenance_actions() + describe_pending_maintenance_actions(params::Dict{String,<:Any}) -Returns the task assessment results from the Amazon S3 bucket that DMS creates in your -Amazon Web Services account. This action always returns the latest results. For more -information about DMS task assessments, see Creating a task assessment report in the -Database Migration Service User Guide. +For internal use only # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: - `"Marker"`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. 
@@ -2060,24 +2723,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100. -- `"ReplicationTaskArn"`: The Amazon Resource Name (ARN) string that uniquely identifies - the task. When this input parameter is specified, the API returns only one result and - ignore the values of the MaxRecords and Marker parameters. +- `"ReplicationInstanceArn"`: The Amazon Resource Name (ARN) of the replication instance. """ -function describe_replication_task_assessment_results(; +function describe_pending_maintenance_actions(; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationTaskAssessmentResults"; + "DescribePendingMaintenanceActions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_replication_task_assessment_results( +function describe_pending_maintenance_actions( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationTaskAssessmentResults", + "DescribePendingMaintenanceActions", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2085,41 +2746,39 @@ function describe_replication_task_assessment_results( end """ - describe_replication_task_assessment_runs() - describe_replication_task_assessment_runs(params::Dict{String,<:Any}) + describe_recommendation_limitations() + describe_recommendation_limitations(params::Dict{String,<:Any}) -Returns a paginated list of premigration assessment runs based on filter settings. These -filter settings can specify a combination of premigration assessment runs, migration tasks, -replication instances, and assessment run status values. This operation doesn't return -information about individual assessments. For this information, see the -DescribeReplicationTaskIndividualAssessments operation. +Returns a paginated list of limitations for recommendations of target Amazon Web Services +engines. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to the premigration assessment runs described in the form of - key-value pairs. Valid filter names: replication-task-assessment-run-arn, - replication-task-arn, replication-instance-arn, status -- `"Marker"`: An optional pagination token provided by a previous request. If this - parameter is specified, the response includes only records beyond the marker, up to the - value specified by MaxRecords. +- `"Filters"`: Filters applied to the limitations described in the form of key-value pairs. - `"MaxRecords"`: The maximum number of records to include in the response. If more records - exist than the specified MaxRecords value, a pagination token called a marker is included - in the response so that the remaining results can be retrieved. + exist than the specified MaxRecords value, Fleet Advisor includes a pagination token in the + response so that you can retrieve the remaining results. +- `"NextToken"`: Specifies the unique pagination token that makes it possible to display + the next page of results. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by MaxRecords. If NextToken is + returned by a previous response, there are more results available. 
The value of NextToken + is a unique pagination token for each page. Make the call again using the returned token to + retrieve the next page. Keep all other arguments unchanged. """ -function describe_replication_task_assessment_runs(; +function describe_recommendation_limitations(; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationTaskAssessmentRuns"; + "DescribeRecommendationLimitations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_replication_task_assessment_runs( +function describe_recommendation_limitations( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationTaskAssessmentRuns", + "DescribeRecommendationLimitations", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2127,77 +2786,104 @@ function describe_replication_task_assessment_runs( end """ - describe_replication_task_individual_assessments() - describe_replication_task_individual_assessments(params::Dict{String,<:Any}) + describe_recommendations() + describe_recommendations(params::Dict{String,<:Any}) -Returns a paginated list of individual assessments based on filter settings. These filter -settings can specify a combination of premigration assessment runs, migration tasks, and -assessment status values. +Returns a paginated list of target engine recommendations for your source databases. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to the individual assessments described in the form of - key-value pairs. Valid filter names: replication-task-assessment-run-arn, - replication-task-arn, status -- `"Marker"`: An optional pagination token provided by a previous request. If this - parameter is specified, the response includes only records beyond the marker, up to the - value specified by MaxRecords. +- `"Filters"`: Filters applied to the target engine recommendations described in the form + of key-value pairs. - `"MaxRecords"`: The maximum number of records to include in the response. If more records - exist than the specified MaxRecords value, a pagination token called a marker is included - in the response so that the remaining results can be retrieved. + exist than the specified MaxRecords value, Fleet Advisor includes a pagination token in the + response so that you can retrieve the remaining results. +- `"NextToken"`: Specifies the unique pagination token that makes it possible to display + the next page of results. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by MaxRecords. If NextToken is + returned by a previous response, there are more results available. The value of NextToken + is a unique pagination token for each page. Make the call again using the returned token to + retrieve the next page. Keep all other arguments unchanged. 
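+
+# Example
+Illustrative sketch only; the `Recommendations` response field and the `@service` binding
+are assumptions and not part of this generated definition.
+```julia
+using AWS
+@service Database_Migration_Service
+
+# Collect every page of Fleet Advisor target engine recommendations.
+recommendations = Any[]
+params = Dict{String,Any}("MaxRecords" => 100)
+while true
+    resp = Database_Migration_Service.describe_recommendations(params)
+    append!(recommendations, get(resp, "Recommendations", Any[]))
+    # Stop when no further pagination token is returned.
+    haskey(resp, "NextToken") || break
+    params["NextToken"] = resp["NextToken"]
+end
+```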
""" -function describe_replication_task_individual_assessments(; - aws_config::AbstractAWSConfig=global_aws_config() +function describe_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeRecommendations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationTaskIndividualAssessments"; + "DescribeRecommendations", + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_replication_task_individual_assessments( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + +""" + describe_refresh_schemas_status(endpoint_arn) + describe_refresh_schemas_status(endpoint_arn, params::Dict{String,<:Any}) + +Returns the status of the RefreshSchemas operation. + +# Arguments +- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the + endpoint. + +""" +function describe_refresh_schemas_status( + EndpointArn; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationTaskIndividualAssessments", - params; + "DescribeRefreshSchemasStatus", + Dict{String,Any}("EndpointArn" => EndpointArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_refresh_schemas_status( + EndpointArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DescribeRefreshSchemasStatus", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("EndpointArn" => EndpointArn), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - describe_replication_tasks() - describe_replication_tasks(params::Dict{String,<:Any}) + describe_replication_configs() + describe_replication_configs(params::Dict{String,<:Any}) -Returns information about replication tasks for your account in the current region. +Returns one or more existing DMS Serverless replication configurations as a list of +structures. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to replication tasks. Valid filter names: - replication-task-arn | replication-task-id | migration-type | endpoint-arn | - replication-instance-arn -- `"Marker"`: An optional pagination token provided by a previous request. If this +- `"Filters"`: Filters applied to the replication configs. +- `"Marker"`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. -- `"MaxRecords"`: The maximum number of records to include in the response. If more - records exist than the specified MaxRecords value, a pagination token called a marker is - included in the response so that the remaining results can be retrieved. Default: 100 - Constraints: Minimum 20, maximum 100. -- `"WithoutSettings"`: An option to set to avoid returning information about settings. Use - this to reduce overhead when setting information is too large. To use this option, choose - true; otherwise, choose false (the default). +- `"MaxRecords"`: The maximum number of records to include in the response. 
If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. """ -function describe_replication_tasks(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_replication_configs(; aws_config::AbstractAWSConfig=global_aws_config()) return database_migration_service( - "DescribeReplicationTasks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DescribeReplicationConfigs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end -function describe_replication_tasks( +function describe_replication_configs( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeReplicationTasks", + "DescribeReplicationConfigs", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2205,14 +2891,13 @@ function describe_replication_tasks( end """ - describe_schemas(endpoint_arn) - describe_schemas(endpoint_arn, params::Dict{String,<:Any}) + describe_replication_instance_task_logs(replication_instance_arn) + describe_replication_instance_task_logs(replication_instance_arn, params::Dict{String,<:Any}) -Returns information about the schema for the specified endpoint. +Returns information about the task logs for the specified task. # Arguments -- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the - endpoint. +- `replication_instance_arn`: The Amazon Resource Name (ARN) of the replication instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2224,23 +2909,29 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys included in the response so that the remaining results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100. """ -function describe_schemas(EndpointArn; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_replication_instance_task_logs( + ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +) return database_migration_service( - "DescribeSchemas", - Dict{String,Any}("EndpointArn" => EndpointArn); + "DescribeReplicationInstanceTaskLogs", + Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_schemas( - EndpointArn, +function describe_replication_instance_task_logs( + ReplicationInstanceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "DescribeSchemas", + "DescribeReplicationInstanceTaskLogs", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("EndpointArn" => EndpointArn), params) + mergewith( + _merge, + Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2248,97 +2939,121 @@ function describe_schemas( end """ - describe_table_statistics(replication_task_arn) - describe_table_statistics(replication_task_arn, params::Dict{String,<:Any}) + describe_replication_instances() + describe_replication_instances(params::Dict{String,<:Any}) -Returns table statistics on the database migration task, including table name, rows -inserted, rows updated, and rows deleted. Note that the \"last updated\" column the DMS -console only indicates the time that DMS last updated the table statistics record for a -table. 
It does not indicate the time of the last update to the table. +Returns information about replication instances for your account in the current region. -# Arguments -- `replication_task_arn`: The Amazon Resource Name (ARN) of the replication task. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to replication instances. Valid filter names: + replication-instance-arn | replication-instance-id | replication-instance-class | + engine-version +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more + records exist than the specified MaxRecords value, a pagination token called a marker is + included in the response so that the remaining results can be retrieved. Default: 100 + Constraints: Minimum 20, maximum 100. +""" +function describe_replication_instances(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeReplicationInstances"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_replication_instances( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationInstances", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_replication_subnet_groups() + describe_replication_subnet_groups(params::Dict{String,<:Any}) + +Returns information about the replication subnet groups. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: Filters applied to table statistics. Valid filter names: schema-name | - table-name | table-state A combination of filters creates an AND condition where each - record matches all specified filters. +- `"Filters"`: Filters applied to replication subnet groups. Valid filter names: + replication-subnet-group-id - `"Marker"`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. - `"MaxRecords"`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100 - Constraints: Minimum 20, maximum 500. + Constraints: Minimum 20, maximum 100. 
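+
+# Example
+A minimal sketch of filtering by the documented replication-subnet-group-id filter name;
+the Name/Values filter shape and the group identifier are illustrative placeholders, and
+the `@service` binding is assumed.
+```julia
+using AWS
+@service Database_Migration_Service
+
+# Describe only the subnet group with a given identifier (placeholder value).
+Database_Migration_Service.describe_replication_subnet_groups(Dict(
+    "Filters" => [Dict("Name" => "replication-subnet-group-id",
+                       "Values" => ["my-subnet-group"])],
+))
+```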
""" -function describe_table_statistics( - ReplicationTaskArn; aws_config::AbstractAWSConfig=global_aws_config() +function describe_replication_subnet_groups(; + aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeTableStatistics", - Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn); + "DescribeReplicationSubnetGroups"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_table_statistics( - ReplicationTaskArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), +function describe_replication_subnet_groups( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "DescribeTableStatistics", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn), params - ), - ); + "DescribeReplicationSubnetGroups", + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - import_certificate(certificate_identifier) - import_certificate(certificate_identifier, params::Dict{String,<:Any}) + describe_replication_table_statistics(replication_config_arn) + describe_replication_table_statistics(replication_config_arn, params::Dict{String,<:Any}) -Uploads the specified certificate. +Returns table and schema statistics for one or more provisioned replications that use a +given DMS Serverless replication configuration. # Arguments -- `certificate_identifier`: A customer-assigned name for the certificate. Identifiers must - begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't - end with a hyphen or contain two consecutive hyphens. +- `replication_config_arn`: The replication config to describe. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CertificatePem"`: The contents of a .pem file, which contains an X.509 certificate. -- `"CertificateWallet"`: The location of an imported Oracle Wallet certificate for use with - SSL. Provide the name of a .sso file using the fileb:// prefix. You can't provide the - certificate inline. Example: filebase64(\"{path.root}/rds-ca-2019-root.sso\") -- `"Tags"`: The tags associated with the certificate. +- `"Filters"`: Filters applied to the replication table statistics. +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. 
""" -function import_certificate( - CertificateIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +function describe_replication_table_statistics( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "ImportCertificate", - Dict{String,Any}("CertificateIdentifier" => CertificateIdentifier); + "DescribeReplicationTableStatistics", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function import_certificate( - CertificateIdentifier, +function describe_replication_table_statistics( + ReplicationConfigArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ImportCertificate", + "DescribeReplicationTableStatistics", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("CertificateIdentifier" => CertificateIdentifier), + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), params, ), ); @@ -2348,33 +3063,41 @@ function import_certificate( end """ - list_tags_for_resource() - list_tags_for_resource(params::Dict{String,<:Any}) + describe_replication_task_assessment_results() + describe_replication_task_assessment_results(params::Dict{String,<:Any}) -Lists all metadata tags attached to an DMS resource, including replication instance, -endpoint, subnet group, and migration task. For more information, see Tag data type -description. +Returns the task assessment results from the Amazon S3 bucket that DMS creates in your +Amazon Web Services account. This action always returns the latest results. For more +information about DMS task assessments, see Creating a task assessment report in the +Database Migration Service User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ResourceArn"`: The Amazon Resource Name (ARN) string that uniquely identifies the DMS - resource to list tags for. This returns a list of keys (names of tags) created for the - resource and their associated tag values. -- `"ResourceArnList"`: List of ARNs that identify multiple DMS resources that you want to - list tags for. This returns a list of keys (tag names) and their associated tag values. It - also returns each tag's associated ResourceArn value, which is the ARN of the resource for - which each listed tag is created. +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more + records exist than the specified MaxRecords value, a pagination token called a marker is + included in the response so that the remaining results can be retrieved. Default: 100 + Constraints: Minimum 20, maximum 100. +- `"ReplicationTaskArn"`: The Amazon Resource Name (ARN) string that uniquely identifies + the task. When this input parameter is specified, the API returns only one result and + ignore the values of the MaxRecords and Marker parameters. 
""" -function list_tags_for_resource(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_replication_task_assessment_results(; + aws_config::AbstractAWSConfig=global_aws_config() +) return database_migration_service( - "ListTagsForResource"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DescribeReplicationTaskAssessmentResults"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function list_tags_for_resource( +function describe_replication_task_assessment_results( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "ListTagsForResource", + "DescribeReplicationTaskAssessmentResults", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2382,86 +3105,576 @@ function list_tags_for_resource( end """ - modify_endpoint(endpoint_arn) - modify_endpoint(endpoint_arn, params::Dict{String,<:Any}) + describe_replication_task_assessment_runs() + describe_replication_task_assessment_runs(params::Dict{String,<:Any}) -Modifies the specified endpoint. For a MySQL source or target endpoint, don't explicitly -specify the database using the DatabaseName request parameter on the ModifyEndpoint API -call. Specifying DatabaseName when you modify a MySQL endpoint replicates all the task -tables to this single database. For MySQL endpoints, you specify the database only when you -specify the schema in the table-mapping rules of the DMS task. +Returns a paginated list of premigration assessment runs based on filter settings. These +filter settings can specify a combination of premigration assessment runs, migration tasks, +replication instances, and assessment run status values. This operation doesn't return +information about individual assessments. For this information, see the +DescribeReplicationTaskIndividualAssessments operation. -# Arguments -- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the - endpoint. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the premigration assessment runs described in the form of + key-value pairs. Valid filter names: replication-task-assessment-run-arn, + replication-task-arn, replication-instance-arn, status +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_replication_task_assessment_runs(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationTaskAssessmentRuns"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_replication_task_assessment_runs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationTaskAssessmentRuns", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_replication_task_individual_assessments() + describe_replication_task_individual_assessments(params::Dict{String,<:Any}) + +Returns a paginated list of individual assessments based on filter settings. 
These filter +settings can specify a combination of premigration assessment runs, migration tasks, and +assessment status values. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CertificateArn"`: The Amazon Resource Name (ARN) of the certificate used for SSL - connection. -- `"DatabaseName"`: The name of the endpoint database. For a MySQL source or target - endpoint, do not specify DatabaseName. -- `"DmsTransferSettings"`: The settings in JSON format for the DMS transfer type of source - endpoint. Attributes include the following: serviceAccessRoleArn - The Amazon Resource - Name (ARN) used by the service access IAM role. The role must allow the iam:PassRole - action. BucketName - The name of the S3 bucket to use. Shorthand syntax for these - settings is as follows: ServiceAccessRoleArn=string ,BucketName=string JSON syntax for - these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": - \"string\"} -- `"DocDbSettings"`: Settings in JSON format for the source DocumentDB endpoint. For more - information about the available settings, see the configuration properties section in - Using DocumentDB as a Target for Database Migration Service in the Database Migration - Service User Guide. -- `"DynamoDbSettings"`: Settings in JSON format for the target Amazon DynamoDB endpoint. - For information about other available settings, see Using Object Mapping to Migrate Data to - DynamoDB in the Database Migration Service User Guide. -- `"ElasticsearchSettings"`: Settings in JSON format for the target OpenSearch endpoint. - For more information about the available settings, see Extra Connection Attributes When - Using OpenSearch as a Target for DMS in the Database Migration Service User Guide. -- `"EndpointIdentifier"`: The database endpoint identifier. Identifiers must begin with a - letter and must contain only ASCII letters, digits, and hyphens. They can't end with a - hyphen or contain two consecutive hyphens. -- `"EndpointType"`: The type of endpoint. Valid values are source and target. -- `"EngineName"`: The database engine name. Valid values, depending on the EndpointType, - include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", - \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"db2-zos\", \"azuredb\", \"sybase\", - \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", - \"sqlserver\", \"neptune\", and \"babelfish\". -- `"ExactSettings"`: If this attribute is Y, the current call to ModifyEndpoint replaces - all existing endpoint settings with the exact settings that you specify in this call. If - this attribute is N, the current call to ModifyEndpoint does two things: It replaces any - endpoint settings that already exist with new values, for settings with the same names. - It creates new endpoint settings that you specify in the call, for settings with different - names. For example, if you call create-endpoint ... --endpoint-settings '{\"a\":1}' ..., - the endpoint has the following endpoint settings: '{\"a\":1}'. If you then call - modify-endpoint ... --endpoint-settings '{\"b\":2}' ... for the same endpoint, the endpoint - has the following settings: '{\"a\":1,\"b\":2}'. However, suppose that you follow this - with a call to modify-endpoint ... --endpoint-settings '{\"b\":2}' --exact-settings ... for - that same endpoint again. Then the endpoint has the following settings: '{\"b\":2}'. 
All - existing settings are replaced with the exact settings that you specify. -- `"ExternalTableDefinition"`: The external table definition. -- `"ExtraConnectionAttributes"`: Additional attributes associated with the connection. To - reset this parameter, pass the empty string (\"\") as an argument. -- `"GcpMySQLSettings"`: Settings in JSON format for the source GCP MySQL endpoint. -- `"IBMDb2Settings"`: Settings in JSON format for the source IBM Db2 LUW endpoint. For - information about other available settings, see Extra connection attributes when using Db2 - LUW as a source for DMS in the Database Migration Service User Guide. -- `"KafkaSettings"`: Settings in JSON format for the target Apache Kafka endpoint. For more - information about the available settings, see Using object mapping to migrate data to a - Kafka topic in the Database Migration Service User Guide. -- `"KinesisSettings"`: Settings in JSON format for the target endpoint for Amazon Kinesis - Data Streams. For more information about the available settings, see Using object mapping - to migrate data to a Kinesis data stream in the Database Migration Service User Guide. -- `"MicrosoftSQLServerSettings"`: Settings in JSON format for the source and target - Microsoft SQL Server endpoint. For information about other available settings, see Extra - connection attributes when using SQL Server as a source for DMS and Extra connection - attributes when using SQL Server as a target for DMS in the Database Migration Service User - Guide. -- `"MongoDbSettings"`: Settings in JSON format for the source MongoDB endpoint. For more - information about the available settings, see the configuration properties section in - Endpoint configuration settings when using MongoDB as a source for Database Migration - Service in the Database Migration Service User Guide. -- `"MySQLSettings"`: Settings in JSON format for the source and target MySQL endpoint. For +- `"Filters"`: Filters applied to the individual assessments described in the form of + key-value pairs. Valid filter names: replication-task-assessment-run-arn, + replication-task-arn, status +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_replication_task_individual_assessments(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationTaskIndividualAssessments"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_replication_task_individual_assessments( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationTaskIndividualAssessments", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_replication_tasks() + describe_replication_tasks(params::Dict{String,<:Any}) + +Returns information about replication tasks for your account in the current region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to replication tasks. 
Valid filter names: + replication-task-arn | replication-task-id | migration-type | endpoint-arn | + replication-instance-arn +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more + records exist than the specified MaxRecords value, a pagination token called a marker is + included in the response so that the remaining results can be retrieved. Default: 100 + Constraints: Minimum 20, maximum 100. +- `"WithoutSettings"`: An option to set to avoid returning information about settings. Use + this to reduce overhead when setting information is too large. To use this option, choose + true; otherwise, choose false (the default). +""" +function describe_replication_tasks(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeReplicationTasks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_replication_tasks( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationTasks", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_replications() + describe_replications(params::Dict{String,<:Any}) + +Provides details on replication progress by returning status information for one or more +provisioned DMS Serverless replications. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the replications. +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_replications(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeReplications"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_replications( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplications", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_schemas(endpoint_arn) + describe_schemas(endpoint_arn, params::Dict{String,<:Any}) + +Returns information about the schema for the specified endpoint. + +# Arguments +- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the + endpoint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more + records exist than the specified MaxRecords value, a pagination token called a marker is + included in the response so that the remaining results can be retrieved. 
Default: 100 + Constraints: Minimum 20, maximum 100. +""" +function describe_schemas(EndpointArn; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeSchemas", + Dict{String,Any}("EndpointArn" => EndpointArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_schemas( + EndpointArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DescribeSchemas", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("EndpointArn" => EndpointArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_table_statistics(replication_task_arn) + describe_table_statistics(replication_task_arn, params::Dict{String,<:Any}) + +Returns table statistics on the database migration task, including table name, rows +inserted, rows updated, and rows deleted. Note that the \"last updated\" column the DMS +console only indicates the time that DMS last updated the table statistics record for a +table. It does not indicate the time of the last update to the table. + +# Arguments +- `replication_task_arn`: The Amazon Resource Name (ARN) of the replication task. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to table statistics. Valid filter names: schema-name | + table-name | table-state A combination of filters creates an AND condition where each + record matches all specified filters. +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more + records exist than the specified MaxRecords value, a pagination token called a marker is + included in the response so that the remaining results can be retrieved. Default: 100 + Constraints: Minimum 20, maximum 500. +""" +function describe_table_statistics( + ReplicationTaskArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeTableStatistics", + Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_table_statistics( + ReplicationTaskArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DescribeTableStatistics", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + export_metadata_model_assessment(migration_project_identifier, selection_rules) + export_metadata_model_assessment(migration_project_identifier, selection_rules, params::Dict{String,<:Any}) + +Saves a copy of a database migration assessment report to your Amazon S3 bucket. DMS can +save your assessment report as a comma-separated value (CSV) or a PDF file. + +# Arguments +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). +- `selection_rules`: A value that specifies the database objects to assess. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AssessmentReportTypes"`: The file format of the assessment file. 
+- `"FileName"`: The name of the assessment file to create in your Amazon S3 bucket. +""" +function export_metadata_model_assessment( + MigrationProjectIdentifier, + SelectionRules; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ExportMetadataModelAssessment", + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function export_metadata_model_assessment( + MigrationProjectIdentifier, + SelectionRules, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ExportMetadataModelAssessment", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_certificate(certificate_identifier) + import_certificate(certificate_identifier, params::Dict{String,<:Any}) + +Uploads the specified certificate. + +# Arguments +- `certificate_identifier`: A customer-assigned name for the certificate. Identifiers must + begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't + end with a hyphen or contain two consecutive hyphens. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CertificatePem"`: The contents of a .pem file, which contains an X.509 certificate. +- `"CertificateWallet"`: The location of an imported Oracle Wallet certificate for use with + SSL. Provide the name of a .sso file using the fileb:// prefix. You can't provide the + certificate inline. Example: filebase64(\"{path.root}/rds-ca-2019-root.sso\") +- `"Tags"`: The tags associated with the certificate. +""" +function import_certificate( + CertificateIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ImportCertificate", + Dict{String,Any}("CertificateIdentifier" => CertificateIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_certificate( + CertificateIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ImportCertificate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("CertificateIdentifier" => CertificateIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource() + list_tags_for_resource(params::Dict{String,<:Any}) + +Lists all metadata tags attached to an DMS resource, including replication instance, +endpoint, subnet group, and migration task. For more information, see Tag data type +description. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceArn"`: The Amazon Resource Name (ARN) string that uniquely identifies the DMS + resource to list tags for. This returns a list of keys (names of tags) created for the + resource and their associated tag values. +- `"ResourceArnList"`: List of ARNs that identify multiple DMS resources that you want to + list tags for. This returns a list of keys (tag names) and their associated tag values. 
It + also returns each tag's associated ResourceArn value, which is the ARN of the resource for + which each listed tag is created. +""" +function list_tags_for_resource(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "ListTagsForResource"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_tags_for_resource( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ListTagsForResource", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_conversion_configuration(conversion_configuration, migration_project_identifier) + modify_conversion_configuration(conversion_configuration, migration_project_identifier, params::Dict{String,<:Any}) + +Modifies the specified schema conversion configuration using the provided parameters. + +# Arguments +- `conversion_configuration`: The new conversion configuration. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). + +""" +function modify_conversion_configuration( + ConversionConfiguration, + MigrationProjectIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyConversionConfiguration", + Dict{String,Any}( + "ConversionConfiguration" => ConversionConfiguration, + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_conversion_configuration( + ConversionConfiguration, + MigrationProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyConversionConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConversionConfiguration" => ConversionConfiguration, + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_data_provider(data_provider_identifier) + modify_data_provider(data_provider_identifier, params::Dict{String,<:Any}) + +Modifies the specified data provider using the provided settings. You must remove the data +provider from all migration projects before you can modify it. + +# Arguments +- `data_provider_identifier`: The identifier of the data provider. Identifiers must begin + with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with + a hyphen, or contain two consecutive hyphens. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DataProviderName"`: The name of the data provider. +- `"Description"`: A user-friendly description of the data provider. +- `"Engine"`: The type of database engine for the data provider. Valid values include + \"aurora\", \"aurora-postgresql\", \"mysql\", \"oracle\", \"postgres\", \"sqlserver\", + redshift, mariadb, mongodb, and docdb. A value of \"aurora\" represents Amazon Aurora + MySQL-Compatible Edition. +- `"ExactSettings"`: If this attribute is Y, the current call to ModifyDataProvider + replaces all existing data provider settings with the exact settings that you specify in + this call. If this attribute is N, the current call to ModifyDataProvider does two things: + It replaces any data provider settings that already exist with new values, for settings + with the same names. 
It creates new data provider settings that you specify in the call, + for settings with different names. +- `"Settings"`: The settings in JSON format for a data provider. +""" +function modify_data_provider( + DataProviderIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ModifyDataProvider", + Dict{String,Any}("DataProviderIdentifier" => DataProviderIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_data_provider( + DataProviderIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyDataProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DataProviderIdentifier" => DataProviderIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_endpoint(endpoint_arn) + modify_endpoint(endpoint_arn, params::Dict{String,<:Any}) + +Modifies the specified endpoint. For a MySQL source or target endpoint, don't explicitly +specify the database using the DatabaseName request parameter on the ModifyEndpoint API +call. Specifying DatabaseName when you modify a MySQL endpoint replicates all the task +tables to this single database. For MySQL endpoints, you specify the database only when you +specify the schema in the table-mapping rules of the DMS task. + +# Arguments +- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the + endpoint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CertificateArn"`: The Amazon Resource Name (ARN) of the certificate used for SSL + connection. +- `"DatabaseName"`: The name of the endpoint database. For a MySQL source or target + endpoint, do not specify DatabaseName. +- `"DmsTransferSettings"`: The settings in JSON format for the DMS transfer type of source + endpoint. Attributes include the following: serviceAccessRoleArn - The Amazon Resource + Name (ARN) used by the service access IAM role. The role must allow the iam:PassRole + action. BucketName - The name of the S3 bucket to use. Shorthand syntax for these + settings is as follows: ServiceAccessRoleArn=string ,BucketName=string JSON syntax for + these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": + \"string\"} +- `"DocDbSettings"`: Settings in JSON format for the source DocumentDB endpoint. For more + information about the available settings, see the configuration properties section in + Using DocumentDB as a Target for Database Migration Service in the Database Migration + Service User Guide. +- `"DynamoDbSettings"`: Settings in JSON format for the target Amazon DynamoDB endpoint. + For information about other available settings, see Using Object Mapping to Migrate Data to + DynamoDB in the Database Migration Service User Guide. +- `"ElasticsearchSettings"`: Settings in JSON format for the target OpenSearch endpoint. + For more information about the available settings, see Extra Connection Attributes When + Using OpenSearch as a Target for DMS in the Database Migration Service User Guide. +- `"EndpointIdentifier"`: The database endpoint identifier. Identifiers must begin with a + letter and must contain only ASCII letters, digits, and hyphens. They can't end with a + hyphen or contain two consecutive hyphens. +- `"EndpointType"`: The type of endpoint. Valid values are source and target. 
+- `"EngineName"`: The database engine name. Valid values, depending on the EndpointType, + include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", + \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"db2-zos\", \"azuredb\", \"sybase\", + \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", + \"sqlserver\", \"neptune\", and \"babelfish\". +- `"ExactSettings"`: If this attribute is Y, the current call to ModifyEndpoint replaces + all existing endpoint settings with the exact settings that you specify in this call. If + this attribute is N, the current call to ModifyEndpoint does two things: It replaces any + endpoint settings that already exist with new values, for settings with the same names. + It creates new endpoint settings that you specify in the call, for settings with different + names. For example, if you call create-endpoint ... --endpoint-settings '{\"a\":1}' ..., + the endpoint has the following endpoint settings: '{\"a\":1}'. If you then call + modify-endpoint ... --endpoint-settings '{\"b\":2}' ... for the same endpoint, the endpoint + has the following settings: '{\"a\":1,\"b\":2}'. However, suppose that you follow this + with a call to modify-endpoint ... --endpoint-settings '{\"b\":2}' --exact-settings ... for + that same endpoint again. Then the endpoint has the following settings: '{\"b\":2}'. All + existing settings are replaced with the exact settings that you specify. +- `"ExternalTableDefinition"`: The external table definition. +- `"ExtraConnectionAttributes"`: Additional attributes associated with the connection. To + reset this parameter, pass the empty string (\"\") as an argument. +- `"GcpMySQLSettings"`: Settings in JSON format for the source GCP MySQL endpoint. +- `"IBMDb2Settings"`: Settings in JSON format for the source IBM Db2 LUW endpoint. For + information about other available settings, see Extra connection attributes when using Db2 + LUW as a source for DMS in the Database Migration Service User Guide. +- `"KafkaSettings"`: Settings in JSON format for the target Apache Kafka endpoint. For more + information about the available settings, see Using object mapping to migrate data to a + Kafka topic in the Database Migration Service User Guide. +- `"KinesisSettings"`: Settings in JSON format for the target endpoint for Amazon Kinesis + Data Streams. For more information about the available settings, see Using object mapping + to migrate data to a Kinesis data stream in the Database Migration Service User Guide. +- `"MicrosoftSQLServerSettings"`: Settings in JSON format for the source and target + Microsoft SQL Server endpoint. For information about other available settings, see Extra + connection attributes when using SQL Server as a source for DMS and Extra connection + attributes when using SQL Server as a target for DMS in the Database Migration Service User + Guide. +- `"MongoDbSettings"`: Settings in JSON format for the source MongoDB endpoint. For more + information about the available settings, see the configuration properties section in + Endpoint configuration settings when using MongoDB as a source for Database Migration + Service in the Database Migration Service User Guide. +- `"MySQLSettings"`: Settings in JSON format for the source and target MySQL endpoint. 
For information about other available settings, see Extra connection attributes when using MySQL as a source for DMS and Extra connection attributes when using a MySQL-compatible database as a target for DMS in the Database Migration Service User Guide. @@ -2492,25 +3705,517 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys For information about other available settings, see Extra connection attributes when using SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide. +- `"TimestreamSettings"`: Settings in JSON format for the target Amazon Timestream endpoint. - `"Username"`: The user name to be used to login to the endpoint database. """ -function modify_endpoint(EndpointArn; aws_config::AbstractAWSConfig=global_aws_config()) +function modify_endpoint(EndpointArn; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "ModifyEndpoint", + Dict{String,Any}("EndpointArn" => EndpointArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_endpoint( + EndpointArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyEndpoint", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("EndpointArn" => EndpointArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_event_subscription(subscription_name) + modify_event_subscription(subscription_name, params::Dict{String,<:Any}) + +Modifies an existing DMS event notification subscription. + +# Arguments +- `subscription_name`: The name of the DMS event notification subscription to be modified. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Enabled"`: A Boolean value; set to true to activate the subscription. +- `"EventCategories"`: A list of event categories for a source type that you want to + subscribe to. Use the DescribeEventCategories action to see a list of event categories. +- `"SnsTopicArn"`: The Amazon Resource Name (ARN) of the Amazon SNS topic created for + event notification. The ARN is created by Amazon SNS when you create a topic and subscribe + to it. +- `"SourceType"`: The type of DMS resource that generates the events you want to subscribe + to. Valid values: replication-instance | replication-task +""" +function modify_event_subscription( + SubscriptionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ModifyEventSubscription", + Dict{String,Any}("SubscriptionName" => SubscriptionName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_event_subscription( + SubscriptionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyEventSubscription", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("SubscriptionName" => SubscriptionName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_instance_profile(instance_profile_identifier) + modify_instance_profile(instance_profile_identifier, params::Dict{String,<:Any}) + +Modifies the specified instance profile using the provided parameters. All migration +projects associated with the instance profile must be deleted or modified before you can +modify the instance profile. 
+ +# Arguments +- `instance_profile_identifier`: The identifier of the instance profile. Identifiers must + begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't + end with a hyphen, or contain two consecutive hyphens. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AvailabilityZone"`: The Availability Zone where the instance profile runs. +- `"Description"`: A user-friendly description for the instance profile. +- `"InstanceProfileName"`: A user-friendly name for the instance profile. +- `"KmsKeyArn"`: The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the + connection parameters for the instance profile. If you don't specify a value for the + KmsKeyArn parameter, then DMS uses your default encryption key. KMS creates the default + encryption key for your Amazon Web Services account. Your Amazon Web Services account has a + different default encryption key for each Amazon Web Services Region. +- `"NetworkType"`: Specifies the network type for the instance profile. A value of IPV4 + represents an instance profile with IPv4 network type and only supports IPv4 addressing. A + value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 + addressing. A value of DUAL represents an instance profile with dual network type that + supports IPv4 and IPv6 addressing. +- `"PubliclyAccessible"`: Specifies the accessibility options for the instance profile. A + value of true represents an instance profile with a public IP address. A value of false + represents an instance profile with a private IP address. The default value is true. +- `"SubnetGroupIdentifier"`: A subnet group to associate with the instance profile. +- `"VpcSecurityGroups"`: Specifies the VPC security groups to be used with the instance + profile. The VPC security group must work with the VPC containing the instance profile. +""" +function modify_instance_profile( + InstanceProfileIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ModifyInstanceProfile", + Dict{String,Any}("InstanceProfileIdentifier" => InstanceProfileIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_instance_profile( + InstanceProfileIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyInstanceProfile", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InstanceProfileIdentifier" => InstanceProfileIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_migration_project(migration_project_identifier) + modify_migration_project(migration_project_identifier, params::Dict{String,<:Any}) + +Modifies the specified migration project using the provided parameters. The migration +project must be closed before you can modify it. + +# Arguments +- `migration_project_identifier`: The identifier of the migration project. Identifiers must + begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't + end with a hyphen, or contain two consecutive hyphens. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A user-friendly description of the migration project. +- `"InstanceProfileIdentifier"`: The name or Amazon Resource Name (ARN) for the instance + profile. 
+- `"MigrationProjectName"`: A user-friendly name for the migration project.
+- `"SchemaConversionApplicationAttributes"`: The schema conversion application attributes,
+  including the Amazon S3 bucket name and Amazon S3 role ARN.
+- `"SourceDataProviderDescriptors"`: Information about the source data provider, including
+  the name, ARN, and Amazon Web Services Secrets Manager parameters.
+- `"TargetDataProviderDescriptors"`: Information about the target data provider, including
+  the name, ARN, and Amazon Web Services Secrets Manager parameters.
+- `"TransformationRules"`: The settings in JSON format for migration rules. Migration rules
+  make it possible for you to change the object names according to the rules that you
+  specify. For example, you can change an object name to lowercase or uppercase, add or
+  remove a prefix or suffix, or rename objects.
+"""
+function modify_migration_project(
+    MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return database_migration_service(
+        "ModifyMigrationProject",
+        Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function modify_migration_project(
+    MigrationProjectIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return database_migration_service(
+        "ModifyMigrationProject",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "MigrationProjectIdentifier" => MigrationProjectIdentifier
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    modify_replication_config(replication_config_arn)
+    modify_replication_config(replication_config_arn, params::Dict{String,<:Any})
+
+Modifies an existing DMS Serverless replication configuration that you can use to start a
+replication. This command includes input validation and logic to check the state of any
+replication that uses this configuration. You can only modify a replication configuration
+before any replication that uses it has started. As soon as you have initially started a
+replication with a given configuration, you can't modify that configuration, even if you
+stop it. Other run statuses that allow you to run this command include FAILED and CREATED.
+A provisioning state that allows you to run this command is FAILED_PROVISION.
+
+# Arguments
+- `replication_config_arn`: The Amazon Resource Name of the replication to modify.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"ComputeConfig"`: Configuration parameters for provisioning an DMS Serverless
+  replication.
+- `"ReplicationConfigIdentifier"`: The new replication config to apply to the replication.
+- `"ReplicationSettings"`: The settings for the replication.
+- `"ReplicationType"`: The type of replication.
+- `"SourceEndpointArn"`: The Amazon Resource Name (ARN) of the source endpoint for this DMS
+  serverless replication configuration.
+- `"SupplementalSettings"`: Additional settings for the replication.
+- `"TableMappings"`: Table mappings specified in the replication.
+- `"TargetEndpointArn"`: The Amazon Resource Name (ARN) of the target endpoint for this DMS
+  serverless replication configuration.
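# Illustrative usage sketch, assuming the DMS bindings are loaded with AWS.jl's @service
# macro; the replication configuration ARN and the optional key/value shown are
# placeholders, not values taken from this patch.
using AWS
@service Database_Migration_Service

Database_Migration_Service.modify_replication_config(
    "arn:aws:dms:us-east-1:111122223333:replication-config:EXAMPLE",  # placeholder ARN
    Dict("ReplicationType" => "full-load-and-cdc"),  # optional keys go in the params Dict
)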
+""" +function modify_replication_config( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ModifyReplicationConfig", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_replication_config( + ReplicationConfigArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyReplicationConfig", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_replication_instance(replication_instance_arn) + modify_replication_instance(replication_instance_arn, params::Dict{String,<:Any}) + +Modifies the replication instance to apply new settings. You can change one or more +parameters by specifying these parameters and the new values in the request. Some settings +are applied during the maintenance window. + +# Arguments +- `replication_instance_arn`: The Amazon Resource Name (ARN) of the replication instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllocatedStorage"`: The amount of storage (in gigabytes) to be allocated for the + replication instance. +- `"AllowMajorVersionUpgrade"`: Indicates that major version upgrades are allowed. Changing + this parameter does not result in an outage, and the change is asynchronously applied as + soon as possible. This parameter must be set to true when specifying a value for the + EngineVersion parameter that is a different major version than the replication instance's + current version. +- `"ApplyImmediately"`: Indicates whether the changes should be applied immediately or + during the next maintenance window. +- `"AutoMinorVersionUpgrade"`: A value that indicates that minor version upgrades are + applied automatically to the replication instance during the maintenance window. Changing + this parameter doesn't result in an outage, except in the case described following. The + change is asynchronously applied as soon as possible. An outage does result if these + factors apply: This parameter is set to true during the maintenance window. A newer + minor version is available. DMS has enabled automatic patching for the given engine + version. +- `"EngineVersion"`: The engine version number of the replication instance. When modifying + a major engine version of an instance, also set AllowMajorVersionUpgrade to true. +- `"MultiAZ"`: Specifies whether the replication instance is a Multi-AZ deployment. You + can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. +- `"NetworkType"`: The type of IP address protocol used by a replication instance, such as + IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet + supported. +- `"PreferredMaintenanceWindow"`: The weekly time range (in UTC) during which system + maintenance can occur, which might result in an outage. Changing this parameter does not + result in an outage, except in the following situation, and the change is asynchronously + applied as soon as possible. If moving this window to the current time, there must be at + least 30 minutes between the current time and end of the window to ensure pending changes + are applied. 
Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon + | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes +- `"ReplicationInstanceClass"`: The compute and memory capacity of the replication instance + as defined for the specified replication instance class. For example to specify the + instance class dms.c4.large, set this parameter to \"dms.c4.large\". For more information + on the settings and capacities for the available replication instance classes, see + Selecting the right DMS replication instance for your migration. +- `"ReplicationInstanceIdentifier"`: The replication instance identifier. This parameter is + stored as a lowercase string. +- `"VpcSecurityGroupIds"`: Specifies the VPC security group to be used with the + replication instance. The VPC security group must work with the VPC containing the + replication instance. +""" +function modify_replication_instance( + ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ModifyReplicationInstance", + Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_replication_instance( + ReplicationInstanceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyReplicationInstance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_replication_subnet_group(replication_subnet_group_identifier, subnet_ids) + modify_replication_subnet_group(replication_subnet_group_identifier, subnet_ids, params::Dict{String,<:Any}) + +Modifies the settings for the specified replication subnet group. + +# Arguments +- `replication_subnet_group_identifier`: The name of the replication instance subnet group. +- `subnet_ids`: A list of subnet IDs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ReplicationSubnetGroupDescription"`: A description for the replication instance subnet + group. +""" +function modify_replication_subnet_group( + ReplicationSubnetGroupIdentifier, + SubnetIds; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyReplicationSubnetGroup", + Dict{String,Any}( + "ReplicationSubnetGroupIdentifier" => ReplicationSubnetGroupIdentifier, + "SubnetIds" => SubnetIds, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_replication_subnet_group( + ReplicationSubnetGroupIdentifier, + SubnetIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyReplicationSubnetGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ReplicationSubnetGroupIdentifier" => ReplicationSubnetGroupIdentifier, + "SubnetIds" => SubnetIds, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_replication_task(replication_task_arn) + modify_replication_task(replication_task_arn, params::Dict{String,<:Any}) + +Modifies the specified replication task. You can't modify the task endpoints. The task must +be stopped before you can modify it. 
For more information about DMS tasks, see Working +with Migration Tasks in the Database Migration Service User Guide. + +# Arguments +- `replication_task_arn`: The Amazon Resource Name (ARN) of the replication task. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CdcStartPosition"`: Indicates when you want a change data capture (CDC) operation to + start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation + to start. Specifying both values results in an error. The value can be in date, + checkpoint, or LSN/SCN format. Date Example: --cdc-start-position “2018-03-08T12:12:12” + Checkpoint Example: --cdc-start-position + \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changel + og.157832:1876#0#0#*#0#93\" LSN Example: --cdc-start-position + “mysql-bin-changelog.000024:373” When you use this task setting with a source + PostgreSQL database, a logical replication slot should already be created and associated + with the source endpoint. You can verify this by setting the slotName extra connection + attribute to the name of this logical replication slot. For more information, see Extra + Connection Attributes When Using PostgreSQL as a Source for DMS. +- `"CdcStartTime"`: Indicates the start time for a change data capture (CDC) operation. Use + either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. + Specifying both values results in an error. Timestamp Example: --cdc-start-time + “2018-03-08T12:12:12” +- `"CdcStopPosition"`: Indicates when you want a change data capture (CDC) operation to + stop. The value can be either server time or commit time. Server time example: + --cdc-stop-position “server_time:2018-02-09T12:12:12” Commit time example: + --cdc-stop-position “commit_time:2018-02-09T12:12:12“ +- `"MigrationType"`: The migration type. Valid values: full-load | cdc | full-load-and-cdc +- `"ReplicationTaskIdentifier"`: The replication task identifier. Constraints: Must + contain 1-255 alphanumeric characters or hyphens. First character must be a letter. + Cannot end with a hyphen or contain two consecutive hyphens. +- `"ReplicationTaskSettings"`: JSON file that contains settings for the task, such as task + metadata settings. +- `"TableMappings"`: When using the CLI or boto3, provide the path of the JSON file that + contains the table mappings. Precede the path with file://. For example, --table-mappings + file://mappingfile.json. When working with the DMS API, provide the JSON as the parameter + value. +- `"TaskData"`: Supplemental information that the task requires to migrate the data for + certain source and target endpoints. For more information, see Specifying Supplemental Data + for Task Settings in the Database Migration Service User Guide. 
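# Illustrative usage sketch, assuming the DMS bindings are loaded with AWS.jl's @service
# macro; the task ARN and parameter values below are placeholders. Optional keys documented
# above are passed through the params Dict of the two-argument method.
using AWS
@service Database_Migration_Service

Database_Migration_Service.modify_replication_task(
    "arn:aws:dms:us-east-1:111122223333:task:EXAMPLE",  # placeholder replication task ARN
    Dict(
        "MigrationType" => "full-load-and-cdc",
        "CdcStopPosition" => "server_time:2018-02-09T12:12:12",
    ),
)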
+""" +function modify_replication_task( + ReplicationTaskArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ModifyReplicationTask", + Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_replication_task( + ReplicationTaskArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyReplicationTask", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + move_replication_task(replication_task_arn, target_replication_instance_arn) + move_replication_task(replication_task_arn, target_replication_instance_arn, params::Dict{String,<:Any}) + +Moves a replication task from its current replication instance to a different target +replication instance using the specified parameters. The target replication instance must +be created with the same or later DMS version as the current replication instance. + +# Arguments +- `replication_task_arn`: The Amazon Resource Name (ARN) of the task that you want to move. +- `target_replication_instance_arn`: The ARN of the replication instance where you want to + move the task to. + +""" +function move_replication_task( + ReplicationTaskArn, + TargetReplicationInstanceArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) return database_migration_service( - "ModifyEndpoint", - Dict{String,Any}("EndpointArn" => EndpointArn); + "MoveReplicationTask", + Dict{String,Any}( + "ReplicationTaskArn" => ReplicationTaskArn, + "TargetReplicationInstanceArn" => TargetReplicationInstanceArn, + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function modify_endpoint( - EndpointArn, +function move_replication_task( + ReplicationTaskArn, + TargetReplicationInstanceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ModifyEndpoint", + "MoveReplicationTask", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("EndpointArn" => EndpointArn), params) + mergewith( + _merge, + Dict{String,Any}( + "ReplicationTaskArn" => ReplicationTaskArn, + "TargetReplicationInstanceArn" => TargetReplicationInstanceArn, + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2518,45 +4223,47 @@ function modify_endpoint( end """ - modify_event_subscription(subscription_name) - modify_event_subscription(subscription_name, params::Dict{String,<:Any}) + reboot_replication_instance(replication_instance_arn) + reboot_replication_instance(replication_instance_arn, params::Dict{String,<:Any}) -Modifies an existing DMS event notification subscription. +Reboots a replication instance. Rebooting results in a momentary outage, until the +replication instance becomes available again. # Arguments -- `subscription_name`: The name of the DMS event notification subscription to be modified. +- `replication_instance_arn`: The Amazon Resource Name (ARN) of the replication instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Enabled"`: A Boolean value; set to true to activate the subscription. -- `"EventCategories"`: A list of event categories for a source type that you want to - subscribe to. Use the DescribeEventCategories action to see a list of event categories. 
-- `"SnsTopicArn"`: The Amazon Resource Name (ARN) of the Amazon SNS topic created for - event notification. The ARN is created by Amazon SNS when you create a topic and subscribe - to it. -- `"SourceType"`: The type of DMS resource that generates the events you want to subscribe - to. Valid values: replication-instance | replication-task +- `"ForceFailover"`: If this parameter is true, the reboot is conducted through a Multi-AZ + failover. If the instance isn't configured for Multi-AZ, then you can't specify true. ( + --force-planned-failover and --force-failover can't both be set to true.) +- `"ForcePlannedFailover"`: If this parameter is true, the reboot is conducted through a + planned Multi-AZ failover where resources are released and cleaned up prior to conducting + the failover. If the instance isn''t configured for Multi-AZ, then you can't specify true. + ( --force-planned-failover and --force-failover can't both be set to true.) """ -function modify_event_subscription( - SubscriptionName; aws_config::AbstractAWSConfig=global_aws_config() +function reboot_replication_instance( + ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "ModifyEventSubscription", - Dict{String,Any}("SubscriptionName" => SubscriptionName); + "RebootReplicationInstance", + Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function modify_event_subscription( - SubscriptionName, +function reboot_replication_instance( + ReplicationInstanceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ModifyEventSubscription", + "RebootReplicationInstance", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("SubscriptionName" => SubscriptionName), params + _merge, + Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn), + params, ), ); aws_config=aws_config, @@ -2565,84 +4272,46 @@ function modify_event_subscription( end """ - modify_replication_instance(replication_instance_arn) - modify_replication_instance(replication_instance_arn, params::Dict{String,<:Any}) + refresh_schemas(endpoint_arn, replication_instance_arn) + refresh_schemas(endpoint_arn, replication_instance_arn, params::Dict{String,<:Any}) -Modifies the replication instance to apply new settings. You can change one or more -parameters by specifying these parameters and the new values in the request. Some settings -are applied during the maintenance window. +Populates the schema for the specified endpoint. This is an asynchronous operation and can +take several minutes. You can check the status of this operation by calling the +DescribeRefreshSchemasStatus operation. # Arguments +- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the + endpoint. - `replication_instance_arn`: The Amazon Resource Name (ARN) of the replication instance. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AllocatedStorage"`: The amount of storage (in gigabytes) to be allocated for the - replication instance. -- `"AllowMajorVersionUpgrade"`: Indicates that major version upgrades are allowed. Changing - this parameter does not result in an outage, and the change is asynchronously applied as - soon as possible. 
This parameter must be set to true when specifying a value for the - EngineVersion parameter that is a different major version than the replication instance's - current version. -- `"ApplyImmediately"`: Indicates whether the changes should be applied immediately or - during the next maintenance window. -- `"AutoMinorVersionUpgrade"`: A value that indicates that minor version upgrades are - applied automatically to the replication instance during the maintenance window. Changing - this parameter doesn't result in an outage, except in the case described following. The - change is asynchronously applied as soon as possible. An outage does result if these - factors apply: This parameter is set to true during the maintenance window. A newer - minor version is available. DMS has enabled automatic patching for the given engine - version. When AutoMinorVersionUpgrade is enabled, DMS uses the current default engine - version when you modify a replication instance. For example, if you set EngineVersion to a - lower version number than the current default version, DMS uses the default version. If - AutoMinorVersionUpgrade isn’t enabled when you modify a replication instance, DMS uses - the engine version specified by the EngineVersion parameter. -- `"EngineVersion"`: The engine version number of the replication instance. When modifying - a major engine version of an instance, also set AllowMajorVersionUpgrade to true. -- `"MultiAZ"`: Specifies whether the replication instance is a Multi-AZ deployment. You - can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. -- `"NetworkType"`: The type of IP address protocol used by a replication instance, such as - IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet - supported. -- `"PreferredMaintenanceWindow"`: The weekly time range (in UTC) during which system - maintenance can occur, which might result in an outage. Changing this parameter does not - result in an outage, except in the following situation, and the change is asynchronously - applied as soon as possible. If moving this window to the current time, there must be at - least 30 minutes between the current time and end of the window to ensure pending changes - are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon - | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes -- `"ReplicationInstanceClass"`: The compute and memory capacity of the replication instance - as defined for the specified replication instance class. For example to specify the - instance class dms.c4.large, set this parameter to \"dms.c4.large\". For more information - on the settings and capacities for the available replication instance classes, see - Selecting the right DMS replication instance for your migration. -- `"ReplicationInstanceIdentifier"`: The replication instance identifier. This parameter is - stored as a lowercase string. -- `"VpcSecurityGroupIds"`: Specifies the VPC security group to be used with the - replication instance. The VPC security group must work with the VPC containing the - replication instance. 
""" -function modify_replication_instance( - ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function refresh_schemas( + EndpointArn, ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "ModifyReplicationInstance", - Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn); + "RefreshSchemas", + Dict{String,Any}( + "EndpointArn" => EndpointArn, "ReplicationInstanceArn" => ReplicationInstanceArn + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function modify_replication_instance( +function refresh_schemas( + EndpointArn, ReplicationInstanceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ModifyReplicationInstance", + "RefreshSchemas", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn), + Dict{String,Any}( + "EndpointArn" => EndpointArn, + "ReplicationInstanceArn" => ReplicationInstanceArn, + ), params, ), ); @@ -2652,49 +4321,51 @@ function modify_replication_instance( end """ - modify_replication_subnet_group(replication_subnet_group_identifier, subnet_ids) - modify_replication_subnet_group(replication_subnet_group_identifier, subnet_ids, params::Dict{String,<:Any}) + reload_replication_tables(replication_config_arn, tables_to_reload) + reload_replication_tables(replication_config_arn, tables_to_reload, params::Dict{String,<:Any}) -Modifies the settings for the specified replication subnet group. +Reloads the target database table with the source data for a given DMS Serverless +replication configuration. You can only use this operation with a task in the RUNNING +state, otherwise the service will throw an InvalidResourceStateFault exception. # Arguments -- `replication_subnet_group_identifier`: The name of the replication instance subnet group. -- `subnet_ids`: A list of subnet IDs. +- `replication_config_arn`: The Amazon Resource Name of the replication config for which to + reload tables. +- `tables_to_reload`: The list of tables to reload. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ReplicationSubnetGroupDescription"`: A description for the replication instance subnet - group. +- `"ReloadOption"`: Options for reload. Specify data-reload to reload the data and + re-validate it if validation is enabled. Specify validate-only to re-validate the table. + This option applies only when validation is enabled for the replication. 
""" -function modify_replication_subnet_group( - ReplicationSubnetGroupIdentifier, - SubnetIds; - aws_config::AbstractAWSConfig=global_aws_config(), +function reload_replication_tables( + ReplicationConfigArn, TablesToReload; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "ModifyReplicationSubnetGroup", + "ReloadReplicationTables", Dict{String,Any}( - "ReplicationSubnetGroupIdentifier" => ReplicationSubnetGroupIdentifier, - "SubnetIds" => SubnetIds, + "ReplicationConfigArn" => ReplicationConfigArn, + "TablesToReload" => TablesToReload, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function modify_replication_subnet_group( - ReplicationSubnetGroupIdentifier, - SubnetIds, +function reload_replication_tables( + ReplicationConfigArn, + TablesToReload, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ModifyReplicationSubnetGroup", + "ReloadReplicationTables", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "ReplicationSubnetGroupIdentifier" => ReplicationSubnetGroupIdentifier, - "SubnetIds" => SubnetIds, + "ReplicationConfigArn" => ReplicationConfigArn, + "TablesToReload" => TablesToReload, ), params, ), @@ -2705,72 +4376,95 @@ function modify_replication_subnet_group( end """ - modify_replication_task(replication_task_arn) - modify_replication_task(replication_task_arn, params::Dict{String,<:Any}) + reload_tables(replication_task_arn, tables_to_reload) + reload_tables(replication_task_arn, tables_to_reload, params::Dict{String,<:Any}) -Modifies the specified replication task. You can't modify the task endpoints. The task must -be stopped before you can modify it. For more information about DMS tasks, see Working -with Migration Tasks in the Database Migration Service User Guide. +Reloads the target database table with the source data. You can only use this operation +with a task in the RUNNING state, otherwise the service will throw an +InvalidResourceStateFault exception. # Arguments - `replication_task_arn`: The Amazon Resource Name (ARN) of the replication task. +- `tables_to_reload`: The name and schema of the table to be reloaded. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CdcStartPosition"`: Indicates when you want a change data capture (CDC) operation to - start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation - to start. Specifying both values results in an error. The value can be in date, - checkpoint, or LSN/SCN format. Date Example: --cdc-start-position “2018-03-08T12:12:12” - Checkpoint Example: --cdc-start-position - \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changel - og.157832:1876#0#0#*#0#93\" LSN Example: --cdc-start-position - “mysql-bin-changelog.000024:373” When you use this task setting with a source - PostgreSQL database, a logical replication slot should already be created and associated - with the source endpoint. You can verify this by setting the slotName extra connection - attribute to the name of this logical replication slot. For more information, see Extra - Connection Attributes When Using PostgreSQL as a Source for DMS. -- `"CdcStartTime"`: Indicates the start time for a change data capture (CDC) operation. Use - either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. - Specifying both values results in an error. 
Timestamp Example: --cdc-start-time - “2018-03-08T12:12:12” -- `"CdcStopPosition"`: Indicates when you want a change data capture (CDC) operation to - stop. The value can be either server time or commit time. Server time example: - --cdc-stop-position “server_time:2018-02-09T12:12:12” Commit time example: - --cdc-stop-position “commit_time: 2018-02-09T12:12:12“ -- `"MigrationType"`: The migration type. Valid values: full-load | cdc | full-load-and-cdc -- `"ReplicationTaskIdentifier"`: The replication task identifier. Constraints: Must - contain 1-255 alphanumeric characters or hyphens. First character must be a letter. - Cannot end with a hyphen or contain two consecutive hyphens. -- `"ReplicationTaskSettings"`: JSON file that contains settings for the task, such as task - metadata settings. -- `"TableMappings"`: When using the CLI or boto3, provide the path of the JSON file that - contains the table mappings. Precede the path with file://. For example, --table-mappings - file://mappingfile.json. When working with the DMS API, provide the JSON as the parameter - value. -- `"TaskData"`: Supplemental information that the task requires to migrate the data for - certain source and target endpoints. For more information, see Specifying Supplemental Data - for Task Settings in the Database Migration Service User Guide. +- `"ReloadOption"`: Options for reload. Specify data-reload to reload the data and + re-validate it if validation is enabled. Specify validate-only to re-validate the table. + This option applies only when validation is enabled for the task. Valid values: + data-reload, validate-only Default value is data-reload. +""" +function reload_tables( + ReplicationTaskArn, TablesToReload; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ReloadTables", + Dict{String,Any}( + "ReplicationTaskArn" => ReplicationTaskArn, "TablesToReload" => TablesToReload + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reload_tables( + ReplicationTaskArn, + TablesToReload, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ReloadTables", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ReplicationTaskArn" => ReplicationTaskArn, + "TablesToReload" => TablesToReload, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + remove_tags_from_resource(resource_arn, tag_keys) + remove_tags_from_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes metadata tags from an DMS resource, including replication instance, endpoint, +subnet group, and migration task. For more information, see Tag data type description. + +# Arguments +- `resource_arn`: An DMS resource from which you want to remove tag(s). The value for this + parameter is an Amazon Resource Name (ARN). +- `tag_keys`: The tag key (name) of the tag to be removed. 
+ """ -function modify_replication_task( - ReplicationTaskArn; aws_config::AbstractAWSConfig=global_aws_config() +function remove_tags_from_resource( + ResourceArn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "ModifyReplicationTask", - Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn); + "RemoveTagsFromResource", + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function modify_replication_task( - ReplicationTaskArn, +function remove_tags_from_resource( + ResourceArn, + TagKeys, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ModifyReplicationTask", + "RemoveTagsFromResource", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("ReplicationTaskArn" => ReplicationTaskArn), params + _merge, + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys), + params, ), ); aws_config=aws_config, @@ -2779,48 +4473,62 @@ function modify_replication_task( end """ - move_replication_task(replication_task_arn, target_replication_instance_arn) - move_replication_task(replication_task_arn, target_replication_instance_arn, params::Dict{String,<:Any}) + run_fleet_advisor_lsa_analysis() + run_fleet_advisor_lsa_analysis(params::Dict{String,<:Any}) -Moves a replication task from its current replication instance to a different target -replication instance using the specified parameters. The target replication instance must -be created with the same or later DMS version as the current replication instance. +Runs large-scale assessment (LSA) analysis on every Fleet Advisor collector in your account. + +""" +function run_fleet_advisor_lsa_analysis(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "RunFleetAdvisorLsaAnalysis"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function run_fleet_advisor_lsa_analysis( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "RunFleetAdvisorLsaAnalysis", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_extension_pack_association(migration_project_identifier) + start_extension_pack_association(migration_project_identifier, params::Dict{String,<:Any}) + +Applies the extension pack to your target database. An extension pack is an add-on module +that emulates functions present in a source database that are required when converting +objects to the target database. # Arguments -- `replication_task_arn`: The Amazon Resource Name (ARN) of the task that you want to move. -- `target_replication_instance_arn`: The ARN of the replication instance where you want to - move the task to. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). 
""" -function move_replication_task( - ReplicationTaskArn, - TargetReplicationInstanceArn; - aws_config::AbstractAWSConfig=global_aws_config(), +function start_extension_pack_association( + MigrationProjectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return database_migration_service( - "MoveReplicationTask", - Dict{String,Any}( - "ReplicationTaskArn" => ReplicationTaskArn, - "TargetReplicationInstanceArn" => TargetReplicationInstanceArn, - ); + "StartExtensionPackAssociation", + Dict{String,Any}("MigrationProjectIdentifier" => MigrationProjectIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function move_replication_task( - ReplicationTaskArn, - TargetReplicationInstanceArn, +function start_extension_pack_association( + MigrationProjectIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "MoveReplicationTask", + "StartExtensionPackAssociation", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "ReplicationTaskArn" => ReplicationTaskArn, - "TargetReplicationInstanceArn" => TargetReplicationInstanceArn, + "MigrationProjectIdentifier" => MigrationProjectIdentifier ), params, ), @@ -2831,46 +4539,49 @@ function move_replication_task( end """ - reboot_replication_instance(replication_instance_arn) - reboot_replication_instance(replication_instance_arn, params::Dict{String,<:Any}) + start_metadata_model_assessment(migration_project_identifier, selection_rules) + start_metadata_model_assessment(migration_project_identifier, selection_rules, params::Dict{String,<:Any}) -Reboots a replication instance. Rebooting results in a momentary outage, until the -replication instance becomes available again. +Creates a database migration assessment report by assessing the migration complexity for +your source database. A database migration assessment report summarizes all of the schema +conversion tasks. It also details the action items for database objects that can't be +converted to the database engine of your target database instance. # Arguments -- `replication_instance_arn`: The Amazon Resource Name (ARN) of the replication instance. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). +- `selection_rules`: A value that specifies the database objects to assess. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ForceFailover"`: If this parameter is true, the reboot is conducted through a Multi-AZ - failover. If the instance isn't configured for Multi-AZ, then you can't specify true. ( - --force-planned-failover and --force-failover can't both be set to true.) -- `"ForcePlannedFailover"`: If this parameter is true, the reboot is conducted through a - planned Multi-AZ failover where resources are released and cleaned up prior to conducting - the failover. If the instance isn''t configured for Multi-AZ, then you can't specify true. - ( --force-planned-failover and --force-failover can't both be set to true.) 
""" -function reboot_replication_instance( - ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function start_metadata_model_assessment( + MigrationProjectIdentifier, + SelectionRules; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "RebootReplicationInstance", - Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn); + "StartMetadataModelAssessment", + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function reboot_replication_instance( - ReplicationInstanceArn, +function start_metadata_model_assessment( + MigrationProjectIdentifier, + SelectionRules, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "RebootReplicationInstance", + "StartMetadataModelAssessment", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("ReplicationInstanceArn" => ReplicationInstanceArn), + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, + ), params, ), ); @@ -2880,45 +4591,45 @@ function reboot_replication_instance( end """ - refresh_schemas(endpoint_arn, replication_instance_arn) - refresh_schemas(endpoint_arn, replication_instance_arn, params::Dict{String,<:Any}) + start_metadata_model_conversion(migration_project_identifier, selection_rules) + start_metadata_model_conversion(migration_project_identifier, selection_rules, params::Dict{String,<:Any}) -Populates the schema for the specified endpoint. This is an asynchronous operation and can -take several minutes. You can check the status of this operation by calling the -DescribeRefreshSchemasStatus operation. +Converts your source database objects to a format compatible with the target database. # Arguments -- `endpoint_arn`: The Amazon Resource Name (ARN) string that uniquely identifies the - endpoint. -- `replication_instance_arn`: The Amazon Resource Name (ARN) of the replication instance. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). +- `selection_rules`: A value that specifies the database objects to convert. 
""" -function refresh_schemas( - EndpointArn, ReplicationInstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function start_metadata_model_conversion( + MigrationProjectIdentifier, + SelectionRules; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "RefreshSchemas", + "StartMetadataModelConversion", Dict{String,Any}( - "EndpointArn" => EndpointArn, "ReplicationInstanceArn" => ReplicationInstanceArn + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function refresh_schemas( - EndpointArn, - ReplicationInstanceArn, +function start_metadata_model_conversion( + MigrationProjectIdentifier, + SelectionRules, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "RefreshSchemas", + "StartMetadataModelConversion", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "EndpointArn" => EndpointArn, - "ReplicationInstanceArn" => ReplicationInstanceArn, + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, ), params, ), @@ -2929,50 +4640,54 @@ function refresh_schemas( end """ - reload_tables(replication_task_arn, tables_to_reload) - reload_tables(replication_task_arn, tables_to_reload, params::Dict{String,<:Any}) + start_metadata_model_export_as_script(migration_project_identifier, origin, selection_rules) + start_metadata_model_export_as_script(migration_project_identifier, origin, selection_rules, params::Dict{String,<:Any}) -Reloads the target database table with the source data. You can only use this operation -with a task in the RUNNING state, otherwise the service will throw an -InvalidResourceStateFault exception. +Saves your converted code to a file as a SQL script, and stores this file on your Amazon S3 +bucket. # Arguments -- `replication_task_arn`: The Amazon Resource Name (ARN) of the replication task. -- `tables_to_reload`: The name and schema of the table to be reloaded. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). +- `origin`: Whether to export the metadata model from the source or the target. +- `selection_rules`: A value that specifies the database objects to export. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ReloadOption"`: Options for reload. Specify data-reload to reload the data and - re-validate it if validation is enabled. Specify validate-only to re-validate the table. - This option applies only when validation is enabled for the task. Valid values: - data-reload, validate-only Default value is data-reload. +- `"FileName"`: The name of the model file to create in the Amazon S3 bucket. 
""" -function reload_tables( - ReplicationTaskArn, TablesToReload; aws_config::AbstractAWSConfig=global_aws_config() +function start_metadata_model_export_as_script( + MigrationProjectIdentifier, + Origin, + SelectionRules; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ReloadTables", + "StartMetadataModelExportAsScript", Dict{String,Any}( - "ReplicationTaskArn" => ReplicationTaskArn, "TablesToReload" => TablesToReload + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "Origin" => Origin, + "SelectionRules" => SelectionRules, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function reload_tables( - ReplicationTaskArn, - TablesToReload, +function start_metadata_model_export_as_script( + MigrationProjectIdentifier, + Origin, + SelectionRules, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "ReloadTables", + "StartMetadataModelExportAsScript", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "ReplicationTaskArn" => ReplicationTaskArn, - "TablesToReload" => TablesToReload, + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "Origin" => Origin, + "SelectionRules" => SelectionRules, ), params, ), @@ -2983,40 +4698,51 @@ function reload_tables( end """ - remove_tags_from_resource(resource_arn, tag_keys) - remove_tags_from_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + start_metadata_model_export_to_target(migration_project_identifier, selection_rules) + start_metadata_model_export_to_target(migration_project_identifier, selection_rules, params::Dict{String,<:Any}) -Removes metadata tags from an DMS resource, including replication instance, endpoint, -subnet group, and migration task. For more information, see Tag data type description. +Applies converted database objects to your target database. # Arguments -- `resource_arn`: An DMS resource from which you want to remove tag(s). The value for this - parameter is an Amazon Resource Name (ARN). -- `tag_keys`: The tag key (name) of the tag to be removed. +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). +- `selection_rules`: A value that specifies the database objects to export. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"OverwriteExtensionPack"`: Whether to overwrite the migration project extension pack. An + extension pack is an add-on module that emulates functions present in a source database + that are required when converting objects to the target database. 
""" -function remove_tags_from_resource( - ResourceArn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +function start_metadata_model_export_to_target( + MigrationProjectIdentifier, + SelectionRules; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "RemoveTagsFromResource", - Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys); + "StartMetadataModelExportToTarget", + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function remove_tags_from_resource( - ResourceArn, - TagKeys, +function start_metadata_model_export_to_target( + MigrationProjectIdentifier, + SelectionRules, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "RemoveTagsFromResource", + "StartMetadataModelExportToTarget", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys), + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "SelectionRules" => SelectionRules, + ), params, ), ); @@ -3026,23 +4752,59 @@ function remove_tags_from_resource( end """ - run_fleet_advisor_lsa_analysis() - run_fleet_advisor_lsa_analysis(params::Dict{String,<:Any}) + start_metadata_model_import(migration_project_identifier, origin, selection_rules) + start_metadata_model_import(migration_project_identifier, origin, selection_rules, params::Dict{String,<:Any}) -Runs large-scale assessment (LSA) analysis on every Fleet Advisor collector in your account. +Loads the metadata for all the dependent database objects of the parent object. This +operation uses your project's Amazon S3 bucket as a metadata cache to improve performance. +# Arguments +- `migration_project_identifier`: The migration project name or Amazon Resource Name (ARN). +- `origin`: Whether to load metadata to the source or target database. +- `selection_rules`: A value that specifies the database objects to import. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Refresh"`: If true, DMS loads metadata for the specified objects from the source + database. 
""" -function run_fleet_advisor_lsa_analysis(; aws_config::AbstractAWSConfig=global_aws_config()) +function start_metadata_model_import( + MigrationProjectIdentifier, + Origin, + SelectionRules; + aws_config::AbstractAWSConfig=global_aws_config(), +) return database_migration_service( - "RunFleetAdvisorLsaAnalysis"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "StartMetadataModelImport", + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "Origin" => Origin, + "SelectionRules" => SelectionRules, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function run_fleet_advisor_lsa_analysis( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function start_metadata_model_import( + MigrationProjectIdentifier, + Origin, + SelectionRules, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return database_migration_service( - "RunFleetAdvisorLsaAnalysis", - params; + "StartMetadataModelImport", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MigrationProjectIdentifier" => MigrationProjectIdentifier, + "Origin" => Origin, + "SelectionRules" => SelectionRules, + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -3097,6 +4859,71 @@ function start_recommendations( ) end +""" + start_replication(replication_config_arn, start_replication_type) + start_replication(replication_config_arn, start_replication_type, params::Dict{String,<:Any}) + +For a given DMS Serverless replication configuration, DMS connects to the source endpoint +and collects the metadata to analyze the replication workload. Using this metadata, DMS +then computes and provisions the required capacity and starts replicating to the target +endpoint using the server resources that DMS has provisioned for the DMS Serverless +replication. + +# Arguments +- `replication_config_arn`: The Amazon Resource Name of the replication for which to start + replication. +- `start_replication_type`: The replication type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CdcStartPosition"`: Indicates when you want a change data capture (CDC) operation to + start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation + to start. Specifying both values results in an error. The value can be in date, checkpoint, + or LSN/SCN format. +- `"CdcStartTime"`: Indicates the start time for a change data capture (CDC) operation. Use + either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. + Specifying both values results in an error. +- `"CdcStopPosition"`: Indicates when you want a change data capture (CDC) operation to + stop. The value can be either server time or commit time. 
+""" +function start_replication( + ReplicationConfigArn, + StartReplicationType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "StartReplication", + Dict{String,Any}( + "ReplicationConfigArn" => ReplicationConfigArn, + "StartReplicationType" => StartReplicationType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_replication( + ReplicationConfigArn, + StartReplicationType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "StartReplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ReplicationConfigArn" => ReplicationConfigArn, + "StartReplicationType" => StartReplicationType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_replication_task(replication_task_arn, start_replication_task_type) start_replication_task(replication_task_arn, start_replication_task_type, params::Dict{String,<:Any}) @@ -3138,7 +4965,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CdcStopPosition"`: Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time. Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” Commit time example: - --cdc-stop-position “commit_time: 2018-02-09T12:12:12“ + --cdc-stop-position “commit_time:2018-02-09T12:12:12“ """ function start_replication_task( ReplicationTaskArn, @@ -3314,6 +5141,46 @@ function start_replication_task_assessment_run( ) end +""" + stop_replication(replication_config_arn) + stop_replication(replication_config_arn, params::Dict{String,<:Any}) + +For a given DMS Serverless replication configuration, DMS stops any and all ongoing DMS +Serverless replications. This command doesn't deprovision the stopped replications. + +# Arguments +- `replication_config_arn`: The Amazon Resource Name of the replication to stop. + +""" +function stop_replication( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "StopReplication", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_replication( + ReplicationConfigArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "StopReplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_replication_task(replication_task_arn) stop_replication_task(replication_task_arn, params::Dict{String,<:Any}) @@ -3405,11 +5272,11 @@ end Migrates 10 active and enabled Amazon SNS subscriptions at a time and converts them to corresponding Amazon EventBridge rules. By default, this operation migrates subscriptions -only when all your replication instance versions are 3.4.6 or higher. If any replication -instances are from versions earlier than 3.4.6, the operation raises an error and tells you -to upgrade these instances to version 3.4.6 or higher. To enable migration regardless of +only when all your replication instance versions are 3.4.5 or higher. 
If any replication +instances are from versions earlier than 3.4.5, the operation raises an error and tells you +to upgrade these instances to version 3.4.5 or higher. To enable migration regardless of version, set the Force option to true. However, if you don't upgrade instances earlier than -version 3.4.6, some types of events might not be available when you use Amazon EventBridge. +version 3.4.5, some types of events might not be available when you use Amazon EventBridge. To call this operation, make sure that you have certain permissions added to your user account. For more information, see Migrating event subscriptions to Amazon EventBridge in the Amazon Web Services Database Migration Service User Guide. @@ -3419,7 +5286,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ForceMove"`: When set to true, this operation migrates DMS subscriptions for Amazon SNS notifications no matter what your replication instance version is. If not set or set to false, this operation runs only when all your replication instances are from DMS version - 3.4.6 or higher. + 3.4.5 or higher. """ function update_subscriptions_to_event_bridge(; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/dataexchange.jl b/src/services/dataexchange.jl index 837e72634b..9e7fa1fc7a 100644 --- a/src/services/dataexchange.jl +++ b/src/services/dataexchange.jl @@ -841,6 +841,59 @@ function send_api_asset( ) end +""" + send_data_set_notification(data_set_id, type) + send_data_set_notification(data_set_id, type, params::Dict{String,<:Any}) + +The type of event associated with the data set. + +# Arguments +- `data_set_id`: Affected data set of the notification. +- `type`: The type of the notification. Describing the kind of event the notification is + alerting you to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Idempotency key for the notification, this key allows us to deduplicate + notifications that are sent in quick succession erroneously. +- `"Comment"`: Free-form text field for providers to add information about their + notifications. +- `"Details"`: Extra details specific to this notification type. +- `"Scope"`: Affected scope of this notification such as the underlying resources affected + by the notification event. +""" +function send_data_set_notification( + DataSetId, Type; aws_config::AbstractAWSConfig=global_aws_config() +) + return dataexchange( + "POST", + "/v1/data-sets/$(DataSetId)/notification", + Dict{String,Any}("Type" => Type, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_data_set_notification( + DataSetId, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return dataexchange( + "POST", + "/v1/data-sets/$(DataSetId)/notification", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Type" => Type, "ClientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_job(job_id) start_job(job_id, params::Dict{String,<:Any}) diff --git a/src/services/datasync.jl b/src/services/datasync.jl index dd491b6198..5f9eff458c 100644 --- a/src/services/datasync.jl +++ b/src/services/datasync.jl @@ -13,7 +13,8 @@ DataSync Discovery to collect information about. 
# Arguments - `agent_arns`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that - connects to and reads from your on-premises storage system's management interface. + connects to and reads from your on-premises storage system's management interface. You can + only specify one ARN. - `client_token`: Specifies a client token to make sure requests with this API operation are idempotent. If you don't specify a client token, DataSync generates one for you automatically. @@ -131,49 +132,32 @@ end create_agent(activation_key) create_agent(activation_key, params::Dict{String,<:Any}) -Activates an DataSync agent that you have deployed in your storage environment. The -activation process associates your agent with your account. In the activation process, you -specify information such as the Amazon Web Services Region that you want to activate the -agent in. You activate the agent in the Amazon Web Services Region where your target -locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web -Services Region. You can activate the agent in a VPC (virtual private cloud) or provide the -agent access to a VPC endpoint so you can run tasks without going over the public internet. -You can use an agent for more than one location. If a task uses multiple agents, all of -them need to have status AVAILABLE for the task to run. If you use multiple agents for a -source location, the status of all the agents must be AVAILABLE for the task to run. -Agents are automatically updated by Amazon Web Services on a regular basis, using a -mechanism that ensures minimal interruption to your tasks. +Activates an DataSync agent that you've deployed in your storage environment. The +activation process associates the agent with your Amazon Web Services account. If you +haven't deployed an agent yet, see the following topics to learn more: Agent +requirements Create an agent If you're transferring between Amazon Web Services +storage services, you don't need a DataSync agent. # Arguments -- `activation_key`: Your agent activation key. You can get the activation key either by - sending an HTTP GET request with redirects that enable you to get the agent IP address - (port 80). Alternatively, you can get it from the DataSync console. The redirect URL - returned in the response provides you the activation key for your agent in the query string - parameter activationKey. It might also include other activation-related parameters; - however, these are merely defaults. The arguments you pass to this API call determine the - actual configuration of your agent. For more information, see Activating an Agent in the - DataSync User Guide. +- `activation_key`: Specifies your DataSync agent's activation key. If you don't have an + activation key, see Activate your agent. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentName"`: The name you configured for your agent. This value is a text reference - that is used to identify the agent in the console. -- `"SecurityGroupArns"`: The ARNs of the security groups used to protect your data transfer - task subnets. See SecurityGroupArns. -- `"SubnetArns"`: The Amazon Resource Names (ARNs) of the subnets in which DataSync will - create elastic network interfaces for each data transfer task. The agent that runs a task - must be private. When you start a task that is associated with an agent created in a VPC, - or one that has access to an IP address in a VPC, then the task is also private. 
In this - case, DataSync creates four network interfaces for each task in your subnet. For a data - transfer to work, the agent must be able to route to all these four network interfaces. -- `"Tags"`: The key-value pair that represents the tag that you want to associate with the - agent. The value can be an empty string. This value helps you manage, filter, and search - for your agents. Valid characters for key and value are letters, spaces, and numbers - representable in UTF-8 format, and the following special characters: + - = . _ : / @. -- `"VpcEndpointId"`: The ID of the VPC (virtual private cloud) endpoint that the agent has - access to. This is the client-side VPC endpoint, also called a PrivateLink. If you don't - have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service Configuration in the - Amazon VPC User Guide. VPC endpoint ID looks like this: vpce-01234d5aff67890e1. +- `"AgentName"`: Specifies a name for your agent. You can see this name in the DataSync + console. +- `"SecurityGroupArns"`: Specifies the Amazon Resource Name (ARN) of the security group + that protects your task's network interfaces when using a virtual private cloud (VPC) + endpoint. You can only specify one ARN. +- `"SubnetArns"`: Specifies the ARN of the subnet where you want to run your DataSync task + when using a VPC endpoint. This is the subnet where DataSync creates and manages the + network interfaces for your transfer. You can only specify one ARN. +- `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon + Web Services resources. We recommend creating at least one tag for your agent. +- `"VpcEndpointId"`: Specifies the ID of the VPC endpoint that you want your agent to + connect to. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1. The VPC + endpoint you use must include the DataSync service name (for example, + com.amazonaws.us-east-2.datasync). """ function create_agent(ActivationKey; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -198,12 +182,90 @@ function create_agent( ) end +""" + create_location_azure_blob(agent_arns, authentication_type, container_url) + create_location_azure_blob(agent_arns, authentication_type, container_url, params::Dict{String,<:Any}) + +Creates a transfer location for a Microsoft Azure Blob Storage container. DataSync can use +this location as a transfer source or destination. Before you begin, make sure you know how +DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also +need a DataSync agent that can connect to your container. + +# Arguments +- `agent_arns`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that can + connect with your Azure Blob Storage container. You can specify more than one agent. For + more information, see Using multiple agents for your transfer. +- `authentication_type`: Specifies the authentication method DataSync uses to access your + Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS). +- `container_url`: Specifies the URL of the Azure Blob Storage container involved in your + transfer. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessTier"`: Specifies the access tier that you want your objects or files transferred + into. This only applies when using the location as a transfer destination. For more + information, see Access tiers. 
+- `"BlobType"`: Specifies the type of blob that you want your objects or files to be when + transferring them into Azure Blob Storage. Currently, DataSync only supports moving data + into Azure Blob Storage as block blobs. For more information on blob types, see the Azure + Blob Storage documentation. +- `"SasConfiguration"`: Specifies the SAS configuration that allows DataSync to access your + Azure Blob Storage. +- `"Subdirectory"`: Specifies path segments if you want to limit your transfer to a virtual + directory in your container (for example, /my/images). +- `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon + Web Services resources. We recommend creating at least a name tag for your transfer + location. +""" +function create_location_azure_blob( + AgentArns, + AuthenticationType, + ContainerUrl; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "CreateLocationAzureBlob", + Dict{String,Any}( + "AgentArns" => AgentArns, + "AuthenticationType" => AuthenticationType, + "ContainerUrl" => ContainerUrl, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_location_azure_blob( + AgentArns, + AuthenticationType, + ContainerUrl, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "CreateLocationAzureBlob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AgentArns" => AgentArns, + "AuthenticationType" => AuthenticationType, + "ContainerUrl" => ContainerUrl, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_location_efs(ec2_config, efs_filesystem_arn) create_location_efs(ec2_config, efs_filesystem_arn, params::Dict{String,<:Any}) -Creates an endpoint for an Amazon EFS file system that DataSync can access for a transfer. -For more information, see Creating a location for Amazon EFS. +Creates a transfer location for an Amazon EFS file system. DataSync can use this location +as a source or destination for transferring data. Before you begin, make sure that you +understand how DataSync accesses Amazon EFS file systems. # Arguments - `ec2_config`: Specifies the subnet and security groups DataSync uses to access your @@ -264,7 +326,9 @@ end create_location_fsx_lustre(fsx_filesystem_arn, security_group_arns) create_location_fsx_lustre(fsx_filesystem_arn, security_group_arns, params::Dict{String,<:Any}) -Creates an endpoint for an Amazon FSx for Lustre file system. +Creates a transfer location for an Amazon FSx for Lustre file system. DataSync can use this +location as a source or destination for transferring data. Before you begin, make sure that +you understand how DataSync accesses FSx for Lustre file systems. # Arguments - `fsx_filesystem_arn`: The Amazon Resource Name (ARN) for the FSx for Lustre file system. @@ -319,8 +383,9 @@ end create_location_fsx_ontap(protocol, security_group_arns, storage_virtual_machine_arn) create_location_fsx_ontap(protocol, security_group_arns, storage_virtual_machine_arn, params::Dict{String,<:Any}) -Creates an endpoint for an Amazon FSx for NetApp ONTAP file system that DataSync can access -for a transfer. For more information, see Creating a location for FSx for ONTAP. +Creates a transfer location for an Amazon FSx for NetApp ONTAP file system. DataSync can +use this location as a source or destination for transferring data. Before you begin, make +sure that you understand how DataSync accesses FSx for ONTAP file systems. 
# Arguments - `protocol`: @@ -389,9 +454,10 @@ end create_location_fsx_open_zfs(fsx_filesystem_arn, protocol, security_group_arns) create_location_fsx_open_zfs(fsx_filesystem_arn, protocol, security_group_arns, params::Dict{String,<:Any}) -Creates an endpoint for an Amazon FSx for OpenZFS file system that DataSync can access for -a transfer. For more information, see Creating a location for FSx for OpenZFS. Request -parameters related to SMB aren't supported with the CreateLocationFsxOpenZfs operation. +Creates a transfer location for an Amazon FSx for OpenZFS file system. DataSync can use +this location as a source or destination for transferring data. Before you begin, make sure +that you understand how DataSync accesses FSx for OpenZFS file systems. Request parameters +related to SMB aren't supported with the CreateLocationFsxOpenZfs operation. # Arguments - `fsx_filesystem_arn`: The Amazon Resource Name (ARN) of the FSx for OpenZFS file system. @@ -454,26 +520,32 @@ end create_location_fsx_windows(fsx_filesystem_arn, password, security_group_arns, user) create_location_fsx_windows(fsx_filesystem_arn, password, security_group_arns, user, params::Dict{String,<:Any}) -Creates an endpoint for an Amazon FSx for Windows File Server file system. +Creates a transfer location for an Amazon FSx for Windows File Server file system. DataSync +can use this location as a source or destination for transferring data. Before you begin, +make sure that you understand how DataSync accesses FSx for Windows File Server file +systems. # Arguments - `fsx_filesystem_arn`: Specifies the Amazon Resource Name (ARN) for the FSx for Windows File Server file system. -- `password`: Specifies the password of the user who has the permissions to access files - and folders in the file system. +- `password`: Specifies the password of the user with the permissions to mount and access + the files, folders, and file metadata in your FSx for Windows File Server file system. - `security_group_arns`: Specifies the ARNs of the security groups that provide access to your file system's preferred subnet. If you choose a security group that doesn't allow connections from within itself, do one of the following: Configure the security group to allow it to communicate within itself. Choose a different security group that can communicate with the mount target's security group. -- `user`: Specifies the user who has the permissions to access files and folders in the - file system. For information about choosing a user name that ensures sufficient permissions - to files, folders, and metadata, see user. +- `user`: Specifies the user with the permissions to mount and access the files, folders, + and file metadata in your FSx for Windows File Server file system. For information about + choosing a user with the right level of access for your transfer, see required permissions + for FSx for Windows File Server locations. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Domain"`: Specifies the name of the Windows domain that the FSx for Windows File Server - belongs to. +- `"Domain"`: Specifies the name of the Microsoft Active Directory domain that the FSx for + Windows File Server file system belongs to. If you have multiple Active Directory domains + in your environment, configuring this parameter makes sure that DataSync connects to the + right file system. - `"Subdirectory"`: Specifies a mount path for your file system using forward slashes. 
This is where DataSync reads or writes data (depending on if this is a source or destination location). @@ -530,7 +602,9 @@ end create_location_hdfs(agent_arns, authentication_type, name_nodes) create_location_hdfs(agent_arns, authentication_type, name_nodes, params::Dict{String,<:Any}) -Creates an endpoint for a Hadoop Distributed File System (HDFS). +Creates a transfer location for a Hadoop Distributed File System (HDFS). DataSync can use +this location as a source or destination for transferring data. Before you begin, make sure +that you understand how DataSync accesses HDFS clusters. # Arguments - `agent_arns`: The Amazon Resource Names (ARNs) of the agents that are used to connect to @@ -621,39 +695,28 @@ end create_location_nfs(on_prem_config, server_hostname, subdirectory) create_location_nfs(on_prem_config, server_hostname, subdirectory, params::Dict{String,<:Any}) -Defines a file system on a Network File System (NFS) server that can be read from or -written to. +Creates a transfer location for a Network File System (NFS) file server. DataSync can use +this location as a source or destination for transferring data. Before you begin, make sure +that you understand how DataSync accesses NFS file servers. If you're copying data to or +from an Snowcone device, you can also use CreateLocationNfs to create your transfer +location. For more information, see Configuring transfers with Snowcone. # Arguments -- `on_prem_config`: Contains a list of Amazon Resource Names (ARNs) of agents that are used - to connect to an NFS server. If you are copying data to or from your Snowcone device, see - NFS Server on Snowcone for more information. -- `server_hostname`: The name of the NFS server. This value is the IP address or Domain - Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this - hostname to mount the NFS server in a network. If you are copying data to or from your - Snowcone device, see NFS Server on Snowcone for more information. This name must either be - DNS-compliant or must be an IP version 4 (IPv4) address. -- `subdirectory`: The subdirectory in the NFS file system that is used to read data from - the NFS source location or write data to the NFS destination. The NFS path should be a path - that's exported by the NFS server, or a subdirectory of that path. The path should be such - that it can be mounted by other NFS clients in your network. To see all the paths exported - by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access - to your server. You can specify any directory that appears in the results, and any - subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos - authentication. To transfer all the data in the folder you specified, DataSync needs to - have permissions to read all the data. To ensure this, either configure the NFS export with - no_root_squash, or ensure that the permissions for all of the files that you want DataSync - allow read access for all users. Doing either enables the agent to read the files. For the - agent to access directories, you must additionally enable all execute access. If you are - copying data to or from your Snowcone device, see NFS Server on Snowcone for more - information. For information about NFS export configuration, see 18.7. The /etc/exports - Configuration File in the Red Hat Enterprise Linux documentation. 
+- `on_prem_config`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that + want to connect to your NFS file server. You can specify more than one agent. For more + information, see Using multiple agents for transfers. +- `server_hostname`: Specifies the Domain Name System (DNS) name or IP version 4 address of + the NFS file server that your DataSync agent connects to. +- `subdirectory`: Specifies the export path in your NFS file server that you want DataSync + to mount. This path (or a subdirectory of the path) is where DataSync transfers data to or + from. For information on configuring an export for DataSync, see Accessing NFS file servers. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MountOptions"`: The NFS mount options that DataSync can use to mount your NFS share. -- `"Tags"`: The key-value pair that represents the tag that you want to add to the - location. The value can be an empty string. We recommend using tags to name your resources. +- `"MountOptions"`: Specifies the options that DataSync can use to mount your NFS file + server. +- `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon + Web Services resources. We recommend creating at least a name tag for your location. """ function create_location_nfs( OnPremConfig, @@ -701,8 +764,9 @@ end create_location_object_storage(agent_arns, bucket_name, server_hostname) create_location_object_storage(agent_arns, bucket_name, server_hostname, params::Dict{String,<:Any}) -Creates an endpoint for an object storage system that DataSync can access for a transfer. -For more information, see Creating a location for object storage. +Creates a transfer location for an object storage system. DataSync can use this location as +a source or destination for transferring data. Before you begin, make sure that you +understand the prerequisites for DataSync to work with object storage systems. # Arguments - `agent_arns`: Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can @@ -717,10 +781,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys required to authenticate with the object storage server. - `"SecretKey"`: Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server. -- `"ServerCertificate"`: Specifies a certificate to authenticate with an object storage - system that uses a private or self-signed certificate authority (CA). You must specify a - Base64-encoded .pem file (for example, file:///home/user/.ssh/storage_sys_certificate.pem). - The certificate can be up to 32768 bytes (before Base64 encoding). To use this parameter, +- `"ServerCertificate"`: Specifies a certificate chain for DataSync to authenticate with + your object storage system if the system uses a private or self-signed certificate + authority (CA). You must specify a single .pem file with a full certificate chain (for + example, file:///home/user/.ssh/object_storage_certificates.pem). The certificate chain + might include: The object storage system's certificate All intermediate certificates + (if there are any) The root certificate of the signing CA You can concatenate your + certificates into a .pem file (which can be up to 32768 bytes before base64 encoding). 
The + following example cat command creates an object_storage_certificates.pem file that includes + three certificates: cat object_server_certificate.pem intermediate_certificate.pem + ca_root_certificate.pem > object_storage_certificates.pem To use this parameter, configure ServerProtocol to HTTPS. - `"ServerPort"`: Specifies the port that your object storage server accepts inbound network traffic on (for example, port 443). @@ -776,34 +846,37 @@ end create_location_s3(s3_bucket_arn, s3_config) create_location_s3(s3_bucket_arn, s3_config, params::Dict{String,<:Any}) -A location is an endpoint for an Amazon S3 bucket. DataSync can use the location as a -source or destination for copying data. Before you create your location, make sure that -you read the following sections: Storage class considerations with Amazon S3 locations - Evaluating S3 request costs when using DataSync For more information, see Creating -an Amazon S3 location. +Creates a transfer location for an Amazon S3 bucket. DataSync can use this location as a +source or destination for transferring data. Before you begin, make sure that you read the +following topics: Storage class considerations with Amazon S3 locations Evaluating +S3 request costs when using DataSync For more information, see Configuring transfers +with Amazon S3. # Arguments -- `s3_bucket_arn`: The ARN of the Amazon S3 bucket. If the bucket is on an Amazon Web - Services Outpost, this must be an access point ARN. +- `s3_bucket_arn`: Specifies the ARN of the S3 bucket that you want to use as a location. + (When creating your DataSync task later, you specify whether this location is a transfer + source or destination.) If your S3 bucket is located on an Outposts resource, you must + specify an Amazon S3 access point. For more information, see Managing data access with + Amazon S3 access points in the Amazon S3 User Guide. - `s3_config`: # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentArns"`: If you're using DataSync on an Amazon Web Services Outpost, specify the - Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. For more - information about launching a DataSync agent on an Amazon Web Services Outpost, see Deploy - your DataSync agent on Outposts. -- `"S3StorageClass"`: The Amazon S3 storage class that you want to store your files in when - this location is used as a task destination. For buckets in Amazon Web Services Regions, - the storage class defaults to Standard. For buckets on Outposts, the storage class defaults - to Amazon Web Services S3 Outposts. For more information about S3 storage classes, see - Amazon S3 Storage Classes. Some storage classes have behaviors that can affect your S3 - storage cost. For detailed information, see Considerations when working with S3 storage - classes in DataSync. -- `"Subdirectory"`: A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 - is used to read data from the S3 source location or write data to the S3 destination. -- `"Tags"`: The key-value pair that represents the tag that you want to add to the - location. The value can be an empty string. We recommend using tags to name your resources. +- `"AgentArns"`: (Amazon S3 on Outposts only) Specifies the Amazon Resource Name (ARN) of + the DataSync agent on your Outpost. For more information, see Deploy your DataSync agent on + Outposts. 
+- `"S3StorageClass"`: Specifies the storage class that you want your objects to use when + Amazon S3 is a transfer destination. For buckets in Amazon Web Services Regions, the + storage class defaults to STANDARD. For buckets on Outposts, the storage class defaults to + OUTPOSTS. For more information, see Storage class considerations with Amazon S3 transfers. +- `"Subdirectory"`: Specifies a prefix in the S3 bucket that DataSync reads from or writes + to (depending on whether the bucket is a source or destination location). DataSync can't + transfer objects with a prefix that begins with a slash (/) or includes //, /./, or /../ + patterns. For example: /photos photos//2006/January photos/./2006/February + photos/../2006/March +- `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon + Web Services resources. We recommend creating at least a name tag for your transfer + location. """ function create_location_s3( S3BucketArn, S3Config; aws_config::AbstractAWSConfig=global_aws_config() @@ -839,8 +912,9 @@ end create_location_smb(agent_arns, password, server_hostname, subdirectory, user) create_location_smb(agent_arns, password, server_hostname, subdirectory, user, params::Dict{String,<:Any}) -Creates an endpoint for a Server Message Block (SMB) file server that DataSync can access -for a transfer. For more information, see Creating an SMB location. +Creates a transfer location for a Server Message Block (SMB) file server. DataSync can use +this location as a source or destination for transferring data. Before you begin, make sure +that you understand how DataSync accesses SMB file servers. # Arguments - `agent_arns`: Specifies the DataSync agent (or agents) which you want to connect to your @@ -854,18 +928,18 @@ for a transfer. For more information, see Creating an SMB location. - `subdirectory`: Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, /path/to/subdirectory). Make sure that other SMB clients in your network can also - mount this path. To copy all data in the specified subdirectory, DataSync must be able to - mount the SMB share and access all of its data. For more information, see required - permissions for SMB locations. -- `user`: Specifies the user name that can mount your SMB file server and has permission to - access the files and folders involved in your transfer. For information about choosing a - user with the right level of access for your transfer, see required permissions for SMB - locations. + mount this path. To copy all data in the subdirectory, DataSync must be able to mount the + SMB share and access all of its data. For more information, see required permissions for + SMB locations. +- `user`: Specifies the user that can mount and access the files, folders, and file + metadata in your SMB file server. For information about choosing a user with the right + level of access for your transfer, see required permissions for SMB locations. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Domain"`: Specifies the Windows domain name that your SMB file server belongs to. For - more information, see required permissions for SMB locations. +- `"Domain"`: Specifies the name of the Active Directory domain that your SMB file server + belongs to. 
If you have multiple Active Directory domains in your environment, configuring + this parameter makes sure that DataSync connects to the right file server. - `"MountOptions"`: Specifies the version of the SMB protocol that DataSync uses to access your SMB file server. - `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon @@ -926,36 +1000,42 @@ end create_task(destination_location_arn, source_location_arn, params::Dict{String,<:Any}) Configures a task, which defines where and how DataSync transfers your data. A task -includes a source location, a destination location, and the preferences for how and when -you want to transfer your data (such as bandwidth limits, scheduling, among other options). - If you're planning to transfer data to or from an Amazon S3 location, review how DataSync -can affect your S3 request charges and the DataSync pricing page before you begin. +includes a source location, destination location, and transfer options (such as bandwidth +limits, scheduling, and more). If you're planning to transfer data to or from an Amazon S3 +location, review how DataSync can affect your S3 request charges and the DataSync pricing +page before you begin. # Arguments -- `destination_location_arn`: The Amazon Resource Name (ARN) of an Amazon Web Services - storage resource's location. -- `source_location_arn`: The Amazon Resource Name (ARN) of the source location for the task. +- `destination_location_arn`: Specifies the ARN of your transfer's destination location. +- `source_location_arn`: Specifies the ARN of your transfer's source location. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CloudWatchLogGroupArn"`: The Amazon Resource Name (ARN) of the Amazon CloudWatch log - group that is used to monitor and log events in the task. -- `"Excludes"`: Specifies a list of filter rules that exclude specific data during your - transfer. For more information and examples, see Filtering data transferred by DataSync. -- `"Includes"`: Specifies a list of filter rules that include specific data during your - transfer. For more information and examples, see Filtering data transferred by DataSync. -- `"Name"`: The name of a task. This value is a text reference that is used to identify the - task in the console. -- `"Options"`: Specifies the configuration options for a task. Some options include - preserving file or object metadata and verifying data integrity. You can also override - these options before starting an individual run of a task (also known as a task execution). - For more information, see StartTaskExecution. -- `"Schedule"`: Specifies a schedule used to periodically transfer files from a source to a - destination location. The schedule should be specified in UTC time. For more information, - see Scheduling your task. -- `"Tags"`: Specifies the tags that you want to apply to the Amazon Resource Name (ARN) - representing the task. Tags are key-value pairs that help you manage, filter, and search - for your DataSync resources. +- `"CloudWatchLogGroupArn"`: Specifies the Amazon Resource Name (ARN) of an Amazon + CloudWatch log group for monitoring your task. +- `"Excludes"`: Specifies exclude filters that define the files, objects, and folders in + your source location that you don't want DataSync to transfer. For more information and + examples, see Specifying what DataSync transfers by using filters. 
+- `"Includes"`: Specifies include filters define the files, objects, and folders in your + source location that you want DataSync to transfer. For more information and examples, see + Specifying what DataSync transfers by using filters. +- `"ManifestConfig"`: Configures a manifest, which is a list of files or objects that you + want DataSync to transfer. For more information and configuration examples, see Specifying + what DataSync transfers by using a manifest. When using this parameter, your caller + identity (the role that you're using DataSync with) must have the iam:PassRole permission. + The AWSDataSyncFullAccess policy includes this permission. +- `"Name"`: Specifies the name of your task. +- `"Options"`: Specifies your task's settings, such as preserving file metadata, verifying + data integrity, among other options. +- `"Schedule"`: Specifies a schedule for when you want your task to run. For more + information, see Scheduling your task. +- `"Tags"`: Specifies the tags that you want to apply to your task. Tags are key-value + pairs that help you manage, filter, and search for your DataSync resources. +- `"TaskReportConfig"`: Specifies how you want to configure a task report, which provides + detailed information about your DataSync transfer. For more information, see Monitoring + your DataSync transfers with task reports. When using this parameter, your caller identity + (the role that you're using DataSync with) must have the iam:PassRole permission. The + AWSDataSyncFullAccess policy includes this permission. """ function create_task( DestinationLocationArn, @@ -999,10 +1079,10 @@ end delete_agent(agent_arn) delete_agent(agent_arn, params::Dict{String,<:Any}) -Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of -the agent in your request. The operation disassociates the agent from your Amazon Web -Services account. However, it doesn't delete the agent virtual machine (VM) from your -on-premises environment. +Removes an DataSync agent resource from your Amazon Web Services account. Keep in mind that +this operation (which can't be undone) doesn't remove the agent's virtual machine (VM) or +Amazon EC2 instance from your storage environment. For next steps, you can delete the VM or +instance from your storage environment or reuse it to activate a new agent. # Arguments - `agent_arn`: The Amazon Resource Name (ARN) of the agent to delete. Use the ListAgents @@ -1036,7 +1116,7 @@ end delete_location(location_arn) delete_location(location_arn, params::Dict{String,<:Any}) -Deletes the configuration of a location used by DataSync. +Deletes a transfer location resource from DataSync. # Arguments - `location_arn`: The Amazon Resource Name (ARN) of the location to delete. @@ -1069,7 +1149,7 @@ end delete_task(task_arn) delete_task(task_arn, params::Dict{String,<:Any}) -Deletes an DataSync task. +Deletes a transfer task resource from DataSync. # Arguments - `task_arn`: Specifies the Amazon Resource Name (ARN) of the task that you want to delete. @@ -1098,10 +1178,12 @@ end describe_agent(agent_arn) describe_agent(agent_arn, params::Dict{String,<:Any}) -Returns metadata about an DataSync agent, such as its name, endpoint type, and status. +Returns information about an DataSync agent, such as its name, service endpoint type, and +status. # Arguments -- `agent_arn`: Specifies the Amazon Resource Name (ARN) of the DataSync agent to describe. +- `agent_arn`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that you want + information about. 
""" function describe_agent(AgentArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1165,11 +1247,49 @@ function describe_discovery_job( ) end +""" + describe_location_azure_blob(location_arn) + describe_location_azure_blob(location_arn, params::Dict{String,<:Any}) + +Provides details about how an DataSync transfer location for Microsoft Azure Blob Storage +is configured. + +# Arguments +- `location_arn`: Specifies the Amazon Resource Name (ARN) of your Azure Blob Storage + transfer location. + +""" +function describe_location_azure_blob( + LocationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return datasync( + "DescribeLocationAzureBlob", + Dict{String,Any}("LocationArn" => LocationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_location_azure_blob( + LocationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "DescribeLocationAzureBlob", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("LocationArn" => LocationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_location_efs(location_arn) describe_location_efs(location_arn, params::Dict{String,<:Any}) -Returns metadata about your DataSync location for an Amazon EFS file system. +Provides details about how an DataSync transfer location for an Amazon EFS file system is +configured. # Arguments - `location_arn`: The Amazon Resource Name (ARN) of the Amazon EFS file system location @@ -1205,8 +1325,8 @@ end describe_location_fsx_lustre(location_arn) describe_location_fsx_lustre(location_arn, params::Dict{String,<:Any}) -Provides details about how an DataSync location for an Amazon FSx for Lustre file system is -configured. +Provides details about how an DataSync transfer location for an Amazon FSx for Lustre file +system is configured. # Arguments - `location_arn`: The Amazon Resource Name (ARN) of the FSx for Lustre location to @@ -1242,9 +1362,9 @@ end describe_location_fsx_ontap(location_arn) describe_location_fsx_ontap(location_arn, params::Dict{String,<:Any}) -Provides details about how an DataSync location for an Amazon FSx for NetApp ONTAP file -system is configured. If your location uses SMB, the DescribeLocationFsxOntap operation -doesn't actually return a Password. +Provides details about how an DataSync transfer location for an Amazon FSx for NetApp ONTAP +file system is configured. If your location uses SMB, the DescribeLocationFsxOntap +operation doesn't actually return a Password. # Arguments - `location_arn`: Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP file system @@ -1280,8 +1400,8 @@ end describe_location_fsx_open_zfs(location_arn) describe_location_fsx_open_zfs(location_arn, params::Dict{String,<:Any}) -Provides details about how an DataSync location for an Amazon FSx for OpenZFS file system -is configured. Response elements related to SMB aren't supported with the +Provides details about how an DataSync transfer location for an Amazon FSx for OpenZFS file +system is configured. Response elements related to SMB aren't supported with the DescribeLocationFsxOpenZfs operation. # Arguments @@ -1318,12 +1438,12 @@ end describe_location_fsx_windows(location_arn) describe_location_fsx_windows(location_arn, params::Dict{String,<:Any}) -Returns metadata about an Amazon FSx for Windows File Server location, such as information -about its path. 
+Provides details about how an DataSync transfer location for an Amazon FSx for Windows File +Server file system is configured. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the FSx for Windows File Server - location to describe. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the FSx for Windows File + Server location. """ function describe_location_fsx_windows( @@ -1355,11 +1475,11 @@ end describe_location_hdfs(location_arn) describe_location_hdfs(location_arn, params::Dict{String,<:Any}) -Returns metadata, such as the authentication information about the Hadoop Distributed File -System (HDFS) location. +Provides details about how an DataSync transfer location for a Hadoop Distributed File +System (HDFS) is configured. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the HDFS cluster location to describe. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the HDFS location. """ function describe_location_hdfs( @@ -1391,10 +1511,12 @@ end describe_location_nfs(location_arn) describe_location_nfs(location_arn, params::Dict{String,<:Any}) -Returns metadata, such as the path information, about an NFS location. +Provides details about how an DataSync transfer location for a Network File System (NFS) +file server is configured. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the NFS location to describe. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the NFS location that you + want information about. """ function describe_location_nfs( @@ -1426,11 +1548,12 @@ end describe_location_object_storage(location_arn) describe_location_object_storage(location_arn, params::Dict{String,<:Any}) -Returns metadata about your DataSync location for an object storage system. +Provides details about how an DataSync transfer location for an object storage system is +configured. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the object storage system location that - you want information about. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the object storage system + location. """ function describe_location_object_storage( @@ -1462,11 +1585,10 @@ end describe_location_s3(location_arn) describe_location_s3(location_arn, params::Dict{String,<:Any}) -Returns metadata, such as bucket name, about an Amazon S3 bucket location. +Provides details about how an DataSync transfer location for an S3 bucket is configured. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the Amazon S3 bucket location to - describe. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the Amazon S3 location. """ function describe_location_s3( @@ -1498,10 +1620,12 @@ end describe_location_smb(location_arn) describe_location_smb(location_arn, params::Dict{String,<:Any}) -Returns metadata, such as the path and user information about an SMB location. +Provides details about how an DataSync transfer location for a Server Message Block (SMB) +file server is configured. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the SMB location to describe. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the SMB location that you + want information about. """ function describe_location_smb( @@ -1698,10 +1822,11 @@ end describe_task(task_arn) describe_task(task_arn, params::Dict{String,<:Any}) -Returns metadata about a task. +Provides information about a task, which defines where and how DataSync transfers your data. 
# Arguments -- `task_arn`: The Amazon Resource Name (ARN) of the task to describe. +- `task_arn`: Specifies the Amazon Resource Name (ARN) of the transfer task that you want + information about. """ function describe_task(TaskArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1727,10 +1852,12 @@ end describe_task_execution(task_execution_arn) describe_task_execution(task_execution_arn, params::Dict{String,<:Any}) -Returns detailed metadata about a task that is being executed. +Provides information about an execution of your DataSync task. You can use this operation +to help monitor the progress of an ongoing transfer or check the results of the transfer. # Arguments -- `task_execution_arn`: The Amazon Resource Name (ARN) of the task that is being executed. +- `task_execution_arn`: Specifies the Amazon Resource Name (ARN) of the task execution that + you want information about. """ function describe_task_execution( @@ -1768,9 +1895,7 @@ Creates recommendations about where to migrate your data to in Amazon Web Servic Recommendations are generated based on information that DataSync Discovery collects about your on-premises storage system's resources. For more information, see Recommendations provided by DataSync Discovery. Once generated, you can view your recommendations by using -the DescribeStorageSystemResources operation. If your discovery job completes -successfully, you don't need to use this operation. DataSync Discovery generates the -recommendations for you automatically. +the DescribeStorageSystemResources operation. # Arguments - `discovery_job_arn`: Specifies the Amazon Resource Name (ARN) of the discovery job that @@ -1983,14 +2108,15 @@ end list_task_executions() list_task_executions(params::Dict{String,<:Any}) -Returns a list of executed tasks. +Returns a list of executions for an DataSync transfer task. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of executed tasks to list. -- `"NextToken"`: An opaque string that indicates the position at which to begin the next - list of the executed tasks. -- `"TaskArn"`: The Amazon Resource Name (ARN) of the task whose tasks you want to list. +- `"MaxResults"`: Specifies how many results you want in the response. +- `"NextToken"`: Specifies an opaque string that indicates the position at which to begin + the next list of results in the response. +- `"TaskArn"`: Specifies the Amazon Resource Name (ARN) of the task that you want execution + information about. """ function list_task_executions(; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -2140,10 +2266,11 @@ end start_task_execution(task_arn) start_task_execution(task_arn, params::Dict{String,<:Any}) -Starts an DataSync task. For each task, you can only run one task execution at a time. -There are several phases to a task execution. For more information, see Task execution -statuses. If you're planning to transfer data to or from an Amazon S3 location, review how -DataSync can affect your S3 request charges and the DataSync pricing page before you begin. +Starts an DataSync transfer task. For each task, you can only run one task execution at a +time. There are several phases to a task execution. For more information, see Task +execution statuses. If you're planning to transfer data to or from an Amazon S3 location, +review how DataSync can affect your S3 request charges and the DataSync pricing page before +you begin. 
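# A minimal usage sketch, assuming the usual AWS.jl `@service` pattern and configured
# credentials. The task ARN is a placeholder, and the include-filter shape mirrors the
# Includes parameter described below.
using AWS
@service DataSync

task_arn = "arn:aws:datasync:us-east-2:111222333444:task/task-0example1234567890"  # placeholder

# Start an execution with the task's saved settings.
DataSync.start_task_execution(task_arn)

# Or override settings for this run only via the optional params Dict.
DataSync.start_task_execution(
    task_arn,
    Dict("Includes" => [Dict("FilterType" => "SIMPLE_PATTERN", "Value" => "/folder1|/folder2")]),
)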
# Arguments - `task_arn`: Specifies the Amazon Resource Name (ARN) of the task that you want to start. @@ -2158,10 +2285,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys when running a task. The pattern should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\". +- `"ManifestConfig"`: Configures a manifest, which is a list of files or objects that you + want DataSync to transfer. For more information and configuration examples, see Specifying + what DataSync transfers by using a manifest. When using this parameter, your caller + identity (the role that you're using DataSync with) must have the iam:PassRole permission. + The AWSDataSyncFullAccess policy includes this permission. To remove a manifest + configuration, specify this parameter with an empty value. - `"OverrideOptions"`: - `"Tags"`: Specifies the tags that you want to apply to the Amazon Resource Name (ARN) representing the task execution. Tags are key-value pairs that help you manage, filter, and search for your DataSync resources. +- `"TaskReportConfig"`: Specifies how you want to configure a task report, which provides + detailed information about your DataSync transfer. For more information, see Monitoring + your DataSync transfers with task reports. When using this parameter, your caller identity + (the role that you're using DataSync with) must have the iam:PassRole permission. The + AWSDataSyncFullAccess policy includes this permission. To remove a task report + configuration, specify this parameter as empty. """ function start_task_execution(TaskArn; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -2311,7 +2450,7 @@ end update_agent(agent_arn) update_agent(agent_arn, params::Dict{String,<:Any}) -Updates the name of an agent. +Updates the name of an DataSync agent. # Arguments - `agent_arn`: The Amazon Resource Name (ARN) of the agent to update. @@ -2395,6 +2534,61 @@ function update_discovery_job( ) end +""" + update_location_azure_blob(location_arn) + update_location_azure_blob(location_arn, params::Dict{String,<:Any}) + +Modifies some configurations of the Microsoft Azure Blob Storage transfer location that +you're using with DataSync. + +# Arguments +- `location_arn`: Specifies the ARN of the Azure Blob Storage transfer location that you're + updating. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessTier"`: Specifies the access tier that you want your objects or files transferred + into. This only applies when using the location as a transfer destination. For more + information, see Access tiers. +- `"AgentArns"`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that can + connect with your Azure Blob Storage container. You can specify more than one agent. For + more information, see Using multiple agents for your transfer. +- `"AuthenticationType"`: Specifies the authentication method DataSync uses to access your + Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS). +- `"BlobType"`: Specifies the type of blob that you want your objects or files to be when + transferring them into Azure Blob Storage. Currently, DataSync only supports moving data + into Azure Blob Storage as block blobs. For more information on blob types, see the Azure + Blob Storage documentation. 
+- `"SasConfiguration"`: Specifies the SAS configuration that allows DataSync to access your + Azure Blob Storage. +- `"Subdirectory"`: Specifies path segments if you want to limit your transfer to a virtual + directory in your container (for example, /my/images). +""" +function update_location_azure_blob( + LocationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return datasync( + "UpdateLocationAzureBlob", + Dict{String,Any}("LocationArn" => LocationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_location_azure_blob( + LocationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "UpdateLocationAzureBlob", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("LocationArn" => LocationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_location_hdfs(location_arn) update_location_hdfs(location_arn, params::Dict{String,<:Any}) @@ -2464,32 +2658,22 @@ end update_location_nfs(location_arn) update_location_nfs(location_arn, params::Dict{String,<:Any}) -Updates some of the parameters of a previously created location for Network File System -(NFS) access. For information about creating an NFS location, see Creating a location for -NFS. +Modifies some configurations of the Network File System (NFS) transfer location that you're +using with DataSync. For more information, see Configuring transfers to or from an NFS file +server. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the NFS location to update. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the NFS transfer location + that you want to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MountOptions"`: - `"OnPremConfig"`: -- `"Subdirectory"`: The subdirectory in the NFS file system that is used to read data from - the NFS source location or write data to the NFS destination. The NFS path should be a path - that's exported by the NFS server, or a subdirectory of that path. The path should be such - that it can be mounted by other NFS clients in your network. To see all the paths exported - by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access - to your server. You can specify any directory that appears in the results, and any - subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos - authentication. To transfer all the data in the folder that you specified, DataSync must - have permissions to read all the data. To ensure this, either configure the NFS export with - no_root_squash, or ensure that the files you want DataSync to access have permissions that - allow read access for all users. Doing either option enables the agent to read the files. - For the agent to access directories, you must additionally enable all execute access. If - you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more - information. For information about NFS export configuration, see 18.7. The /etc/exports - Configuration File in the Red Hat Enterprise Linux documentation. +- `"Subdirectory"`: Specifies the export path in your NFS file server that you want + DataSync to mount. This path (or a subdirectory of the path) is where DataSync transfers + data to or from. For information on configuring an export for DataSync, see Accessing NFS + file servers. 
""" function update_location_nfs(LocationArn; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -2518,9 +2702,7 @@ end update_location_object_storage(location_arn) update_location_object_storage(location_arn, params::Dict{String,<:Any}) -Updates some parameters of an existing object storage location that DataSync accesses for a -transfer. For information about creating a self-managed object storage location, see -Creating a location for object storage. +Updates some parameters of an existing DataSync location for an object storage system. # Arguments - `location_arn`: Specifies the ARN of the object storage system location that you're @@ -2534,11 +2716,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys securely connect with your location. - `"SecretKey"`: Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server. -- `"ServerCertificate"`: Specifies a certificate to authenticate with an object storage - system that uses a private or self-signed certificate authority (CA). You must specify a - Base64-encoded .pem file (for example, file:///home/user/.ssh/storage_sys_certificate.pem). - The certificate can be up to 32768 bytes (before Base64 encoding). To use this parameter, - configure ServerProtocol to HTTPS. Updating the certificate doesn't interfere with tasks +- `"ServerCertificate"`: Specifies a certificate chain for DataSync to authenticate with + your object storage system if the system uses a private or self-signed certificate + authority (CA). You must specify a single .pem file with a full certificate chain (for + example, file:///home/user/.ssh/object_storage_certificates.pem). The certificate chain + might include: The object storage system's certificate All intermediate certificates + (if there are any) The root certificate of the signing CA You can concatenate your + certificates into a .pem file (which can be up to 32768 bytes before base64 encoding). The + following example cat command creates an object_storage_certificates.pem file that includes + three certificates: cat object_server_certificate.pem intermediate_certificate.pem + ca_root_certificate.pem > object_storage_certificates.pem To use this parameter, + configure ServerProtocol to HTTPS. Updating this parameter doesn't interfere with tasks that you have in progress. - `"ServerPort"`: Specifies the port that your object storage server accepts inbound network traffic on (for example, port 443). @@ -2577,35 +2765,34 @@ end update_location_smb(location_arn) update_location_smb(location_arn, params::Dict{String,<:Any}) -Updates some of the parameters of a previously created location for Server Message Block -(SMB) file system access. For information about creating an SMB location, see Creating a -location for SMB. +Updates some of the parameters of a Server Message Block (SMB) file server location that +you can use for DataSync transfers. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the SMB location to update. +- `location_arn`: Specifies the ARN of the SMB location that you want to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentArns"`: The Amazon Resource Names (ARNs) of agents to use for a Simple Message - Block (SMB) location. -- `"Domain"`: The name of the Windows domain that the SMB server belongs to. 
+- `"AgentArns"`: Specifies the DataSync agent (or agents) which you want to connect to your + SMB file server. You specify an agent by using its Amazon Resource Name (ARN). +- `"Domain"`: Specifies the Windows domain name that your SMB file server belongs to. If + you have multiple domains in your environment, configuring this parameter makes sure that + DataSync connects to the right file server. For more information, see required permissions + for SMB locations. - `"MountOptions"`: -- `"Password"`: The password of the user who can mount the share has the permissions to - access files and folders in the SMB share. -- `"Subdirectory"`: The subdirectory in the SMB file system that is used to read data from - the SMB source location or write data to the SMB destination. The SMB path should be a path - that's exported by the SMB server, or a subdirectory of that path. The path should be such - that it can be mounted by other SMB clients in your network. Subdirectory must be - specified with forward slashes. For example, /path/to/folder. To transfer all the data in - the folder that you specified, DataSync must have permissions to mount the SMB share and to - access all the data in that share. To ensure this, do either of the following: Ensure - that the user/password specified belongs to the user who can mount the share and who has - the appropriate permissions for all of the files and directories that you want DataSync to - access. Use credentials of a member of the Backup Operators group to mount the share. - Doing either of these options enables the agent to access the data. For the agent to access - directories, you must also enable all execute access. -- `"User"`: The user who can mount the share has the permissions to access files and - folders in the SMB share. +- `"Password"`: Specifies the password of the user who can mount your SMB file server and + has permission to access the files and folders involved in your transfer. For more + information, see required permissions for SMB locations. +- `"Subdirectory"`: Specifies the name of the share exported by your SMB file server where + DataSync will read or write data. You can include a subdirectory in the share path (for + example, /path/to/subdirectory). Make sure that other SMB clients in your network can also + mount this path. To copy all data in the specified subdirectory, DataSync must be able to + mount the SMB share and access all of its data. For more information, see required + permissions for SMB locations. +- `"User"`: Specifies the user name that can mount your SMB file server and has permission + to access the files and folders involved in your transfer. For information about choosing a + user with the right level of access for your transfer, see required permissions for SMB + locations. """ function update_location_smb(LocationArn; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -2644,7 +2831,7 @@ with DataSync Discovery. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AgentArns"`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that - connects to and reads your on-premises storage system. + connects to and reads your on-premises storage system. You can only specify one ARN. - `"CloudWatchLogGroupArn"`: Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging discovery job events. 
- `"Credentials"`: Specifies the user name and password for accessing your on-premises @@ -2684,25 +2871,38 @@ end update_task(task_arn) update_task(task_arn, params::Dict{String,<:Any}) -Updates the metadata associated with a task. +Updates the configuration of a task, which defines where and how DataSync transfers your +data. # Arguments -- `task_arn`: The Amazon Resource Name (ARN) of the resource name of the task to update. +- `task_arn`: Specifies the ARN of the task that you want to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CloudWatchLogGroupArn"`: The Amazon Resource Name (ARN) of the resource name of the - Amazon CloudWatch log group. -- `"Excludes"`: Specifies a list of filter rules that exclude specific data during your - transfer. For more information and examples, see Filtering data transferred by DataSync. -- `"Includes"`: Specifies a list of filter rules that include specific data during your - transfer. For more information and examples, see Filtering data transferred by DataSync. -- `"Name"`: The name of the task to update. +- `"CloudWatchLogGroupArn"`: Specifies the Amazon Resource Name (ARN) of an Amazon + CloudWatch log group for monitoring your task. +- `"Excludes"`: Specifies exclude filters that define the files, objects, and folders in + your source location that you don't want DataSync to transfer. For more information and + examples, see Specifying what DataSync transfers by using filters. +- `"Includes"`: Specifies include filters define the files, objects, and folders in your + source location that you want DataSync to transfer. For more information and examples, see + Specifying what DataSync transfers by using filters. +- `"ManifestConfig"`: Configures a manifest, which is a list of files or objects that you + want DataSync to transfer. For more information and configuration examples, see Specifying + what DataSync transfers by using a manifest. When using this parameter, your caller + identity (the IAM role that you're using DataSync with) must have the iam:PassRole + permission. The AWSDataSyncFullAccess policy includes this permission. To remove a manifest + configuration, specify this parameter as empty. +- `"Name"`: Specifies the name of your task. - `"Options"`: -- `"Schedule"`: Specifies a schedule used to periodically transfer files from a source to a - destination location. You can configure your task to execute hourly, daily, weekly or on - specific days of the week. You control when in the day or hour you want the task to - execute. The time you specify is UTC time. For more information, see Scheduling your task. +- `"Schedule"`: Specifies a schedule for when you want your task to run. For more + information, see Scheduling your task. +- `"TaskReportConfig"`: Specifies how you want to configure a task report, which provides + detailed information about your DataSync transfer. For more information, see Monitoring + your DataSync transfers with task reports. When using this parameter, your caller identity + (the IAM role that you're using DataSync with) must have the iam:PassRole permission. The + AWSDataSyncFullAccess policy includes this permission. To remove a task report + configuration, specify this parameter as empty. 
""" function update_task(TaskArn; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -2727,9 +2927,9 @@ end update_task_execution(options, task_execution_arn) update_task_execution(options, task_execution_arn, params::Dict{String,<:Any}) -Modifies a running DataSync task. Currently, the only Option that you can modify with -UpdateTaskExecution is BytesPerSecond , which throttles bandwidth for a running or queued -task. +Updates the configuration of a running DataSync task execution. Currently, the only Option +that you can modify with UpdateTaskExecution is BytesPerSecond , which throttles bandwidth +for a running or queued task execution. # Arguments - `options`: diff --git a/src/services/datazone.jl b/src/services/datazone.jl new file mode 100644 index 0000000000..522ae7d8ab --- /dev/null +++ b/src/services/datazone.jl @@ -0,0 +1,5411 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: datazone +using AWS.Compat +using AWS.UUIDs + +""" + accept_predictions(domain_identifier, identifier) + accept_predictions(domain_identifier, identifier, params::Dict{String,<:Any}) + +Accepts automatically generated business-friendly metadata for your Amazon DataZone assets. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. +- `identifier`: The identifier of the asset. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"acceptChoices"`: Specifies the prediction (aka, the automatically generated piece of + metadata) and the target (for example, a column name) that can be accepted. +- `"acceptRule"`: Specifies the rule (or the conditions) under which a prediction can be + accepted. +- `"clientToken"`: A unique, case-sensitive identifier to ensure idempotency of the + request. This field is automatically populated if not provided. +- `"revision"`: The revision that is to be made to the asset. +""" +function accept_predictions( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/accept-predictions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function accept_predictions( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/accept-predictions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + accept_subscription_request(domain_identifier, identifier) + accept_subscription_request(domain_identifier, identifier, params::Dict{String,<:Any}) + +Accepts a subscription request to a specific asset. + +# Arguments +- `domain_identifier`: The Amazon DataZone domain where the specified subscription request + is being accepted. +- `identifier`: The unique identifier of the subscription request that is to be accepted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"decisionComment"`: A description that specifies the reason for accepting the specified + subscription request. 
+""" +function accept_subscription_request( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)/accept"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function accept_subscription_request( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)/accept", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_environment_role(domain_identifier, environment_identifier, environment_role_arn) + associate_environment_role(domain_identifier, environment_identifier, environment_role_arn, params::Dict{String,<:Any}) + +Associates the environment role in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the environment role + is associated. +- `environment_identifier`: The ID of the Amazon DataZone environment. +- `environment_role_arn`: The ARN of the environment role. + +""" +function associate_environment_role( + domainIdentifier, + environmentIdentifier, + environmentRoleArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_environment_role( + domainIdentifier, + environmentIdentifier, + environmentRoleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + cancel_metadata_generation_run(domain_identifier, identifier) + cancel_metadata_generation_run(domain_identifier, identifier, params::Dict{String,<:Any}) + +Cancels the metadata generation run. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the metadata + generation run is to be cancelled. +- `identifier`: The ID of the metadata generation run. + +""" +function cancel_metadata_generation_run( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs/$(identifier)/cancel"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_metadata_generation_run( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs/$(identifier)/cancel", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + cancel_subscription(domain_identifier, identifier) + cancel_subscription(domain_identifier, identifier, params::Dict{String,<:Any}) + +Cancels the subscription to the specified asset. + +# Arguments +- `domain_identifier`: The unique identifier of the Amazon DataZone domain where the + subscription request is being cancelled. +- `identifier`: The unique identifier of the subscription that is being cancelled. 
+
+"""
+function cancel_subscription(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "PUT",
+        "/v2/domains/$(domainIdentifier)/subscriptions/$(identifier)/cancel";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function cancel_subscription(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "PUT",
+        "/v2/domains/$(domainIdentifier)/subscriptions/$(identifier)/cancel",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    create_asset(domain_identifier, name, owning_project_identifier, type_identifier)
+    create_asset(domain_identifier, name, owning_project_identifier, type_identifier, params::Dict{String,<:Any})
+
+Creates an asset in the Amazon DataZone catalog.
+
+# Arguments
+- `domain_identifier`: Amazon DataZone domain where the asset is created.
+- `name`: Asset name.
+- `owning_project_identifier`: The unique identifier of the project that owns this asset.
+- `type_identifier`: The unique identifier of this asset's type.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the
+  idempotency of the request.
+- `"description"`: Asset description.
+- `"externalIdentifier"`: The external identifier of the asset.
+- `"formsInput"`: Metadata forms attached to the asset.
+- `"glossaryTerms"`: Glossary terms attached to the asset.
+- `"predictionConfiguration"`: The configuration of the automatically generated
+  business-friendly metadata for the asset.
+- `"typeRevision"`: The revision of this asset's type.
+"""
+function create_asset(
+    domainIdentifier,
+    name,
+    owningProjectIdentifier,
+    typeIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/assets",
+        Dict{String,Any}(
+            "name" => name,
+            "owningProjectIdentifier" => owningProjectIdentifier,
+            "typeIdentifier" => typeIdentifier,
+            "clientToken" => string(uuid4()),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_asset(
+    domainIdentifier,
+    name,
+    owningProjectIdentifier,
+    typeIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/assets",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "name" => name,
+                    "owningProjectIdentifier" => owningProjectIdentifier,
+                    "typeIdentifier" => typeIdentifier,
+                    "clientToken" => string(uuid4()),
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    create_asset_revision(domain_identifier, identifier, name)
+    create_asset_revision(domain_identifier, identifier, name, params::Dict{String,<:Any})
+
+Creates a revision of the asset.
+
+# Arguments
+- `domain_identifier`: The unique identifier of the domain where the asset is being revised.
+- `identifier`: The identifier of the asset.
+- `name`: The revised name of the asset.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the
+  idempotency of the request.
+- `"description"`: The revised description of the asset.
+- `"formsInput"`: The metadata forms to be attached to the asset as part of asset revision. +- `"glossaryTerms"`: The glossary terms to be attached to the asset as part of asset + revision. +- `"predictionConfiguration"`: The configuration of the automatically generated + business-friendly metadata for the asset. +- `"typeRevision"`: The revision type of the asset. +""" +function create_asset_revision( + domainIdentifier, identifier, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/revisions", + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_asset_revision( + domainIdentifier, + identifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/revisions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_asset_type(domain_identifier, forms_input, name, owning_project_identifier) + create_asset_type(domain_identifier, forms_input, name, owning_project_identifier, params::Dict{String,<:Any}) + +Creates a custom asset type. + +# Arguments +- `domain_identifier`: The unique identifier of the Amazon DataZone domain where the custom + asset type is being created. +- `forms_input`: The metadata forms that are to be attached to the custom asset type. +- `name`: The name of the custom asset type. +- `owning_project_identifier`: The identifier of the Amazon DataZone project that is to own + the custom asset type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The descripton of the custom asset type. +""" +function create_asset_type( + domainIdentifier, + formsInput, + name, + owningProjectIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/asset-types", + Dict{String,Any}( + "formsInput" => formsInput, + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_asset_type( + domainIdentifier, + formsInput, + name, + owningProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/asset-types", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "formsInput" => formsInput, + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_data_source(domain_identifier, environment_identifier, name, project_identifier, type) + create_data_source(domain_identifier, environment_identifier, name, project_identifier, type, params::Dict{String,<:Any}) + +Creates an Amazon DataZone data source. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain where the data source is + created. +- `environment_identifier`: The unique identifier of the Amazon DataZone environment to + which the data source publishes assets. +- `name`: The name of the data source. 
+- `project_identifier`: The identifier of the Amazon DataZone project in which you want to + add this data source. +- `type`: The type of the data source. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetFormsInput"`: The metadata forms that are to be attached to the assets that this + data source works with. +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"configuration"`: Specifies the configuration of the data source. It can be set to + either glueRunConfiguration or redshiftRunConfiguration. +- `"description"`: The description of the data source. +- `"enableSetting"`: Specifies whether the data source is enabled. +- `"publishOnImport"`: Specifies whether the assets that this data source creates in the + inventory are to be also automatically published to the catalog. +- `"recommendation"`: Specifies whether the business name generation is to be enabled for + this data source. +- `"schedule"`: The schedule of the data source runs. +""" +function create_data_source( + domainIdentifier, + environmentIdentifier, + name, + projectIdentifier, + type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-sources", + Dict{String,Any}( + "environmentIdentifier" => environmentIdentifier, + "name" => name, + "projectIdentifier" => projectIdentifier, + "type" => type, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_source( + domainIdentifier, + environmentIdentifier, + name, + projectIdentifier, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-sources", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "environmentIdentifier" => environmentIdentifier, + "name" => name, + "projectIdentifier" => projectIdentifier, + "type" => type, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_domain(domain_execution_role, name) + create_domain(domain_execution_role, name, params::Dict{String,<:Any}) + +Creates an Amazon DataZone domain. + +# Arguments +- `domain_execution_role`: The domain execution role that is created when an Amazon + DataZone domain is created. The domain execution role is created in the Amazon Web Services + account that houses the Amazon DataZone domain. +- `name`: The name of the Amazon DataZone domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of the Amazon DataZone domain. +- `"kmsKeyIdentifier"`: The identifier of the Amazon Web Services Key Management Service + (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data. +- `"singleSignOn"`: The single-sign on configuration of the Amazon DataZone domain. +- `"tags"`: The tags specified for the Amazon DataZone domain. 
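+
+# Examples
+A minimal usage sketch (not part of the generated API definition): the execution role ARN
+and names are placeholders.
+```julia
+using AWS
+@service DataZone
+
+# Create a domain, passing an optional description through `params`.
+DataZone.create_domain(
+    "arn:aws:iam::111122223333:role/ExampleDomainExecutionRole",
+    "example-domain",
+    Dict("description" => "Example domain created with AWS.jl"),
+)
+```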
+""" +function create_domain( + domainExecutionRole, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains", + Dict{String,Any}( + "domainExecutionRole" => domainExecutionRole, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_domain( + domainExecutionRole, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "domainExecutionRole" => domainExecutionRole, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_environment(domain_identifier, environment_profile_identifier, name, project_identifier) + create_environment(domain_identifier, environment_profile_identifier, name, project_identifier, params::Dict{String,<:Any}) + +Create an Amazon DataZone environment. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which the + environment is created. +- `environment_profile_identifier`: The identifier of the environment profile that is used + to create this Amazon DataZone environment. +- `name`: The name of the Amazon DataZone environment. +- `project_identifier`: The identifier of the Amazon DataZone project in which this + environment is created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the Amazon DataZone environment. +- `"environmentAccountIdentifier"`: The ID of the account in which the environment is being + created. +- `"environmentAccountRegion"`: The region of the account in which the environment is being + created. +- `"environmentBlueprintIdentifier"`: The ID of the blueprint with which the environment is + being created. +- `"glossaryTerms"`: The glossary terms that can be used in this Amazon DataZone + environment. +- `"userParameters"`: The user parameters of this Amazon DataZone environment. 
+""" +function create_environment( + domainIdentifier, + environmentProfileIdentifier, + name, + projectIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environments", + Dict{String,Any}( + "environmentProfileIdentifier" => environmentProfileIdentifier, + "name" => name, + "projectIdentifier" => projectIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_environment( + domainIdentifier, + environmentProfileIdentifier, + name, + projectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environments", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "environmentProfileIdentifier" => environmentProfileIdentifier, + "name" => name, + "projectIdentifier" => projectIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_environment_action(domain_identifier, environment_identifier, name, parameters) + create_environment_action(domain_identifier, environment_identifier, name, parameters, params::Dict{String,<:Any}) + +Creates an action for the environment, for example, creates a console link for an analytics +tool that is available in this environment. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the environment action + is created. +- `environment_identifier`: The ID of the environment in which the environment action is + created. +- `name`: The name of the environment action. +- `parameters`: The parameters of the environment action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the environment action that is being created in the + environment. +""" +function create_environment_action( + domainIdentifier, + environmentIdentifier, + name, + parameters; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions", + Dict{String,Any}("name" => name, "parameters" => parameters); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_environment_action( + domainIdentifier, + environmentIdentifier, + name, + parameters, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("name" => name, "parameters" => parameters), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_environment_profile(domain_identifier, environment_blueprint_identifier, name, project_identifier) + create_environment_profile(domain_identifier, environment_blueprint_identifier, name, project_identifier, params::Dict{String,<:Any}) + +Creates an Amazon DataZone environment profile. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this environment + profile is created. +- `environment_blueprint_identifier`: The ID of the blueprint with which this environment + profile is created. +- `name`: The name of this Amazon DataZone environment profile. +- `project_identifier`: The identifier of the project in which to create the environment + profile. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"awsAccountId"`: The Amazon Web Services account in which the Amazon DataZone + environment is created. +- `"awsAccountRegion"`: The Amazon Web Services region in which this environment profile is + created. +- `"description"`: The description of this Amazon DataZone environment profile. +- `"userParameters"`: The user parameters of this Amazon DataZone environment profile. +""" +function create_environment_profile( + domainIdentifier, + environmentBlueprintIdentifier, + name, + projectIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environment-profiles", + Dict{String,Any}( + "environmentBlueprintIdentifier" => environmentBlueprintIdentifier, + "name" => name, + "projectIdentifier" => projectIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_environment_profile( + domainIdentifier, + environmentBlueprintIdentifier, + name, + projectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environment-profiles", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "environmentBlueprintIdentifier" => environmentBlueprintIdentifier, + "name" => name, + "projectIdentifier" => projectIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_form_type(domain_identifier, model, name, owning_project_identifier) + create_form_type(domain_identifier, model, name, owning_project_identifier, params::Dict{String,<:Any}) + +Creates a metadata form type. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this metadata form + type is created. +- `model`: The model of this Amazon DataZone metadata form type. +- `name`: The name of this Amazon DataZone metadata form type. +- `owning_project_identifier`: The ID of the Amazon DataZone project that owns this + metadata form type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of this Amazon DataZone metadata form type. +- `"status"`: The status of this Amazon DataZone metadata form type. 
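+
+# Examples
+A hypothetical sketch (not part of the generated API definition): the identifiers are
+placeholders, and the `model` value assumes the Smithy-based form model accepted by this
+operation.
+```julia
+using AWS
+@service DataZone
+
+# Register a simple metadata form type owned by a project.
+DataZone.create_form_type(
+    "dzd_example1234567",
+    Dict("smithy" => "structure ExampleForm { exampleField: String }"),
+    "ExampleForm",
+    "project-example-id",
+)
+```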
+""" +function create_form_type( + domainIdentifier, + model, + name, + owningProjectIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/form-types", + Dict{String,Any}( + "model" => model, + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_form_type( + domainIdentifier, + model, + name, + owningProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/form-types", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "model" => model, + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_glossary(domain_identifier, name, owning_project_identifier) + create_glossary(domain_identifier, name, owning_project_identifier, params::Dict{String,<:Any}) + +Creates an Amazon DataZone business glossary. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this business glossary + is created. +- `name`: The name of this business glossary. +- `owning_project_identifier`: The ID of the project that currently owns business glossary. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of this business glossary. +- `"status"`: The status of this business glossary. +""" +function create_glossary( + domainIdentifier, + name, + owningProjectIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/glossaries", + Dict{String,Any}( + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_glossary( + domainIdentifier, + name, + owningProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/glossaries", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_glossary_term(domain_identifier, glossary_identifier, name) + create_glossary_term(domain_identifier, glossary_identifier, name, params::Dict{String,<:Any}) + +Creates a business glossary term. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this business glossary + term is created. +- `glossary_identifier`: The ID of the business glossary in which this term is created. +- `name`: The name of this business glossary term. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"longDescription"`: The long description of this business glossary term. 
+- `"shortDescription"`: The short description of this business glossary term. +- `"status"`: The status of this business glossary term. +- `"termRelations"`: The term relations of this business glossary term. +""" +function create_glossary_term( + domainIdentifier, + glossaryIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/glossary-terms", + Dict{String,Any}( + "glossaryIdentifier" => glossaryIdentifier, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_glossary_term( + domainIdentifier, + glossaryIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/glossary-terms", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "glossaryIdentifier" => glossaryIdentifier, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_group_profile(domain_identifier, group_identifier) + create_group_profile(domain_identifier, group_identifier, params::Dict{String,<:Any}) + +Creates a group profile in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which the group + profile is created. +- `group_identifier`: The identifier of the group for which the group profile is created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function create_group_profile( + domainIdentifier, groupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/group-profiles", + Dict{String,Any}( + "groupIdentifier" => groupIdentifier, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_group_profile( + domainIdentifier, + groupIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/group-profiles", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "groupIdentifier" => groupIdentifier, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_listing_change_set(action, domain_identifier, entity_identifier, entity_type) + create_listing_change_set(action, domain_identifier, entity_identifier, entity_type, params::Dict{String,<:Any}) + +Publishes a listing (a record of an asset at a given time) or removes a listing from the +catalog. + +# Arguments +- `action`: Specifies whether to publish or unpublish a listing. +- `domain_identifier`: The ID of the Amazon DataZone domain. +- `entity_identifier`: The ID of the asset. +- `entity_type`: The type of an entity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"entityRevision"`: The revision of an asset. 
+""" +function create_listing_change_set( + action, + domainIdentifier, + entityIdentifier, + entityType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/listings/change-set", + Dict{String,Any}( + "action" => action, + "entityIdentifier" => entityIdentifier, + "entityType" => entityType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_listing_change_set( + action, + domainIdentifier, + entityIdentifier, + entityType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/listings/change-set", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "action" => action, + "entityIdentifier" => entityIdentifier, + "entityType" => entityType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_project(domain_identifier, name) + create_project(domain_identifier, name, params::Dict{String,<:Any}) + +Creates an Amazon DataZone project. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this project is + created. +- `name`: The name of the Amazon DataZone project. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the Amazon DataZone project. +- `"glossaryTerms"`: The glossary terms that can be used in this Amazon DataZone project. +""" +function create_project( + domainIdentifier, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/projects", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_project( + domainIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/projects", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_project_membership(designation, domain_identifier, member, project_identifier) + create_project_membership(designation, domain_identifier, member, project_identifier, params::Dict{String,<:Any}) + +Creates a project membership in Amazon DataZone. + +# Arguments +- `designation`: The designation of the project membership. +- `domain_identifier`: The ID of the Amazon DataZone domain in which project membership is + created. +- `member`: The project member whose project membership was created. +- `project_identifier`: The ID of the project for which this project membership was created. 
+
+"""
+function create_project_membership(
+    designation,
+    domainIdentifier,
+    member,
+    projectIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/projects/$(projectIdentifier)/createMembership",
+        Dict{String,Any}("designation" => designation, "member" => member);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_project_membership(
+    designation,
+    domainIdentifier,
+    member,
+    projectIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/projects/$(projectIdentifier)/createMembership",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("designation" => designation, "member" => member),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    create_subscription_grant(domain_identifier, environment_identifier, granted_entity, subscription_target_identifier)
+    create_subscription_grant(domain_identifier, environment_identifier, granted_entity, subscription_target_identifier, params::Dict{String,<:Any})
+
+Creates a subscription grant in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the subscription grant
+  is created.
+- `environment_identifier`: The ID of the environment in which the subscription grant is
+  created.
+- `granted_entity`: The entity to which the subscription is to be granted.
+- `subscription_target_identifier`: The ID of the subscription target for which the
+  subscription grant is created.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"assetTargetNames"`: The names of the assets for which the subscription grant is created.
+- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the
+  idempotency of the request.
+"""
+function create_subscription_grant(
+    domainIdentifier,
+    environmentIdentifier,
+    grantedEntity,
+    subscriptionTargetIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/subscription-grants",
+        Dict{String,Any}(
+            "environmentIdentifier" => environmentIdentifier,
+            "grantedEntity" => grantedEntity,
+            "subscriptionTargetIdentifier" => subscriptionTargetIdentifier,
+            "clientToken" => string(uuid4()),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_subscription_grant(
+    domainIdentifier,
+    environmentIdentifier,
+    grantedEntity,
+    subscriptionTargetIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/subscription-grants",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "environmentIdentifier" => environmentIdentifier,
+                    "grantedEntity" => grantedEntity,
+                    "subscriptionTargetIdentifier" => subscriptionTargetIdentifier,
+                    "clientToken" => string(uuid4()),
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    create_subscription_request(domain_identifier, request_reason, subscribed_listings, subscribed_principals)
+    create_subscription_request(domain_identifier, request_reason, subscribed_listings, subscribed_principals, params::Dict{String,<:Any})
+
+Creates a subscription request in Amazon DataZone.
+ +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the subscription + request is created. +- `request_reason`: The reason for the subscription request. +- `subscribed_listings`: The published asset for which the subscription grant is to be + created. +- `subscribed_principals`: The Amazon DataZone principals for whom the subscription request + is created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function create_subscription_request( + domainIdentifier, + requestReason, + subscribedListings, + subscribedPrincipals; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/subscription-requests", + Dict{String,Any}( + "requestReason" => requestReason, + "subscribedListings" => subscribedListings, + "subscribedPrincipals" => subscribedPrincipals, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_subscription_request( + domainIdentifier, + requestReason, + subscribedListings, + subscribedPrincipals, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/subscription-requests", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "requestReason" => requestReason, + "subscribedListings" => subscribedListings, + "subscribedPrincipals" => subscribedPrincipals, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_subscription_target(applicable_asset_types, authorized_principals, domain_identifier, environment_identifier, manage_access_role, name, subscription_target_config, type) + create_subscription_target(applicable_asset_types, authorized_principals, domain_identifier, environment_identifier, manage_access_role, name, subscription_target_config, type, params::Dict{String,<:Any}) + +Creates a subscription target in Amazon DataZone. + +# Arguments +- `applicable_asset_types`: The asset types that can be included in the subscription target. +- `authorized_principals`: The authorized principals of the subscription target. +- `domain_identifier`: The ID of the Amazon DataZone domain in which subscription target is + created. +- `environment_identifier`: The ID of the environment in which subscription target is + created. +- `manage_access_role`: The manage access role that is used to create the subscription + target. +- `name`: The name of the subscription target. +- `subscription_target_config`: The configuration of the subscription target. +- `type`: The type of the subscription target. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"provider"`: The provider of the subscription target. 
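+
+# Examples
+A hypothetical sketch (not part of the generated API definition): every value below is a
+placeholder, and the target configuration assumes the documented formName/content pairs.
+```julia
+using AWS
+@service DataZone
+
+# Register a subscription target for an environment.
+DataZone.create_subscription_target(
+    ["ExampleAssetType"],
+    ["arn:aws:iam::111122223333:role/ExampleConsumerRole"],
+    "dzd_example1234567",
+    "environment-example-id",
+    "arn:aws:iam::111122223333:role/ExampleManageAccessRole",
+    "example-subscription-target",
+    [Dict("formName" => "ExampleTargetConfigForm", "content" => "{}")],
+    "ExampleTargetType",
+)
+```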
+""" +function create_subscription_target( + applicableAssetTypes, + authorizedPrincipals, + domainIdentifier, + environmentIdentifier, + manageAccessRole, + name, + subscriptionTargetConfig, + type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets", + Dict{String,Any}( + "applicableAssetTypes" => applicableAssetTypes, + "authorizedPrincipals" => authorizedPrincipals, + "manageAccessRole" => manageAccessRole, + "name" => name, + "subscriptionTargetConfig" => subscriptionTargetConfig, + "type" => type, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_subscription_target( + applicableAssetTypes, + authorizedPrincipals, + domainIdentifier, + environmentIdentifier, + manageAccessRole, + name, + subscriptionTargetConfig, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "applicableAssetTypes" => applicableAssetTypes, + "authorizedPrincipals" => authorizedPrincipals, + "manageAccessRole" => manageAccessRole, + "name" => name, + "subscriptionTargetConfig" => subscriptionTargetConfig, + "type" => type, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_user_profile(domain_identifier, user_identifier) + create_user_profile(domain_identifier, user_identifier, params::Dict{String,<:Any}) + +Creates a user profile in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a user profile + is created. +- `user_identifier`: The identifier of the user for which the user profile is created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"userType"`: The user type of the user for which the user profile is created. +""" +function create_user_profile( + domainIdentifier, userIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/user-profiles", + Dict{String,Any}( + "userIdentifier" => userIdentifier, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_user_profile( + domainIdentifier, + userIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/user-profiles", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "userIdentifier" => userIdentifier, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_asset(domain_identifier, identifier) + delete_asset(domain_identifier, identifier, params::Dict{String,<:Any}) + +Delets an asset in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the asset is deleted. +- `identifier`: The identifier of the asset that is deleted. 
+
+"""
+function delete_asset(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/assets/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_asset(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/assets/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_asset_type(domain_identifier, identifier)
+    delete_asset_type(domain_identifier, identifier, params::Dict{String,<:Any})
+
+Deletes an asset type in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the asset type is
+  deleted.
+- `identifier`: The identifier of the asset type that is deleted.
+
+"""
+function delete_asset_type(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_asset_type(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_data_source(domain_identifier, identifier)
+    delete_data_source(domain_identifier, identifier, params::Dict{String,<:Any})
+
+Deletes a data source in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the data source is
+  deleted.
+- `identifier`: The identifier of the data source that is deleted.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the
+  idempotency of the request.
+- `"retainPermissionsOnRevokeFailure"`: Specifies that the granted permissions are retained
+  in case of a self-subscribe functionality failure for a data source.
+"""
+function delete_data_source(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/data-sources/$(identifier)",
+        Dict{String,Any}("clientToken" => string(uuid4()));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_data_source(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/data-sources/$(identifier)",
+        Dict{String,Any}(
+            mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params)
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_domain(identifier)
+    delete_domain(identifier, params::Dict{String,<:Any})
+
+Deletes an Amazon DataZone domain.
+
+# Arguments
+- `identifier`: The identifier of the Amazon DataZone domain that is to be deleted.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the
+  idempotency of the request.
+- `"skipDeletionCheck"`: Specifies the optional flag to delete all child entities within + the domain. +""" +function delete_domain(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return datazone( + "DELETE", + "/v2/domains/$(identifier)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_domain( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(identifier)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_environment(domain_identifier, identifier) + delete_environment(domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes an environment in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the environment is + deleted. +- `identifier`: The identifier of the environment that is to be deleted. + +""" +function delete_environment( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_environment( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_environment_action(domain_identifier, environment_identifier, identifier) + delete_environment_action(domain_identifier, environment_identifier, identifier, params::Dict{String,<:Any}) + +Deletes an action for the environment, for example, deletes a console link for an analytics +tool that is available in this environment. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which an environment action + is deleted. +- `environment_identifier`: The ID of the environment where an environment action is + deleted. +- `identifier`: The ID of the environment action that is deleted. + +""" +function delete_environment_action( + domainIdentifier, + environmentIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_environment_action( + domainIdentifier, + environmentIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_environment_blueprint_configuration(domain_identifier, environment_blueprint_identifier) + delete_environment_blueprint_configuration(domain_identifier, environment_blueprint_identifier, params::Dict{String,<:Any}) + +Deletes the blueprint configuration in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the blueprint + configuration is deleted. 
+- `environment_blueprint_identifier`: The ID of the blueprint the configuration of which is
+  deleted.
+
+"""
+function delete_environment_blueprint_configuration(
+    domainIdentifier,
+    environmentBlueprintIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations/$(environmentBlueprintIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_environment_blueprint_configuration(
+    domainIdentifier,
+    environmentBlueprintIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations/$(environmentBlueprintIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_environment_profile(domain_identifier, identifier)
+    delete_environment_profile(domain_identifier, identifier, params::Dict{String,<:Any})
+
+Deletes an environment profile in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the environment
+  profile is deleted.
+- `identifier`: The ID of the environment profile that is deleted.
+
+"""
+function delete_environment_profile(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/environment-profiles/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_environment_profile(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/environment-profiles/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_form_type(domain_identifier, form_type_identifier)
+    delete_form_type(domain_identifier, form_type_identifier, params::Dict{String,<:Any})
+
+Deletes a metadata form type in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the metadata form type
+  is deleted.
+- `form_type_identifier`: The ID of the metadata form type that is deleted.
+
+"""
+function delete_form_type(
+    domainIdentifier, formTypeIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/form-types/$(formTypeIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_form_type(
+    domainIdentifier,
+    formTypeIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/form-types/$(formTypeIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_glossary(domain_identifier, identifier)
+    delete_glossary(domain_identifier, identifier, params::Dict{String,<:Any})
+
+Deletes a business glossary in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the business glossary
+  is deleted.
+- `identifier`: The ID of the business glossary that is deleted.
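+
+# Example
+A minimal usage sketch (not part of the generated definitions); it assumes the high-level
+`@service` pattern from AWS.jl, and both IDs below are hypothetical placeholders.
+```julia
+using AWS
+@service DataZone
+
+# Delete a business glossary from a domain (placeholder IDs).
+DataZone.delete_glossary("dzd_exampledomain", "example-glossary-id")
+```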
+ +""" +function delete_glossary( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/glossaries/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_glossary( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/glossaries/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_glossary_term(domain_identifier, identifier) + delete_glossary_term(domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes a business glossary term in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the business glossary + term is deleted. +- `identifier`: The ID of the business glossary term that is deleted. + +""" +function delete_glossary_term( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/glossary-terms/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_glossary_term( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/glossary-terms/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_listing(domain_identifier, identifier) + delete_listing(domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes a listing (a record of an asset at a given time). + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain. +- `identifier`: The ID of the listing to be deleted. + +""" +function delete_listing( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/listings/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_listing( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/listings/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_project(domain_identifier, identifier) + delete_project(domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes a project in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the project is deleted. +- `identifier`: The identifier of the project that is to be deleted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"skipDeletionCheck"`: Specifies the optional flag to delete all child entities within + the project. 
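+
+# Example
+A minimal usage sketch (not part of the generated definitions); it assumes the high-level
+`@service` pattern from AWS.jl, and the IDs below are hypothetical placeholders.
+```julia
+using AWS
+@service DataZone
+
+# Delete a project, passing the optional "skipDeletionCheck" flag via the params dict.
+DataZone.delete_project(
+    "dzd_exampledomain", "example-project-id", Dict("skipDeletionCheck" => true)
+)
+```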
+"""
+function delete_project(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/projects/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_project(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/projects/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_project_membership(domain_identifier, member, project_identifier)
+    delete_project_membership(domain_identifier, member, project_identifier, params::Dict{String,<:Any})
+
+Deletes project membership in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain where project membership is
+  deleted.
+- `member`: The project member whose project membership is deleted.
+- `project_identifier`: The ID of the Amazon DataZone project the membership to which is
+  deleted.
+
+"""
+function delete_project_membership(
+    domainIdentifier,
+    member,
+    projectIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/projects/$(projectIdentifier)/deleteMembership",
+        Dict{String,Any}("member" => member);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_project_membership(
+    domainIdentifier,
+    member,
+    projectIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/projects/$(projectIdentifier)/deleteMembership",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("member" => member), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_subscription_grant(domain_identifier, identifier)
+    delete_subscription_grant(domain_identifier, identifier, params::Dict{String,<:Any})
+
+Deletes a subscription grant in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain where the subscription grant is
+  deleted.
+- `identifier`: The ID of the subscription grant that is deleted.
+
+"""
+function delete_subscription_grant(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/subscription-grants/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_subscription_grant(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "DELETE",
+        "/v2/domains/$(domainIdentifier)/subscription-grants/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_subscription_request(domain_identifier, identifier)
+    delete_subscription_request(domain_identifier, identifier, params::Dict{String,<:Any})
+
+Deletes a subscription request in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the subscription
+  request is deleted.
+- `identifier`: The ID of the subscription request that is deleted.
+ +""" +function delete_subscription_request( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_subscription_request( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_subscription_target(domain_identifier, environment_identifier, identifier) + delete_subscription_target(domain_identifier, environment_identifier, identifier, params::Dict{String,<:Any}) + +Deletes a subscription target in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the subscription + target is deleted. +- `environment_identifier`: The ID of the Amazon DataZone environment in which the + subscription target is deleted. +- `identifier`: The ID of the subscription target that is deleted. + +""" +function delete_subscription_target( + domainIdentifier, + environmentIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_subscription_target( + domainIdentifier, + environmentIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_time_series_data_points(domain_identifier, entity_identifier, entity_type, form_name) + delete_time_series_data_points(domain_identifier, entity_identifier, entity_type, form_name, params::Dict{String,<:Any}) + +Deletes the specified time series form for the specified asset. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain that houses the asset for which + you want to delete a time series form. +- `entity_identifier`: The ID of the asset for which you want to delete a time series form. +- `entity_type`: The type of the asset for which you want to delete a time series form. +- `form_name`: The name of the time series form that you want to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure idempotency of the + request. This field is automatically populated if not provided. 
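+
+# Example
+A minimal usage sketch (not part of the generated definitions); it assumes the high-level
+`@service` pattern from AWS.jl, and every argument below is a hypothetical placeholder.
+The `clientToken` is generated automatically when it is not supplied.
+```julia
+using AWS
+@service DataZone
+
+# Delete one time series form for an asset (placeholder values).
+DataZone.delete_time_series_data_points(
+    "dzd_exampledomain",  # domain_identifier
+    "example-asset-id",   # entity_identifier
+    "ASSET",              # entity_type
+    "exampleForm",        # form_name
+)
+```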
+""" +function delete_time_series_data_points( + domainIdentifier, + entityIdentifier, + entityType, + formName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points", + Dict{String,Any}("formName" => formName, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_time_series_data_points( + domainIdentifier, + entityIdentifier, + entityType, + formName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("formName" => formName, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_environment_role(domain_identifier, environment_identifier, environment_role_arn) + disassociate_environment_role(domain_identifier, environment_identifier, environment_role_arn, params::Dict{String,<:Any}) + +Disassociates the environment role in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which an environment role is + disassociated. +- `environment_identifier`: The ID of the environment. +- `environment_role_arn`: The ARN of the environment role. + +""" +function disassociate_environment_role( + domainIdentifier, + environmentIdentifier, + environmentRoleArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_environment_role( + domainIdentifier, + environmentIdentifier, + environmentRoleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_asset(domain_identifier, identifier) + get_asset(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone asset. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain to which the asset belongs. +- `identifier`: The ID of the Amazon DataZone asset. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"revision"`: The revision of the Amazon DataZone asset. +""" +function get_asset( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_asset( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_asset_type(domain_identifier, identifier) + get_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone asset type. 
+ +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the asset type exists. +- `identifier`: The ID of the asset type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"revision"`: The revision of the asset type. +""" +function get_asset_type( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_asset_type( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_data_source(domain_identifier, identifier) + get_data_source(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone data source. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the data source exists. +- `identifier`: The ID of the Amazon DataZone data source. + +""" +function get_data_source( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-sources/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_data_source( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-sources/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_data_source_run(domain_identifier, identifier) + get_data_source_run(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone data source run. + +# Arguments +- `domain_identifier`: The ID of the domain in which this data source run was performed. +- `identifier`: The ID of the data source run. + +""" +function get_data_source_run( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-source-runs/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_data_source_run( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-source-runs/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_domain(identifier) + get_domain(identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone domain. + +# Arguments +- `identifier`: The identifier of the specified Amazon DataZone domain. 
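+
+# Example
+A minimal usage sketch (not part of the generated definitions); it assumes the high-level
+`@service` pattern from AWS.jl, and the domain ID below is a hypothetical placeholder.
+```julia
+using AWS
+@service DataZone
+
+# Fetch a single domain by its identifier (placeholder ID).
+domain = DataZone.get_domain("dzd_exampledomain")
+```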
+ +""" +function get_domain(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return datazone( + "GET", + "/v2/domains/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_domain( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_environment(domain_identifier, identifier) + get_environment(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone environment. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain where the environment exists. +- `identifier`: The ID of the Amazon DataZone environment. + +""" +function get_environment( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_environment( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_environment_action(domain_identifier, environment_identifier, identifier) + get_environment_action(domain_identifier, environment_identifier, identifier, params::Dict{String,<:Any}) + +Gets the specified environment action. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the + GetEnvironmentAction API is invoked. +- `environment_identifier`: The environment ID of the environment action. +- `identifier`: The ID of the environment action + +""" +function get_environment_action( + domainIdentifier, + environmentIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_environment_action( + domainIdentifier, + environmentIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_environment_blueprint(domain_identifier, identifier) + get_environment_blueprint(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone blueprint. + +# Arguments +- `domain_identifier`: The identifier of the domain in which this blueprint exists. +- `identifier`: The ID of this Amazon DataZone blueprint. 
+
+"""
+function get_environment_blueprint(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-blueprints/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_environment_blueprint(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-blueprints/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_environment_blueprint_configuration(domain_identifier, environment_blueprint_identifier)
+    get_environment_blueprint_configuration(domain_identifier, environment_blueprint_identifier, params::Dict{String,<:Any})
+
+Gets the blueprint configuration in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain where this blueprint exists.
+- `environment_blueprint_identifier`: The ID of the blueprint.
+
+"""
+function get_environment_blueprint_configuration(
+    domainIdentifier,
+    environmentBlueprintIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations/$(environmentBlueprintIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_environment_blueprint_configuration(
+    domainIdentifier,
+    environmentBlueprintIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations/$(environmentBlueprintIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_environment_profile(domain_identifier, identifier)
+    get_environment_profile(domain_identifier, identifier, params::Dict{String,<:Any})
+
+Gets an environment profile in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which this environment
+  profile exists.
+- `identifier`: The ID of the environment profile.
+
+"""
+function get_environment_profile(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-profiles/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_environment_profile(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-profiles/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_form_type(domain_identifier, form_type_identifier)
+    get_form_type(domain_identifier, form_type_identifier, params::Dict{String,<:Any})
+
+Gets a metadata form type in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which this metadata form
+  type exists.
+- `form_type_identifier`: The ID of the metadata form type.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"revision"`: The revision of this metadata form type.
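+
+# Example
+A minimal usage sketch (not part of the generated definitions); it assumes the high-level
+`@service` pattern from AWS.jl, and the IDs and revision below are hypothetical
+placeholders.
+```julia
+using AWS
+@service DataZone
+
+# Fetch a metadata form type, pinning a specific revision via the optional params dict.
+form_type = DataZone.get_form_type(
+    "dzd_exampledomain", "example-form-type-id", Dict("revision" => "1")
+)
+```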
+""" +function get_form_type( + domainIdentifier, formTypeIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/form-types/$(formTypeIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_form_type( + domainIdentifier, + formTypeIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/form-types/$(formTypeIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_glossary(domain_identifier, identifier) + get_glossary(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets a business glossary in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this business glossary + exists. +- `identifier`: The ID of the business glossary. + +""" +function get_glossary( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/glossaries/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_glossary( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/glossaries/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_glossary_term(domain_identifier, identifier) + get_glossary_term(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets a business glossary term in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this business glossary + term exists. +- `identifier`: The ID of the business glossary term. + +""" +function get_glossary_term( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/glossary-terms/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_glossary_term( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/glossary-terms/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_group_profile(domain_identifier, group_identifier) + get_group_profile(domain_identifier, group_identifier, params::Dict{String,<:Any}) + +Gets a group profile in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which the group + profile exists. +- `group_identifier`: The identifier of the group profile. 
+ +""" +function get_group_profile( + domainIdentifier, groupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/group-profiles/$(groupIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_group_profile( + domainIdentifier, + groupIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/group-profiles/$(groupIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_iam_portal_login_url(domain_identifier) + get_iam_portal_login_url(domain_identifier, params::Dict{String,<:Any}) + +Gets the data portal URL for the specified Amazon DataZone domain. + +# Arguments +- `domain_identifier`: the ID of the Amazon DataZone domain the data portal of which you + want to get. + +""" +function get_iam_portal_login_url( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/get-portal-login-url"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_iam_portal_login_url( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/get-portal-login-url", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_listing(domain_identifier, identifier) + get_listing(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets a listing (a record of an asset at a given time). + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain. +- `identifier`: The ID of the listing. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"listingRevision"`: The revision of the listing. +""" +function get_listing( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/listings/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_listing( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/listings/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_metadata_generation_run(domain_identifier, identifier) + get_metadata_generation_run(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets a metadata generation run in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain the metadata generation run of + which you want to get. +- `identifier`: The identifier of the metadata generation run. 
+ +""" +function get_metadata_generation_run( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_metadata_generation_run( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_project(domain_identifier, identifier) + get_project(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets a project in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the project exists. +- `identifier`: The ID of the project. + +""" +function get_project( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/projects/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_project( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/projects/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_subscription(domain_identifier, identifier) + get_subscription(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets a subscription in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the subscription + exists. +- `identifier`: The ID of the subscription. + +""" +function get_subscription( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscriptions/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_subscription( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscriptions/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_subscription_grant(domain_identifier, identifier) + get_subscription_grant(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets the subscription grant in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the subscription grant + exists. +- `identifier`: The ID of the subscription grant. 
+ +""" +function get_subscription_grant( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-grants/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_subscription_grant( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-grants/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_subscription_request_details(domain_identifier, identifier) + get_subscription_request_details(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets the details of the specified subscription request. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which to get the + subscription request details. +- `identifier`: The identifier of the subscription request the details of which to get. + +""" +function get_subscription_request_details( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_subscription_request_details( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_subscription_target(domain_identifier, environment_identifier, identifier) + get_subscription_target(domain_identifier, environment_identifier, identifier, params::Dict{String,<:Any}) + +Gets the subscription target in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the subscription + target exists. +- `environment_identifier`: The ID of the environment associated with the subscription + target. +- `identifier`: The ID of the subscription target. + +""" +function get_subscription_target( + domainIdentifier, + environmentIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_subscription_target( + domainIdentifier, + environmentIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_time_series_data_point(domain_identifier, entity_identifier, entity_type, form_name, identifier) + get_time_series_data_point(domain_identifier, entity_identifier, entity_type, form_name, identifier, params::Dict{String,<:Any}) + +Gets the existing data point for the asset. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain that houses the asset for which + you want to get the data point. +- `entity_identifier`: The ID of the asset for which you want to get the data point. 
+- `entity_type`: The type of the asset for which you want to get the data point. +- `form_name`: The name of the time series form that houses the data point that you want to + get. +- `identifier`: The ID of the data point that you want to get. + +""" +function get_time_series_data_point( + domainIdentifier, + entityIdentifier, + entityType, + formName, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points/$(identifier)", + Dict{String,Any}("formName" => formName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_time_series_data_point( + domainIdentifier, + entityIdentifier, + entityType, + formName, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points/$(identifier)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("formName" => formName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_user_profile(domain_identifier, user_identifier) + get_user_profile(domain_identifier, user_identifier, params::Dict{String,<:Any}) + +Gets a user profile in Amazon DataZone. + +# Arguments +- `domain_identifier`: the ID of the Amazon DataZone domain the data portal of which you + want to get. +- `user_identifier`: The identifier of the user for which you want to get the user profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"type"`: The type of the user profile. +""" +function get_user_profile( + domainIdentifier, userIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/user-profiles/$(userIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_user_profile( + domainIdentifier, + userIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/user-profiles/$(userIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_asset_revisions(domain_identifier, identifier) + list_asset_revisions(domain_identifier, identifier, params::Dict{String,<:Any}) + +Lists the revisions for the asset. + +# Arguments +- `domain_identifier`: The identifier of the domain. +- `identifier`: The identifier of the asset. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of revisions to return in a single call to + ListAssetRevisions. When the number of revisions to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to ListAssetRevisions to list the next set of revisions. +- `"nextToken"`: When the number of revisions is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of revisions, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListAssetRevisions to list the next + set of revisions. 
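+
+# Example
+A minimal pagination sketch (not part of the generated definitions); it assumes the
+high-level `@service` pattern from AWS.jl, placeholder IDs, and a parsed response that
+exposes an "items" list and a "nextToken" field (the field names are an assumption here).
+```julia
+using AWS
+@service DataZone
+
+# Collect every revision of one asset by following the pagination token.
+revisions = Any[]
+params = Dict{String,Any}("maxResults" => 25)
+while true
+    page = DataZone.list_asset_revisions("dzd_exampledomain", "example-asset-id", params)
+    append!(revisions, get(page, "items", Any[]))
+    token = get(page, "nextToken", nothing)
+    token === nothing && break
+    params["nextToken"] = token
+end
+```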
+""" +function list_asset_revisions( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/revisions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_asset_revisions( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/revisions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_data_source_run_activities(domain_identifier, identifier) + list_data_source_run_activities(domain_identifier, identifier, params::Dict{String,<:Any}) + +Lists data source run activities. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which to list data + source run activities. +- `identifier`: The identifier of the data source run. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of activities to return in a single call to + ListDataSourceRunActivities. When the number of activities to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListDataSourceRunActivities to list the next set of activities. +- `"nextToken"`: When the number of activities is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of activities, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListDataSourceRunActivities to list + the next set of activities. +- `"status"`: The status of the data source run. +""" +function list_data_source_run_activities( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-source-runs/$(identifier)/activities"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_source_run_activities( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-source-runs/$(identifier)/activities", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_data_source_runs(data_source_identifier, domain_identifier) + list_data_source_runs(data_source_identifier, domain_identifier, params::Dict{String,<:Any}) + +Lists data source runs in Amazon DataZone. + +# Arguments +- `data_source_identifier`: The identifier of the data source. +- `domain_identifier`: The identifier of the Amazon DataZone domain in which to invoke the + ListDataSourceRuns action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of runs to return in a single call to + ListDataSourceRuns. When the number of runs to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to ListDataSourceRuns to list the next set of runs. 
+- `"nextToken"`: When the number of runs is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of runs, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListDataSourceRuns to list the next + set of runs. +- `"status"`: The status of the data source. +""" +function list_data_source_runs( + dataSourceIdentifier, + domainIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-sources/$(dataSourceIdentifier)/runs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_source_runs( + dataSourceIdentifier, + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-sources/$(dataSourceIdentifier)/runs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_data_sources(domain_identifier, project_identifier) + list_data_sources(domain_identifier, project_identifier, params::Dict{String,<:Any}) + +Lists data sources in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which to list the + data sources. +- `project_identifier`: The identifier of the project in which to list data sources. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"environmentIdentifier"`: The identifier of the environment in which to list the data + sources. +- `"maxResults"`: The maximum number of data sources to return in a single call to + ListDataSources. When the number of data sources to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to ListDataSources to list the next set of data sources. +- `"name"`: The name of the data source. +- `"nextToken"`: When the number of data sources is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of data sources, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListDataSources to list the next + set of data sources. +- `"status"`: The status of the data source. +- `"type"`: The type of the data source. +""" +function list_data_sources( + domainIdentifier, projectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-sources", + Dict{String,Any}("projectIdentifier" => projectIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_sources( + domainIdentifier, + projectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-sources", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("projectIdentifier" => projectIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_domains() + list_domains(params::Dict{String,<:Any}) + +Lists Amazon DataZone domains. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
+- `"maxResults"`: The maximum number of domains to return in a single call to ListDomains.
+  When the number of domains to be listed is greater than the value of MaxResults, the
+  response contains a NextToken value that you can use in a subsequent call to ListDomains to
+  list the next set of domains.
+- `"nextToken"`: When the number of domains is greater than the default value for the
+  MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than
+  the number of domains, the response includes a pagination token named NextToken. You can
+  specify this NextToken value in a subsequent call to ListDomains to list the next set of
+  domains.
+- `"status"`: The status of the Amazon DataZone domain.
+"""
+function list_domains(; aws_config::AbstractAWSConfig=global_aws_config())
+    return datazone(
+        "GET", "/v2/domains"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_domains(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "GET", "/v2/domains", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
+"""
+    list_environment_actions(domain_identifier, environment_identifier)
+    list_environment_actions(domain_identifier, environment_identifier, params::Dict{String,<:Any})
+
+Lists existing environment actions.
+
+# Arguments
+- `domain_identifier`: The ID of the Amazon DataZone domain in which the environment
+  actions are listed.
+- `environment_identifier`: The ID of the environment whose environment actions are listed.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of environment actions to return in a single call to
+  ListEnvironmentActions. When the number of environment actions to be listed is greater than
+  the value of MaxResults, the response contains a NextToken value that you can use in a
+  subsequent call to ListEnvironmentActions to list the next set of environment actions.
+- `"nextToken"`: When the number of environment actions is greater than the default value
+  for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is
+  less than the number of environment actions, the response includes a pagination token named
+  NextToken. You can specify this NextToken value in a subsequent call to
+  ListEnvironmentActions to list the next set of environment actions.
+"""
+function list_environment_actions(
+    domainIdentifier,
+    environmentIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function list_environment_actions(
+    domainIdentifier,
+    environmentIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_environment_blueprint_configurations(domain_identifier)
+    list_environment_blueprint_configurations(domain_identifier, params::Dict{String,<:Any})
+
+Lists blueprint configurations for an Amazon DataZone environment.
+
+# Arguments
+- `domain_identifier`: The identifier of the Amazon DataZone domain.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of blueprint configurations to return in a single call
+  to ListEnvironmentBlueprintConfigurations. When the number of configurations to be listed
+  is greater than the value of MaxResults, the response contains a NextToken value that you
+  can use in a subsequent call to ListEnvironmentBlueprintConfigurations to list the next set
+  of configurations.
+- `"nextToken"`: When the number of blueprint configurations is greater than the default
+  value for the MaxResults parameter, or if you explicitly specify a value for MaxResults
+  that is less than the number of configurations, the response includes a pagination token
+  named NextToken. You can specify this NextToken value in a subsequent call to
+  ListEnvironmentBlueprintConfigurations to list the next set of configurations.
+"""
+function list_environment_blueprint_configurations(
+    domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function list_environment_blueprint_configurations(
+    domainIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_environment_blueprints(domain_identifier)
+    list_environment_blueprints(domain_identifier, params::Dict{String,<:Any})
+
+Lists blueprints in an Amazon DataZone environment.
+
+# Arguments
+- `domain_identifier`: The identifier of the Amazon DataZone domain.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"managed"`: Specifies whether the environment blueprint is managed by Amazon DataZone.
+- `"maxResults"`: The maximum number of blueprints to return in a single call to
+  ListEnvironmentBlueprints. When the number of blueprints to be listed is greater than the
+  value of MaxResults, the response contains a NextToken value that you can use in a
+  subsequent call to ListEnvironmentBlueprints to list the next set of blueprints.
+- `"name"`: The name of the Amazon DataZone environment.
+- `"nextToken"`: When the number of blueprints in the environment is greater than the
+  default value for the MaxResults parameter, or if you explicitly specify a value for
+  MaxResults that is less than the number of blueprints in the environment, the response
+  includes a pagination token named NextToken. You can specify this NextToken value in a
+  subsequent call to ListEnvironmentBlueprints to list the next set of blueprints.
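+
+# Example
+A minimal usage sketch (not part of the generated definitions); it assumes the high-level
+`@service` pattern from AWS.jl, and the domain ID below is a hypothetical placeholder.
+```julia
+using AWS
+@service DataZone
+
+# List only the blueprints that are managed by Amazon DataZone (placeholder domain ID).
+blueprints = DataZone.list_environment_blueprints(
+    "dzd_exampledomain", Dict("managed" => true)
+)
+```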
+""" +function list_environment_blueprints( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environment-blueprints"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_environment_blueprints( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environment-blueprints", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_environment_profiles(domain_identifier) + list_environment_profiles(domain_identifier, params::Dict{String,<:Any}) + +Lists Amazon DataZone environment profiles. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"awsAccountId"`: The identifier of the Amazon Web Services account where you want to + list environment profiles. +- `"awsAccountRegion"`: The Amazon Web Services region where you want to list environment + profiles. +- `"environmentBlueprintIdentifier"`: The identifier of the blueprint that was used to + create the environment profiles that you want to list. +- `"maxResults"`: The maximum number of environment profiles to return in a single call to + ListEnvironmentProfiles. When the number of environment profiles to be listed is greater + than the value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListEnvironmentProfiles to list the next set of environment profiles. +- `"name"`: +- `"nextToken"`: When the number of environment profiles is greater than the default value + for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is + less than the number of environment profiles, the response includes a pagination token + named NextToken. You can specify this NextToken value in a subsequent call to + ListEnvironmentProfiles to list the next set of environment profiles. +- `"projectIdentifier"`: The identifier of the Amazon DataZone project. +""" +function list_environment_profiles( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environment-profiles"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_environment_profiles( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environment-profiles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_environments(domain_identifier, project_identifier) + list_environments(domain_identifier, project_identifier, params::Dict{String,<:Any}) + +Lists Amazon DataZone environments. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. +- `project_identifier`: The identifier of the Amazon DataZone project. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"awsAccountId"`: The identifier of the Amazon Web Services account where you want to + list environments. +- `"awsAccountRegion"`: The Amazon Web Services region where you want to list environments. +- `"environmentBlueprintIdentifier"`: The identifier of the Amazon DataZone blueprint. 
+- `"environmentProfileIdentifier"`: The identifier of the environment profile. +- `"maxResults"`: The maximum number of environments to return in a single call to + ListEnvironments. When the number of environments to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to ListEnvironments to list the next set of environments. +- `"name"`: The name of the environment. +- `"nextToken"`: When the number of environments is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of environments, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListEnvironments to list the next + set of environments. +- `"provider"`: The provider of the environment. +- `"status"`: The status of the environments that you want to list. +""" +function list_environments( + domainIdentifier, projectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments", + Dict{String,Any}("projectIdentifier" => projectIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_environments( + domainIdentifier, + projectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("projectIdentifier" => projectIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_metadata_generation_runs(domain_identifier) + list_metadata_generation_runs(domain_identifier, params::Dict{String,<:Any}) + +Lists all metadata generation runs. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain where you want to list metadata + generation runs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of metadata generation runs to return in a single call + to ListMetadataGenerationRuns. When the number of metadata generation runs to be listed is + greater than the value of MaxResults, the response contains a NextToken value that you can + use in a subsequent call to ListMetadataGenerationRuns to list the next set of revisions. +- `"nextToken"`: When the number of metadata generation runs is greater than the default + value for the MaxResults parameter, or if you explicitly specify a value for MaxResults + that is less than the number of metadata generation runs, the response includes a + pagination token named NextToken. You can specify this NextToken value in a subsequent call + to ListMetadataGenerationRuns to list the next set of revisions. +- `"status"`: The status of the metadata generation runs. +- `"type"`: The type of the metadata generation runs. 
+""" +function list_metadata_generation_runs( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_metadata_generation_runs( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_notifications(domain_identifier, type) + list_notifications(domain_identifier, type, params::Dict{String,<:Any}) + +Lists all Amazon DataZone notifications. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. +- `type`: The type of notifications. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"afterTimestamp"`: The time after which you want to list notifications. +- `"beforeTimestamp"`: The time before which you want to list notifications. +- `"maxResults"`: The maximum number of notifications to return in a single call to + ListNotifications. When the number of notifications to be listed is greater than the value + of MaxResults, the response contains a NextToken value that you can use in a subsequent + call to ListNotifications to list the next set of notifications. +- `"nextToken"`: When the number of notifications is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of notifications, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListNotifications to list the next + set of notifications. +- `"subjects"`: The subjects of notifications. +- `"taskStatus"`: The task status of notifications. +""" +function list_notifications( + domainIdentifier, type; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/notifications", + Dict{String,Any}("type" => type); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_notifications( + domainIdentifier, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/notifications", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("type" => type), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_project_memberships(domain_identifier, project_identifier) + list_project_memberships(domain_identifier, project_identifier, params::Dict{String,<:Any}) + +Lists all members of the specified project. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which you want to + list project memberships. +- `project_identifier`: The identifier of the project whose memberships you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of memberships to return in a single call to + ListProjectMemberships. When the number of memberships to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListProjectMemberships to list the next set of memberships. 
+- `"nextToken"`: When the number of memberships is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of memberships, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListProjectMemberships to list the + next set of memberships. +- `"sortBy"`: The method by which you want to sort the project memberships. +- `"sortOrder"`: The sort order of the project memberships. +""" +function list_project_memberships( + domainIdentifier, projectIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/projects/$(projectIdentifier)/memberships"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_project_memberships( + domainIdentifier, + projectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/projects/$(projectIdentifier)/memberships", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_projects(domain_identifier) + list_projects(domain_identifier, params::Dict{String,<:Any}) + +Lists Amazon DataZone projects. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"groupIdentifier"`: The identifier of a group. +- `"maxResults"`: The maximum number of projects to return in a single call to + ListProjects. When the number of projects to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to ListProjects to list the next set of projects. +- `"name"`: The name of the project. +- `"nextToken"`: When the number of projects is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of projects, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListProjects to list the next set of + projects. +- `"userIdentifier"`: The identifier of the Amazon DataZone user. +""" +function list_projects(domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/projects"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_projects( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/projects", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_subscription_grants(domain_identifier) + list_subscription_grants(domain_identifier, params::Dict{String,<:Any}) + +Lists subscription grants. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"environmentId"`: The identifier of the Amazon DataZone environment. +- `"maxResults"`: The maximum number of subscription grants to return in a single call to + ListSubscriptionGrants. 
When the number of subscription grants to be listed is greater than + the value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListSubscriptionGrants to list the next set of subscription grants. +- `"nextToken"`: When the number of subscription grants is greater than the default value + for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is + less than the number of subscription grants, the response includes a pagination token named + NextToken. You can specify this NextToken value in a subsequent call to + ListSubscriptionGrants to list the next set of subscription grants. +- `"sortBy"`: Specifies the way of sorting the results of this action. +- `"sortOrder"`: Specifies the sort order of this action. +- `"subscribedListingId"`: The identifier of the subscribed listing. +- `"subscriptionId"`: The identifier of the subscription. +- `"subscriptionTargetId"`: The identifier of the subscription target. +""" +function list_subscription_grants( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-grants"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_subscription_grants( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-grants", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_subscription_requests(domain_identifier) + list_subscription_requests(domain_identifier, params::Dict{String,<:Any}) + +Lists Amazon DataZone subscription requests. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"approverProjectId"`: The identifier of the subscription request approver's project. +- `"maxResults"`: The maximum number of subscription requests to return in a single call to + ListSubscriptionRequests. When the number of subscription requests to be listed is greater + than the value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListSubscriptionRequests to list the next set of subscription requests. +- `"nextToken"`: When the number of subscription requests is greater than the default value + for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is + less than the number of subscription requests, the response includes a pagination token + named NextToken. You can specify this NextToken value in a subsequent call to + ListSubscriptionRequests to list the next set of subscription requests. +- `"owningProjectId"`: The identifier of the project for the subscription requests. +- `"sortBy"`: Specifies the way to sort the results of this action. +- `"sortOrder"`: Specifies the sort order for the results of this action. +- `"status"`: Specifies the status of the subscription requests. +- `"subscribedListingId"`: The identifier of the subscribed listing. 
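+
+# Example
+A hand-written usage sketch; the domain ID and the filter and sort values shown are
+assumptions for illustration only, and the module is assumed to be loaded via AWS.jl's
+`@service` macro.
+```julia
+using AWS: @service
+@service DataZone
+
+# List pending subscription requests in a (placeholder) domain, most recently updated first.
+requests = DataZone.list_subscription_requests(
+    "dzd_example123",
+    Dict{String,Any}(
+        "status" => "PENDING", "sortBy" => "UPDATED_AT", "sortOrder" => "DESCENDING"
+    ),
+)
+```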
+""" +function list_subscription_requests( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-requests"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_subscription_requests( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscription-requests", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_subscription_targets(domain_identifier, environment_identifier) + list_subscription_targets(domain_identifier, environment_identifier, params::Dict{String,<:Any}) + +Lists subscription targets in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain where you want to list + subscription targets. +- `environment_identifier`: The identifier of the environment where you want to list + subscription targets. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of subscription targets to return in a single call to + ListSubscriptionTargets. When the number of subscription targets to be listed is greater + than the value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListSubscriptionTargets to list the next set of subscription targets. +- `"nextToken"`: When the number of subscription targets is greater than the default value + for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is + less than the number of subscription targets, the response includes a pagination token + named NextToken. You can specify this NextToken value in a subsequent call to + ListSubscriptionTargets to list the next set of subscription targets. +- `"sortBy"`: Specifies the way in which the results of this action are to be sorted. +- `"sortOrder"`: Specifies the sort order for the results of this action. +""" +function list_subscription_targets( + domainIdentifier, + environmentIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_subscription_targets( + domainIdentifier, + environmentIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_subscriptions(domain_identifier) + list_subscriptions(domain_identifier, params::Dict{String,<:Any}) + +Lists subscriptions in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"approverProjectId"`: The identifier of the project for the subscription's approver. +- `"maxResults"`: The maximum number of subscriptions to return in a single call to + ListSubscriptions. 
When the number of subscriptions to be listed is greater than the value + of MaxResults, the response contains a NextToken value that you can use in a subsequent + call to ListSubscriptions to list the next set of Subscriptions. +- `"nextToken"`: When the number of subscriptions is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of subscriptions, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListSubscriptions to list the next + set of subscriptions. +- `"owningProjectId"`: The identifier of the owning project. +- `"sortBy"`: Specifies the way in which the results of this action are to be sorted. +- `"sortOrder"`: Specifies the sort order for the results of this action. +- `"status"`: The status of the subscriptions that you want to list. +- `"subscribedListingId"`: The identifier of the subscribed listing for the subscriptions + that you want to list. +- `"subscriptionRequestIdentifier"`: The identifier of the subscription request for the + subscriptions that you want to list. +""" +function list_subscriptions( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscriptions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_subscriptions( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/subscriptions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists tags for the specified resource in Amazon DataZone. + +# Arguments +- `resource_arn`: The ARN of the resource whose tags you want to list. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_time_series_data_points(domain_identifier, entity_identifier, entity_type, form_name) + list_time_series_data_points(domain_identifier, entity_identifier, entity_type, form_name, params::Dict{String,<:Any}) + +Lists time series data points. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain that houses the assets for + which you want to list time series data points. +- `entity_identifier`: The ID of the asset for which you want to list data points. +- `entity_type`: The type of the asset for which you want to list data points. +- `form_name`: The name of the time series data points form. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"endedAt"`: The timestamp at which the data points that you wanted to list ended. +- `"maxResults"`: The maximum number of data points to return in a single call to + ListTimeSeriesDataPoints. 
When the number of data points to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListTimeSeriesDataPoints to list the next set of data points. +- `"nextToken"`: When the number of data points is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of data points, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListTimeSeriesDataPoints to list + the next set of data points. +- `"startedAt"`: The timestamp at which the data points that you want to list started. +""" +function list_time_series_data_points( + domainIdentifier, + entityIdentifier, + entityType, + formName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points", + Dict{String,Any}("formName" => formName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_time_series_data_points( + domainIdentifier, + entityIdentifier, + entityType, + formName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("formName" => formName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms) + post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms, params::Dict{String,<:Any}) + +Posts time series data points to Amazon DataZone for the specified asset. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which you want to post time + series data points. +- `entity_identifier`: The ID of the asset for which you want to post time series data + points. +- `entity_type`: The type of the asset for which you want to post data points. +- `forms`: The forms that contain the data points that you want to post. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. 
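+
+# Example
+An illustrative sketch (not generated from the service definition). All identifiers are
+placeholders, and the keys of the form dictionary are illustrative only; consult the
+DataZone API reference for the authoritative request shape.
+```julia
+using AWS: @service
+@service DataZone
+
+# Post one time series form for a (placeholder) asset.
+form = Dict{String,Any}(
+    "formName" => "exampleTimeSeriesForm",
+    "typeIdentifier" => "exampleFormType",
+    "timestamp" => 1719360000,
+    # plus a "content" entry holding the JSON payload of the data point
+)
+DataZone.post_time_series_data_points("dzd_example123", "asset-abc123", "ASSET", [form])
+```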
+""" +function post_time_series_data_points( + domainIdentifier, + entityIdentifier, + entityType, + forms; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points", + Dict{String,Any}("forms" => forms, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function post_time_series_data_points( + domainIdentifier, + entityIdentifier, + entityType, + forms, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/time-series-data-points", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("forms" => forms, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_environment_blueprint_configuration(domain_identifier, enabled_regions, environment_blueprint_identifier) + put_environment_blueprint_configuration(domain_identifier, enabled_regions, environment_blueprint_identifier, params::Dict{String,<:Any}) + +Writes the configuration for the specified environment blueprint in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. +- `enabled_regions`: Specifies the enabled Amazon Web Services Regions. +- `environment_blueprint_identifier`: The identifier of the environment blueprint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"manageAccessRoleArn"`: The ARN of the manage access role. +- `"provisioningRoleArn"`: The ARN of the provisioning role. +- `"regionalParameters"`: The regional parameters in the environment blueprint. +""" +function put_environment_blueprint_configuration( + domainIdentifier, + enabledRegions, + environmentBlueprintIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations/$(environmentBlueprintIdentifier)", + Dict{String,Any}("enabledRegions" => enabledRegions); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_environment_blueprint_configuration( + domainIdentifier, + enabledRegions, + environmentBlueprintIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/environment-blueprint-configurations/$(environmentBlueprintIdentifier)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("enabledRegions" => enabledRegions), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + reject_predictions(domain_identifier, identifier) + reject_predictions(domain_identifier, identifier, params::Dict{String,<:Any}) + +Rejects automatically generated business-friendly metadata for your Amazon DataZone assets. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. +- `identifier`: The identifier of the prediction. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. 
+- `"rejectChoices"`: Specifies the prediction (aka, the automatically generated piece of + metadata) and the target (for example, a column name) that can be rejected. +- `"rejectRule"`: Specifies the rule (or the conditions) under which a prediction can be + rejected. +- `"revision"`: The revision that is to be made to the asset. +""" +function reject_predictions( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/reject-predictions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reject_predictions( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)/reject-predictions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + reject_subscription_request(domain_identifier, identifier) + reject_subscription_request(domain_identifier, identifier, params::Dict{String,<:Any}) + +Rejects the specified subscription request. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which the + subscription request was rejected. +- `identifier`: The identifier of the subscription request that was rejected. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"decisionComment"`: The decision comment of the rejected subscription request. +""" +function reject_subscription_request( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)/reject"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reject_subscription_request( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)/reject", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + revoke_subscription(domain_identifier, identifier) + revoke_subscription(domain_identifier, identifier, params::Dict{String,<:Any}) + +Revokes a specified subscription in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain where you want to + revoke a subscription. +- `identifier`: The identifier of the revoked subscription. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"retainPermissions"`: Specifies whether permissions are retained when the subscription + is revoked. 
+""" +function revoke_subscription( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/subscriptions/$(identifier)/revoke"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function revoke_subscription( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/subscriptions/$(identifier)/revoke", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search(domain_identifier, search_scope) + search(domain_identifier, search_scope, params::Dict{String,<:Any}) + +Searches for assets in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain. +- `search_scope`: The scope of the search. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"additionalAttributes"`: Specifies additional attributes for the Search action. +- `"filters"`: Specifies the search filters. +- `"maxResults"`: The maximum number of results to return in a single call to Search. When + the number of results to be listed is greater than the value of MaxResults, the response + contains a NextToken value that you can use in a subsequent call to Search to list the next + set of results. +- `"nextToken"`: When the number of results is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of results, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to Search to list the next set of results. +- `"owningProjectIdentifier"`: The identifier of the owning project specified for the + search. +- `"searchIn"`: The details of the search. +- `"searchText"`: Specifies the text for which to search. +- `"sort"`: Specifies the way in which the search results are to be sorted. +""" +function search( + domainIdentifier, searchScope; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/search", + Dict{String,Any}("searchScope" => searchScope); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search( + domainIdentifier, + searchScope, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/search", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("searchScope" => searchScope), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_group_profiles(domain_identifier, group_type) + search_group_profiles(domain_identifier, group_type, params::Dict{String,<:Any}) + +Searches group profiles in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which you want to + search group profiles. +- `group_type`: The group type for which to search. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in a single call to + SearchGroupProfiles. 
When the number of results to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to SearchGroupProfiles to list the next set of results. +- `"nextToken"`: When the number of results is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of results, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to SearchGroupProfiles to list the next + set of results. +- `"searchText"`: Specifies the text for which to search. +""" +function search_group_profiles( + domainIdentifier, groupType; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/search-group-profiles", + Dict{String,Any}("groupType" => groupType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_group_profiles( + domainIdentifier, + groupType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/search-group-profiles", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("groupType" => groupType), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_listings(domain_identifier) + search_listings(domain_identifier, params::Dict{String,<:Any}) + +Searches listings (records of an asset at a given time) in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the domain in which to search listings. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"additionalAttributes"`: Specifies additional attributes for the search. +- `"filters"`: Specifies the filters for the search of listings. +- `"maxResults"`: The maximum number of results to return in a single call to + SearchListings. When the number of results to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to SearchListings to list the next set of results. +- `"nextToken"`: When the number of results is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of results, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to SearchListings to list the next set of + results. +- `"searchIn"`: The details of the search. +- `"searchText"`: Specifies the text for which to search. +- `"sort"`: Specifies the way for sorting the search results. +""" +function search_listings( + domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/listings/search"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_listings( + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/listings/search", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_types(domain_identifier, managed, search_scope) + search_types(domain_identifier, managed, search_scope, params::Dict{String,<:Any}) + +Searches for types in Amazon DataZone. 
+
+# Arguments
+- `domain_identifier`: The identifier of the Amazon DataZone domain in which to invoke the
+  SearchTypes action.
+- `managed`: Specifies whether the search is managed.
+- `search_scope`: Specifies the scope of the search for types.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"filters"`: The filters for the SearchTypes action.
+- `"maxResults"`: The maximum number of results to return in a single call to SearchTypes.
+  When the number of results to be listed is greater than the value of MaxResults, the
+  response contains a NextToken value that you can use in a subsequent call to SearchTypes to
+  list the next set of results.
+- `"nextToken"`: When the number of results is greater than the default value for the
+  MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than
+  the number of results, the response includes a pagination token named NextToken. You can
+  specify this NextToken value in a subsequent call to SearchTypes to list the next set of
+  results.
+- `"searchIn"`: The details of the search.
+- `"searchText"`: Specifies the text for which to search.
+- `"sort"`: Specifies the way to sort the SearchTypes results.
+"""
+function search_types(
+    domainIdentifier,
+    managed,
+    searchScope;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/types-search",
+        Dict{String,Any}("managed" => managed, "searchScope" => searchScope);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function search_types(
+    domainIdentifier,
+    managed,
+    searchScope,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "POST",
+        "/v2/domains/$(domainIdentifier)/types-search",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("managed" => managed, "searchScope" => searchScope),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    search_user_profiles(domain_identifier, user_type)
+    search_user_profiles(domain_identifier, user_type, params::Dict{String,<:Any})
+
+Searches user profiles in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The identifier of the Amazon DataZone domain in which you want to
+  search user profiles.
+- `user_type`: Specifies the user type for the SearchUserProfiles action.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of results to return in a single call to
+  SearchUserProfiles. When the number of results to be listed is greater than the value of
+  MaxResults, the response contains a NextToken value that you can use in a subsequent call
+  to SearchUserProfiles to list the next set of results.
+- `"nextToken"`: When the number of results is greater than the default value for the
+  MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than
+  the number of results, the response includes a pagination token named NextToken. You can
+  specify this NextToken value in a subsequent call to SearchUserProfiles to list the next
+  set of results.
+- `"searchText"`: Specifies the text for which to search.
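+
+# Example
+An illustrative sketch; the domain ID and the user type value are placeholder assumptions,
+and the module is assumed to be loaded via AWS.jl's `@service` macro.
+```julia
+using AWS: @service
+@service DataZone
+
+# Search IAM-based user profiles in a (placeholder) domain by a free-text term.
+profiles = DataZone.search_user_profiles(
+    "dzd_example123", "DATAZONE_IAM_USER", Dict{String,Any}("searchText" => "analyst")
+)
+```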
+""" +function search_user_profiles( + domainIdentifier, userType; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/search-user-profiles", + Dict{String,Any}("userType" => userType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_user_profiles( + domainIdentifier, + userType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/search-user-profiles", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("userType" => userType), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_data_source_run(data_source_identifier, domain_identifier) + start_data_source_run(data_source_identifier, domain_identifier, params::Dict{String,<:Any}) + +Start the run of the specified data source in Amazon DataZone. + +# Arguments +- `data_source_identifier`: The identifier of the data source. +- `domain_identifier`: The identifier of the Amazon DataZone domain in which to start a + data source run. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function start_data_source_run( + dataSourceIdentifier, + domainIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-sources/$(dataSourceIdentifier)/runs", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_data_source_run( + dataSourceIdentifier, + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-sources/$(dataSourceIdentifier)/runs", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_metadata_generation_run(domain_identifier, owning_project_identifier, target, type) + start_metadata_generation_run(domain_identifier, owning_project_identifier, target, type, params::Dict{String,<:Any}) + +Starts the metadata generation run. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain where you want to start a + metadata generation run. +- `owning_project_identifier`: The ID of the project that owns the asset for which you want + to start a metadata generation run. +- `target`: The asset for which you want to start a metadata generation run. +- `type`: The type of the metadata generation run. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure idempotency of the + request. This field is automatically populated if not provided. 
+""" +function start_metadata_generation_run( + domainIdentifier, + owningProjectIdentifier, + target, + type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs", + Dict{String,Any}( + "owningProjectIdentifier" => owningProjectIdentifier, + "target" => target, + "type" => type, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_metadata_generation_run( + domainIdentifier, + owningProjectIdentifier, + target, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/metadata-generation-runs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "owningProjectIdentifier" => owningProjectIdentifier, + "target" => target, + "type" => type, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Tags a resource in Amazon DataZone. + +# Arguments +- `resource_arn`: The ARN of the resource to be tagged in Amazon DataZone. +- `tags`: Specifies the tags for the TagResource action. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return datazone( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Untags a resource in Amazon DataZone. + +# Arguments +- `resource_arn`: The ARN of the resource to be untagged in Amazon DataZone. +- `tag_keys`: Specifies the tag keys for the UntagResource action. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_data_source(domain_identifier, identifier) + update_data_source(domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates the specified data source in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the domain in which to update a data source. +- `identifier`: The identifier of the data source to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetFormsInput"`: The asset forms to be updated as part of the UpdateDataSource action. 
+- `"configuration"`: The configuration to be updated as part of the UpdateDataSource action. +- `"description"`: The description to be updated as part of the UpdateDataSource action. +- `"enableSetting"`: The enable setting to be updated as part of the UpdateDataSource + action. +- `"name"`: The name to be updated as part of the UpdateDataSource action. +- `"publishOnImport"`: The publish on import setting to be updated as part of the + UpdateDataSource action. +- `"recommendation"`: The recommendation to be updated as part of the UpdateDataSource + action. +- `"retainPermissionsOnRevokeFailure"`: Specifies that the granted permissions are retained + in case of a self-subscribe functionality failure for a data source. +- `"schedule"`: The schedule to be updated as part of the UpdateDataSource action. +""" +function update_data_source( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/data-sources/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_data_source( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/data-sources/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_domain(identifier) + update_domain(identifier, params::Dict{String,<:Any}) + +Updates a Amazon DataZone domain. + +# Arguments +- `identifier`: The ID of the Amazon Web Services domain that is to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description to be updated as part of the UpdateDomain action. +- `"domainExecutionRole"`: The domain execution role to be updated as part of the + UpdateDomain action. +- `"name"`: The name to be updated as part of the UpdateDomain action. +- `"singleSignOn"`: The single sign-on option to be updated as part of the UpdateDomain + action. +""" +function update_domain(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return datazone( + "PUT", + "/v2/domains/$(identifier)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_domain( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(identifier)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_environment(domain_identifier, identifier) + update_environment(domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates the specified environment in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the domain in which the environment is to be + updated. +- `identifier`: The identifier of the environment that is to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description to be updated as part of the UpdateEnvironment action. +- `"glossaryTerms"`: The glossary terms to be updated as part of the UpdateEnvironment + action. 
+- `"name"`: The name to be updated as part of the UpdateEnvironment action. +""" +function update_environment( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/environments/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_environment( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/environments/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_environment_action(domain_identifier, environment_identifier, identifier) + update_environment_action(domain_identifier, environment_identifier, identifier, params::Dict{String,<:Any}) + +Updates an environment action. + +# Arguments +- `domain_identifier`: The domain ID of the environment action. +- `environment_identifier`: The environment ID of the environment action. +- `identifier`: The ID of the environment action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the environment action. +- `"name"`: The name of the environment action. +- `"parameters"`: The parameters of the environment action. +""" +function update_environment_action( + domainIdentifier, + environmentIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_environment_action( + domainIdentifier, + environmentIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/actions/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_environment_profile(domain_identifier, identifier) + update_environment_profile(domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates the specified environment profile in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which an environment + profile is to be updated. +- `identifier`: The identifier of the environment profile that is to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"awsAccountId"`: The Amazon Web Services account in which a specified environment + profile is to be udpated. +- `"awsAccountRegion"`: The Amazon Web Services Region in which a specified environment + profile is to be updated. +- `"description"`: The description to be updated as part of the UpdateEnvironmentProfile + action. +- `"name"`: The name to be updated as part of the UpdateEnvironmentProfile action. +- `"userParameters"`: The user parameters to be updated as part of the + UpdateEnvironmentProfile action. 
+""" +function update_environment_profile( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/environment-profiles/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_environment_profile( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/environment-profiles/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_glossary(domain_identifier, identifier) + update_glossary(domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates the business glossary in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a business + glossary is to be updated. +- `identifier`: The identifier of the business glossary to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description to be updated as part of the UpdateGlossary action. +- `"name"`: The name to be updated as part of the UpdateGlossary action. +- `"status"`: The status to be updated as part of the UpdateGlossary action. +""" +function update_glossary( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/glossaries/$(identifier)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_glossary( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/glossaries/$(identifier)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_glossary_term(domain_identifier, identifier) + update_glossary_term(domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates a business glossary term in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a business + glossary term is to be updated. +- `identifier`: The identifier of the business glossary term that is to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"glossaryIdentifier"`: The identifier of the business glossary in which a term is to be + updated. +- `"longDescription"`: The long description to be updated as part of the UpdateGlossaryTerm + action. +- `"name"`: The name to be updated as part of the UpdateGlossaryTerm action. +- `"shortDescription"`: The short description to be updated as part of the + UpdateGlossaryTerm action. +- `"status"`: The status to be updated as part of the UpdateGlossaryTerm action. +- `"termRelations"`: The term relations to be updated as part of the UpdateGlossaryTerm + action. 
+""" +function update_glossary_term( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/glossary-terms/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_glossary_term( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/glossary-terms/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_group_profile(domain_identifier, group_identifier, status) + update_group_profile(domain_identifier, group_identifier, status, params::Dict{String,<:Any}) + +Updates the specified group profile in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a group + profile is updated. +- `group_identifier`: The identifier of the group profile that is updated. +- `status`: The status of the group profile that is updated. + +""" +function update_group_profile( + domainIdentifier, + groupIdentifier, + status; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/group-profiles/$(groupIdentifier)", + Dict{String,Any}("status" => status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_group_profile( + domainIdentifier, + groupIdentifier, + status, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/group-profiles/$(groupIdentifier)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_project(domain_identifier, identifier) + update_project(domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates the specified project in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a project is + to be updated. +- `identifier`: The identifier of the project that is to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description to be updated as part of the UpdateProject action. +- `"glossaryTerms"`: The glossary terms to be updated as part of the UpdateProject action. +- `"name"`: The name to be updated as part of the UpdateProject action. +""" +function update_project( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/projects/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_project( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/projects/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_subscription_grant_status(asset_identifier, domain_identifier, identifier, status) + update_subscription_grant_status(asset_identifier, domain_identifier, identifier, status, params::Dict{String,<:Any}) + +Updates the status of the specified subscription grant status in Amazon DataZone. 
+ +# Arguments +- `asset_identifier`: The identifier of the asset the subscription grant status of which is + to be updated. +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a subscription + grant status is to be updated. +- `identifier`: The identifier of the subscription grant the status of which is to be + updated. +- `status`: The status to be updated as part of the UpdateSubscriptionGrantStatus action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"failureCause"`: Specifies the error message that is returned if the operation cannot be + successfully completed. +- `"targetName"`: The target name to be updated as part of the + UpdateSubscriptionGrantStatus action. +""" +function update_subscription_grant_status( + assetIdentifier, + domainIdentifier, + identifier, + status; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/subscription-grants/$(identifier)/status/$(assetIdentifier)", + Dict{String,Any}("status" => status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_subscription_grant_status( + assetIdentifier, + domainIdentifier, + identifier, + status, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/subscription-grants/$(identifier)/status/$(assetIdentifier)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_subscription_request(domain_identifier, identifier, request_reason) + update_subscription_request(domain_identifier, identifier, request_reason, params::Dict{String,<:Any}) + +Updates a specified subscription request in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a subscription + request is to be updated. +- `identifier`: The identifier of the subscription request that is to be updated. +- `request_reason`: The reason for the UpdateSubscriptionRequest action. + +""" +function update_subscription_request( + domainIdentifier, + identifier, + requestReason; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)", + Dict{String,Any}("requestReason" => requestReason); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_subscription_request( + domainIdentifier, + identifier, + requestReason, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/subscription-requests/$(identifier)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("requestReason" => requestReason), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_subscription_target(domain_identifier, environment_identifier, identifier) + update_subscription_target(domain_identifier, environment_identifier, identifier, params::Dict{String,<:Any}) + +Updates the specified subscription target in Amazon DataZone. + +# Arguments +- `domain_identifier`: The identifier of the Amazon DataZone domain in which a subscription + target is to be updated. +- `environment_identifier`: The identifier of the environment in which a subscription + target is to be updated. 
+- `identifier`: The identifier of the subscription target that is to be updated.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"applicableAssetTypes"`: The applicable asset types to be updated as part of the
+  UpdateSubscriptionTarget action.
+- `"authorizedPrincipals"`: The authorized principals to be updated as part of the
+  UpdateSubscriptionTarget action.
+- `"manageAccessRole"`: The manage access role to be updated as part of the
+  UpdateSubscriptionTarget action.
+- `"name"`: The name to be updated as part of the UpdateSubscriptionTarget action.
+- `"provider"`: The provider to be updated as part of the UpdateSubscriptionTarget action.
+- `"subscriptionTargetConfig"`: The configuration to be updated as part of the
+  UpdateSubscriptionTarget action.
+"""
+function update_subscription_target(
+    domainIdentifier,
+    environmentIdentifier,
+    identifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "PATCH",
+        "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_subscription_target(
+    domainIdentifier,
+    environmentIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "PATCH",
+        "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/subscription-targets/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    update_user_profile(domain_identifier, status, user_identifier)
+    update_user_profile(domain_identifier, status, user_identifier, params::Dict{String,<:Any})
+
+Updates the specified user profile in Amazon DataZone.
+
+# Arguments
+- `domain_identifier`: The identifier of the Amazon DataZone domain in which a user profile
+  is updated.
+- `status`: The status of the user profile that is to be updated.
+- `user_identifier`: The identifier of the user whose user profile is to be updated.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"type"`: The type of the user profile that is to be updated.
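+
+# Examples
+An illustrative call (a sketch); the identifiers are placeholders and `"ACTIVATED"` is only
+an example status value, so check the service documentation for the allowed values:
+```julia
+update_user_profile("dzd_exampledomain", "ACTIVATED", "example-user-id")
+```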
+""" +function update_user_profile( + domainIdentifier, + status, + userIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/user-profiles/$(userIdentifier)", + Dict{String,Any}("status" => status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_user_profile( + domainIdentifier, + status, + userIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/user-profiles/$(userIdentifier)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/deadline.jl b/src/services/deadline.jl new file mode 100644 index 0000000000..61902b502a --- /dev/null +++ b/src/services/deadline.jl @@ -0,0 +1,4565 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: deadline +using AWS.Compat +using AWS.UUIDs + +""" + associate_member_to_farm(farm_id, identity_store_id, membership_level, principal_id, principal_type) + associate_member_to_farm(farm_id, identity_store_id, membership_level, principal_id, principal_type, params::Dict{String,<:Any}) + +Assigns a farm membership level to a member. + +# Arguments +- `farm_id`: The ID of the farm to associate with the member. +- `identity_store_id`: The identity store ID of the member to associate with the farm. +- `membership_level`: The principal's membership level for the associated farm. +- `principal_id`: The member's principal ID to associate with the farm. +- `principal_type`: The principal type of the member to associate with the farm. + +""" +function associate_member_to_farm( + farmId, + identityStoreId, + membershipLevel, + principalId, + principalType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/members/$(principalId)", + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_member_to_farm( + farmId, + identityStoreId, + membershipLevel, + principalId, + principalType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/members/$(principalId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_member_to_fleet(farm_id, fleet_id, identity_store_id, membership_level, principal_id, principal_type) + associate_member_to_fleet(farm_id, fleet_id, identity_store_id, membership_level, principal_id, principal_type, params::Dict{String,<:Any}) + +Assigns a fleet membership level to a member. + +# Arguments +- `farm_id`: The farm ID of the fleet to associate with the member. +- `fleet_id`: The ID of the fleet to associate with a member. +- `identity_store_id`: The member's identity store ID to associate with the fleet. +- `membership_level`: The principal's membership level for the associated fleet. +- `principal_id`: The member's principal ID to associate with a fleet. 
+- `principal_type`: The member's principal type to associate with the fleet. + +""" +function associate_member_to_fleet( + farmId, + fleetId, + identityStoreId, + membershipLevel, + principalId, + principalType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/members/$(principalId)", + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_member_to_fleet( + farmId, + fleetId, + identityStoreId, + membershipLevel, + principalId, + principalType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/members/$(principalId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_member_to_job(farm_id, identity_store_id, job_id, membership_level, principal_id, principal_type, queue_id) + associate_member_to_job(farm_id, identity_store_id, job_id, membership_level, principal_id, principal_type, queue_id, params::Dict{String,<:Any}) + +Assigns a job membership level to a member + +# Arguments +- `farm_id`: The farm ID of the job to associate with the member. +- `identity_store_id`: The member's identity store ID to associate with the job. +- `job_id`: The job ID to associate with the member. +- `membership_level`: The principal's membership level for the associated job. +- `principal_id`: The member's principal ID to associate with the job. +- `principal_type`: The member's principal type to associate with the job. +- `queue_id`: The queue ID to associate to the member. + +""" +function associate_member_to_job( + farmId, + identityStoreId, + jobId, + membershipLevel, + principalId, + principalType, + queueId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/members/$(principalId)", + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_member_to_job( + farmId, + identityStoreId, + jobId, + membershipLevel, + principalId, + principalType, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/members/$(principalId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_member_to_queue(farm_id, identity_store_id, membership_level, principal_id, principal_type, queue_id) + associate_member_to_queue(farm_id, identity_store_id, membership_level, principal_id, principal_type, queue_id, params::Dict{String,<:Any}) + +Assigns a queue membership level to a member + +# Arguments +- `farm_id`: The farm ID of the queue to associate with the member. 
+- `identity_store_id`: The member's identity store ID to associate with the queue. +- `membership_level`: The principal's membership level for the associated queue. +- `principal_id`: The member's principal ID to associate with the queue. +- `principal_type`: The member's principal type to associate with the queue. +- `queue_id`: The ID of the queue to associate to the member. + +""" +function associate_member_to_queue( + farmId, + identityStoreId, + membershipLevel, + principalId, + principalType, + queueId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/members/$(principalId)", + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_member_to_queue( + farmId, + identityStoreId, + membershipLevel, + principalId, + principalType, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/members/$(principalId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "identityStoreId" => identityStoreId, + "membershipLevel" => membershipLevel, + "principalType" => principalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + assume_fleet_role_for_read(farm_id, fleet_id) + assume_fleet_role_for_read(farm_id, fleet_id, params::Dict{String,<:Any}) + +Get Amazon Web Services credentials from the fleet role. The IAM permissions of the +credentials are scoped down to have read-only access. + +# Arguments +- `farm_id`: The farm ID for the fleet's farm. +- `fleet_id`: The fleet ID. + +""" +function assume_fleet_role_for_read( + farmId, fleetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/read-roles"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function assume_fleet_role_for_read( + farmId, + fleetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/read-roles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + assume_fleet_role_for_worker(farm_id, fleet_id, worker_id) + assume_fleet_role_for_worker(farm_id, fleet_id, worker_id, params::Dict{String,<:Any}) + +Get credentials from the fleet role for a worker. + +# Arguments +- `farm_id`: The farm ID for the fleet's farm. +- `fleet_id`: The fleet ID that contains the worker. +- `worker_id`: The ID of the worker assuming the fleet role. 
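+
+# Examples
+An illustrative call (a sketch) with placeholder IDs; the response contains the fleet-role
+credentials for the worker:
+```julia
+creds = assume_fleet_role_for_worker("farm-example", "fleet-example", "worker-example")
+```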
+ +""" +function assume_fleet_role_for_worker( + farmId, fleetId, workerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/fleet-roles"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function assume_fleet_role_for_worker( + farmId, + fleetId, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/fleet-roles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + assume_queue_role_for_read(farm_id, queue_id) + assume_queue_role_for_read(farm_id, queue_id, params::Dict{String,<:Any}) + +Gets Amazon Web Services credentials from the queue role. The IAM permissions of the +credentials are scoped down to have read-only access. + +# Arguments +- `farm_id`: The farm ID of the farm containing the queue. +- `queue_id`: The queue ID. + +""" +function assume_queue_role_for_read( + farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/read-roles"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function assume_queue_role_for_read( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/read-roles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + assume_queue_role_for_user(farm_id, queue_id) + assume_queue_role_for_user(farm_id, queue_id, params::Dict{String,<:Any}) + +Allows a user to assume a role for a queue. + +# Arguments +- `farm_id`: The farm ID of the queue that the user assumes the role for. +- `queue_id`: The queue ID of the queue that the user assumes the role for. + +""" +function assume_queue_role_for_user( + farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/user-roles"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function assume_queue_role_for_user( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/user-roles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + assume_queue_role_for_worker(farm_id, fleet_id, queue_id, worker_id) + assume_queue_role_for_worker(farm_id, fleet_id, queue_id, worker_id, params::Dict{String,<:Any}) + +Allows a worker to assume a queue role. + +# Arguments +- `farm_id`: The farm ID of the worker assuming the queue role. +- `fleet_id`: The fleet ID of the worker assuming the queue role. +- `queue_id`: The queue ID of the worker assuming the queue role. +- `worker_id`: The worker ID of the worker assuming the queue role. 
+ +""" +function assume_queue_role_for_worker( + farmId, fleetId, queueId, workerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/queue-roles", + Dict{String,Any}("queueId" => queueId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function assume_queue_role_for_worker( + farmId, + fleetId, + queueId, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/queue-roles", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("queueId" => queueId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_get_job_entity(farm_id, fleet_id, identifiers, worker_id) + batch_get_job_entity(farm_id, fleet_id, identifiers, worker_id, params::Dict{String,<:Any}) + +Get batched job details for a worker. + +# Arguments +- `farm_id`: The farm ID of the worker that's fetching job details. The worker must have an + assignment on a job to fetch job details. +- `fleet_id`: The fleet ID of the worker that's fetching job details. The worker must have + an assignment on a job to fetch job details. +- `identifiers`: The job identifiers to include within the job entity batch details. +- `worker_id`: The worker ID of the worker containing the job details to get. + +""" +function batch_get_job_entity( + farmId, + fleetId, + identifiers, + workerId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/batchGetJobEntity", + Dict{String,Any}("identifiers" => identifiers); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_job_entity( + farmId, + fleetId, + identifiers, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/batchGetJobEntity", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("identifiers" => identifiers), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + copy_job_template(farm_id, job_id, queue_id, target_s3_location) + copy_job_template(farm_id, job_id, queue_id, target_s3_location, params::Dict{String,<:Any}) + +Copies a job template to an Amazon S3 bucket. + +# Arguments +- `farm_id`: The farm ID to copy. +- `job_id`: The job ID to copy. +- `queue_id`: The queue ID to copy. +- `target_s3_location`: The Amazon S3 bucket name and key where you would like to add a + copy of the job template. 
+ +""" +function copy_job_template( + farmId, + jobId, + queueId, + targetS3Location; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/template", + Dict{String,Any}("targetS3Location" => targetS3Location); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function copy_job_template( + farmId, + jobId, + queueId, + targetS3Location, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/template", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("targetS3Location" => targetS3Location), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_budget(actions, approximate_dollar_limit, display_name, farm_id, schedule, usage_tracking_resource) + create_budget(actions, approximate_dollar_limit, display_name, farm_id, schedule, usage_tracking_resource, params::Dict{String,<:Any}) + +Creates a budget to set spending thresholds for your rendering activity. + +# Arguments +- `actions`: The budget actions to specify what happens when the budget runs out. +- `approximate_dollar_limit`: The dollar limit based on consumed usage. +- `display_name`: The display name of the budget. +- `farm_id`: The farm ID to include in this budget. +- `schedule`: The schedule to associate with this budget. +- `usage_tracking_resource`: The queue ID provided to this budget to track usage. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"description"`: The description of the budget. +""" +function create_budget( + actions, + approximateDollarLimit, + displayName, + farmId, + schedule, + usageTrackingResource; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/budgets", + Dict{String,Any}( + "actions" => actions, + "approximateDollarLimit" => approximateDollarLimit, + "displayName" => displayName, + "schedule" => schedule, + "usageTrackingResource" => usageTrackingResource, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_budget( + actions, + approximateDollarLimit, + displayName, + farmId, + schedule, + usageTrackingResource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/budgets", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "actions" => actions, + "approximateDollarLimit" => approximateDollarLimit, + "displayName" => displayName, + "schedule" => schedule, + "usageTrackingResource" => usageTrackingResource, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_farm(display_name) + create_farm(display_name, params::Dict{String,<:Any}) + +Creates a farm to allow space for queues and fleets. Farms are the space where the +components of your renders gather and are pieced together in the cloud. Farms contain +budgets and allow you to enforce permissions. Deadline Cloud farms are a useful container +for large projects. + +# Arguments +- `display_name`: The display name of the farm. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"description"`: The description of the farm. +- `"kmsKeyArn"`: The ARN of the KMS key to use on the farm. +- `"tags"`: The tags to add to your farm. Each tag consists of a tag key and a tag value. + Tag keys and values are both required, but tag values can be empty strings. +""" +function create_farm(displayName; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "POST", + "/2023-10-12/farms", + Dict{String,Any}( + "displayName" => displayName, "X-Amz-Client-Token" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_farm( + displayName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "displayName" => displayName, "X-Amz-Client-Token" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_fleet(configuration, display_name, farm_id, max_worker_count, role_arn) + create_fleet(configuration, display_name, farm_id, max_worker_count, role_arn, params::Dict{String,<:Any}) + +Creates a fleet. Fleets gather information relating to compute, or capacity, for renders +within your farms. You can choose to manage your own capacity or opt to have fleets fully +managed by Deadline Cloud. + +# Arguments +- `configuration`: The configuration settings for the fleet. Customer managed fleets are + self-managed. Service managed Amazon EC2 fleets are managed by Deadline Cloud. +- `display_name`: The display name of the fleet. +- `farm_id`: The farm ID of the farm to connect to the fleet. +- `max_worker_count`: The maximum number of workers for the fleet. +- `role_arn`: The IAM role ARN for the role that the fleet's workers will use. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"description"`: The description of the fleet. +- `"minWorkerCount"`: The minimum number of workers for the fleet. +- `"tags"`: Each tag consists of a tag key and a tag value. Tag keys and values are both + required, but tag values can be empty strings. 
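+
+# Examples
+An illustrative call (a sketch). The configuration below describes a customer-managed fleet
+with minimal worker capabilities; its field names and values are assumptions about the
+service model, not taken from this file, and the IDs and role ARN are placeholders:
+```julia
+fleet_config = Dict{String,Any}(
+    "customerManaged" => Dict{String,Any}(
+        "mode" => "NO_SCALING",
+        "workerCapabilities" => Dict{String,Any}(
+            "vCpuCount" => Dict{String,Any}("min" => 2),
+            "memoryMiB" => Dict{String,Any}("min" => 4096),
+            "osFamily" => "LINUX",
+            "cpuArchitectureType" => "x86_64",
+        ),
+    ),
+)
+create_fleet(
+    fleet_config,
+    "example-fleet",
+    "farm-example",
+    10,
+    "arn:aws:iam::111122223333:role/ExampleFleetWorkerRole",
+)
+```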
+""" +function create_fleet( + configuration, + displayName, + farmId, + maxWorkerCount, + roleArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/fleets", + Dict{String,Any}( + "configuration" => configuration, + "displayName" => displayName, + "maxWorkerCount" => maxWorkerCount, + "roleArn" => roleArn, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_fleet( + configuration, + displayName, + farmId, + maxWorkerCount, + roleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/fleets", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, + "displayName" => displayName, + "maxWorkerCount" => maxWorkerCount, + "roleArn" => roleArn, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_job(farm_id, priority, queue_id, template, template_type) + create_job(farm_id, priority, queue_id, template, template_type, params::Dict{String,<:Any}) + +Creates a job. A job is a render submission submitted by a user. It contains specific job +properties outlined as steps and tasks. + +# Arguments +- `farm_id`: The farm ID of the farm to connect to the job. +- `priority`: The priority of the job on a scale of 1 to 100. The highest priority is 1. +- `queue_id`: The ID of the queue that the job is submitted to. +- `template`: The job template to use for this job. +- `template_type`: The file type for the job template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"attachments"`: The attachments for the job. Attach files required for the job to run to + a render job. +- `"maxFailedTasksCount"`: The number of task failures before the job stops running and is + marked as FAILED. +- `"maxRetriesPerTask"`: The maximum number of retries for a job. +- `"parameters"`: The parameters for the job. +- `"storageProfileId"`: The storage profile ID for the storage profile to connect to the + job. +- `"targetTaskRunStatus"`: The initial status of the job's tasks when they are created. + Tasks that are created with a SUSPENDED status will not run until you update their status. 
+""" +function create_job( + farmId, + priority, + queueId, + template, + templateType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs", + Dict{String,Any}( + "priority" => priority, + "template" => template, + "templateType" => templateType, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_job( + farmId, + priority, + queueId, + template, + templateType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "priority" => priority, + "template" => template, + "templateType" => templateType, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_license_endpoint(security_group_ids, subnet_ids, vpc_id) + create_license_endpoint(security_group_ids, subnet_ids, vpc_id, params::Dict{String,<:Any}) + +Creates a license endpoint to integrate your various licensed software used for rendering +on Deadline Cloud. + +# Arguments +- `security_group_ids`: The security group IDs. +- `subnet_ids`: The subnet IDs. +- `vpc_id`: The VPC (virtual private cloud) ID to use with the license endpoint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"tags"`: Each tag consists of a tag key and a tag value. Tag keys and values are both + required, but tag values can be empty strings. +""" +function create_license_endpoint( + securityGroupIds, subnetIds, vpcId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "POST", + "/2023-10-12/license-endpoints", + Dict{String,Any}( + "securityGroupIds" => securityGroupIds, + "subnetIds" => subnetIds, + "vpcId" => vpcId, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_license_endpoint( + securityGroupIds, + subnetIds, + vpcId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/license-endpoints", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "securityGroupIds" => securityGroupIds, + "subnetIds" => subnetIds, + "vpcId" => vpcId, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_monitor(display_name, identity_center_instance_arn, role_arn, subdomain) + create_monitor(display_name, identity_center_instance_arn, role_arn, subdomain, params::Dict{String,<:Any}) + +Creates an Amazon Web Services Deadline Cloud monitor that you can use to view your farms, +queues, and fleets. After you submit a job, you can track the progress of the tasks and +steps that make up the job, and then download the job's results. + +# Arguments +- `display_name`: The name that you give the monitor that is displayed in the Deadline + Cloud console. +- `identity_center_instance_arn`: The Amazon Resource Name (ARN) of the IAM Identity Center + instance that authenticates monitor users. 
+- `role_arn`: The Amazon Resource Name (ARN) of the IAM role that the monitor uses to + connect to Deadline Cloud. Every user that signs in to the monitor using IAM Identity + Center uses this role to access Deadline Cloud resources. +- `subdomain`: The subdomain to use when creating the monitor URL. The full URL of the + monitor is subdomain.Region.deadlinecloud.amazonaws.com. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +""" +function create_monitor( + displayName, + identityCenterInstanceArn, + roleArn, + subdomain; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/monitors", + Dict{String,Any}( + "displayName" => displayName, + "identityCenterInstanceArn" => identityCenterInstanceArn, + "roleArn" => roleArn, + "subdomain" => subdomain, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_monitor( + displayName, + identityCenterInstanceArn, + roleArn, + subdomain, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/monitors", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "displayName" => displayName, + "identityCenterInstanceArn" => identityCenterInstanceArn, + "roleArn" => roleArn, + "subdomain" => subdomain, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_queue(display_name, farm_id) + create_queue(display_name, farm_id, params::Dict{String,<:Any}) + +Creates a queue to coordinate the order in which jobs run on a farm. A queue can also +specify where to pull resources and indicate where to output completed jobs. + +# Arguments +- `display_name`: The display name of the queue. +- `farm_id`: The farm ID of the farm to connect to the queue. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"allowedStorageProfileIds"`: The storage profile IDs to include in the queue. +- `"defaultBudgetAction"`: The default action to take on a queue if a budget isn't + configured. +- `"description"`: The description of the queue. +- `"jobAttachmentSettings"`: The job attachment settings for the queue. These are the + Amazon S3 bucket name and the Amazon S3 prefix. +- `"jobRunAsUser"`: The jobs in the queue run as the specified POSIX user. +- `"requiredFileSystemLocationNames"`: The file system location name to include in the + queue. +- `"roleArn"`: The IAM role ARN that workers will use while running jobs for this queue. +- `"tags"`: Each tag consists of a tag key and a tag value. Tag keys and values are both + required, but tag values can be empty strings. 
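+
+# Examples
+An illustrative call (a sketch); the IDs are placeholders and `"NONE"` is only an example
+default budget action:
+```julia
+create_queue(
+    "example-queue",
+    "farm-example",
+    Dict{String,Any}("description" => "Queue for nightly renders", "defaultBudgetAction" => "NONE"),
+)
+```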
+""" +function create_queue( + displayName, farmId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues", + Dict{String,Any}( + "displayName" => displayName, "X-Amz-Client-Token" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_queue( + displayName, + farmId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "displayName" => displayName, "X-Amz-Client-Token" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_queue_environment(farm_id, priority, queue_id, template, template_type) + create_queue_environment(farm_id, priority, queue_id, template, template_type, params::Dict{String,<:Any}) + +Creates an environment for a queue that defines how jobs in the queue run. + +# Arguments +- `farm_id`: The farm ID of the farm to connect to the environment. +- `priority`: Sets the priority of the environments in the queue from 0 to 10,000, where 0 + is the highest priority. If two environments share the same priority value, the environment + created first takes higher priority. +- `queue_id`: The queue ID to connect the queue and environment. +- `template`: The environment template to use in the queue. +- `template_type`: The template's file type, JSON or YAML. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +""" +function create_queue_environment( + farmId, + priority, + queueId, + template, + templateType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments", + Dict{String,Any}( + "priority" => priority, + "template" => template, + "templateType" => templateType, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_queue_environment( + farmId, + priority, + queueId, + template, + templateType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "priority" => priority, + "template" => template, + "templateType" => templateType, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_queue_fleet_association(farm_id, fleet_id, queue_id) + create_queue_fleet_association(farm_id, fleet_id, queue_id, params::Dict{String,<:Any}) + +Creates an association between a queue and a fleet. + +# Arguments +- `farm_id`: The ID of the farm that the queue and fleet belong to. +- `fleet_id`: The fleet ID. +- `queue_id`: The queue ID. 
+ +""" +function create_queue_fleet_association( + farmId, fleetId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations", + Dict{String,Any}("fleetId" => fleetId, "queueId" => queueId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_queue_fleet_association( + farmId, + fleetId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("fleetId" => fleetId, "queueId" => queueId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_storage_profile(display_name, farm_id, os_family) + create_storage_profile(display_name, farm_id, os_family, params::Dict{String,<:Any}) + +Creates a storage profile that specifies the operating system, file type, and file location +of resources used on a farm. + +# Arguments +- `display_name`: The display name of the storage profile. +- `farm_id`: The farm ID of the farm to connect to the storage profile. +- `os_family`: The type of operating system (OS) for the storage profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"fileSystemLocations"`: File system paths to include in the storage profile. +""" +function create_storage_profile( + displayName, farmId, osFamily; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/storage-profiles", + Dict{String,Any}( + "displayName" => displayName, + "osFamily" => osFamily, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_storage_profile( + displayName, + farmId, + osFamily, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/storage-profiles", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "displayName" => displayName, + "osFamily" => osFamily, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_worker(farm_id, fleet_id) + create_worker(farm_id, fleet_id, params::Dict{String,<:Any}) + +Creates a worker. A worker tells your instance how much processing power (vCPU), and memory +(GiB) you’ll need to assemble the digital assets held within a particular instance. You +can specify certain instance types to use, or let the worker know which instances types to +exclude. + +# Arguments +- `farm_id`: The farm ID of the farm to connect to the worker. +- `fleet_id`: The fleet ID to connect to the worker. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"hostProperties"`: The IP address and host name of the worker. 
+""" +function create_worker(farmId, fleetId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_worker( + farmId, + fleetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_budget(budget_id, farm_id) + delete_budget(budget_id, farm_id, params::Dict{String,<:Any}) + +Deletes a budget. + +# Arguments +- `budget_id`: The budget ID of the budget to delete. +- `farm_id`: The farm ID of the farm to remove from the budget. + +""" +function delete_budget(budgetId, farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/budgets/$(budgetId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_budget( + budgetId, + farmId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/budgets/$(budgetId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_farm(farm_id) + delete_farm(farm_id, params::Dict{String,<:Any}) + +Deletes a farm. + +# Arguments +- `farm_id`: The farm ID of the farm to delete. + +""" +function delete_farm(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_farm( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_fleet(farm_id, fleet_id) + delete_fleet(farm_id, fleet_id, params::Dict{String,<:Any}) + +Deletes a fleet. + +# Arguments +- `farm_id`: The farm ID of the farm to remove from the fleet. +- `fleet_id`: The fleet ID of the fleet to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +""" +function delete_fleet(farmId, fleetId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_fleet( + farmId, + fleetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_license_endpoint(license_endpoint_id) + delete_license_endpoint(license_endpoint_id, params::Dict{String,<:Any}) + +Deletes a license endpoint. 
+ +# Arguments +- `license_endpoint_id`: The license endpoint ID of the license endpoint to delete. + +""" +function delete_license_endpoint( + licenseEndpointId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/license-endpoints/$(licenseEndpointId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_license_endpoint( + licenseEndpointId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/license-endpoints/$(licenseEndpointId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_metered_product(license_endpoint_id, product_id) + delete_metered_product(license_endpoint_id, product_id, params::Dict{String,<:Any}) + +Deletes a metered product. + +# Arguments +- `license_endpoint_id`: The ID of the license endpoint from which to remove the metered + product. +- `product_id`: The product ID to remove from the license endpoint. + +""" +function delete_metered_product( + licenseEndpointId, productId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/license-endpoints/$(licenseEndpointId)/metered-products/$(productId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_metered_product( + licenseEndpointId, + productId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/license-endpoints/$(licenseEndpointId)/metered-products/$(productId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_monitor(monitor_id) + delete_monitor(monitor_id, params::Dict{String,<:Any}) + +Removes a Deadline Cloud monitor. After you delete a monitor, you can create a new one and +attach farms to the monitor. + +# Arguments +- `monitor_id`: The unique identifier of the monitor to delete. This ID is returned by the + CreateMonitor operation, and is included in the response to the GetMonitor operation. + +""" +function delete_monitor(monitorId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "DELETE", + "/2023-10-12/monitors/$(monitorId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_monitor( + monitorId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/monitors/$(monitorId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_queue(farm_id, queue_id) + delete_queue(farm_id, queue_id, params::Dict{String,<:Any}) + +Deletes a queue. + +# Arguments +- `farm_id`: The ID of the farm from which to remove the queue. +- `queue_id`: The queue ID of the queue to delete. 
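+
+# Examples
+An illustrative call (a sketch) with placeholder IDs:
+```julia
+delete_queue("farm-example", "queue-example")
+```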
+ +""" +function delete_queue(farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_queue( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_queue_environment(farm_id, queue_environment_id, queue_id) + delete_queue_environment(farm_id, queue_environment_id, queue_id, params::Dict{String,<:Any}) + +Deletes a queue environment. + +# Arguments +- `farm_id`: The farm ID of the farm from which to remove the queue environment. +- `queue_environment_id`: The queue environment ID of the queue environment to delete. +- `queue_id`: The queue ID of the queue environment to delete. + +""" +function delete_queue_environment( + farmId, queueEnvironmentId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments/$(queueEnvironmentId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_queue_environment( + farmId, + queueEnvironmentId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments/$(queueEnvironmentId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_queue_fleet_association(farm_id, fleet_id, queue_id) + delete_queue_fleet_association(farm_id, fleet_id, queue_id, params::Dict{String,<:Any}) + +Deletes a queue-fleet association. + +# Arguments +- `farm_id`: The farm ID of the farm that holds the queue-fleet association. +- `fleet_id`: The fleet ID of the queue-fleet association. +- `queue_id`: The queue ID of the queue-fleet association. + +""" +function delete_queue_fleet_association( + farmId, fleetId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations/$(queueId)/$(fleetId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_queue_fleet_association( + farmId, + fleetId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations/$(queueId)/$(fleetId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_storage_profile(farm_id, storage_profile_id) + delete_storage_profile(farm_id, storage_profile_id, params::Dict{String,<:Any}) + +Deletes a storage profile. + +# Arguments +- `farm_id`: The farm ID of the farm from which to remove the storage profile. +- `storage_profile_id`: The storage profile ID of the storage profile to delete. 
+ +""" +function delete_storage_profile( + farmId, storageProfileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/storage-profiles/$(storageProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_storage_profile( + farmId, + storageProfileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/storage-profiles/$(storageProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_worker(farm_id, fleet_id, worker_id) + delete_worker(farm_id, fleet_id, worker_id, params::Dict{String,<:Any}) + +Deletes a worker. + +# Arguments +- `farm_id`: The farm ID of the worker to delete. +- `fleet_id`: The fleet ID of the worker to delete. +- `worker_id`: The worker ID of the worker to delete. + +""" +function delete_worker( + farmId, fleetId, workerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_worker( + farmId, + fleetId, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_member_from_farm(farm_id, principal_id) + disassociate_member_from_farm(farm_id, principal_id, params::Dict{String,<:Any}) + +Disassociates a member from a farm. + +# Arguments +- `farm_id`: The farm ID of the farm to disassociate from the member. +- `principal_id`: A member's principal ID to disassociate from a farm. + +""" +function disassociate_member_from_farm( + farmId, principalId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/members/$(principalId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_member_from_farm( + farmId, + principalId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/members/$(principalId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_member_from_fleet(farm_id, fleet_id, principal_id) + disassociate_member_from_fleet(farm_id, fleet_id, principal_id, params::Dict{String,<:Any}) + +Disassociates a member from a fleet. + +# Arguments +- `farm_id`: The farm ID of the fleet to disassociate a member from. +- `fleet_id`: The fleet ID of the fleet to from which to disassociate a member. +- `principal_id`: A member's principal ID to disassociate from a fleet. 
+ +""" +function disassociate_member_from_fleet( + farmId, fleetId, principalId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/members/$(principalId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_member_from_fleet( + farmId, + fleetId, + principalId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/members/$(principalId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_member_from_job(farm_id, job_id, principal_id, queue_id) + disassociate_member_from_job(farm_id, job_id, principal_id, queue_id, params::Dict{String,<:Any}) + +Disassociates a member from a job. + +# Arguments +- `farm_id`: The farm ID for the job to disassociate from the member. +- `job_id`: The job ID to disassociate from a member in a job. +- `principal_id`: A member's principal ID to disassociate from a job. +- `queue_id`: The queue ID connected to a job for which you're disassociating a member. + +""" +function disassociate_member_from_job( + farmId, jobId, principalId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/members/$(principalId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_member_from_job( + farmId, + jobId, + principalId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/members/$(principalId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_member_from_queue(farm_id, principal_id, queue_id) + disassociate_member_from_queue(farm_id, principal_id, queue_id, params::Dict{String,<:Any}) + +Disassociates a member from a queue. + +# Arguments +- `farm_id`: The farm ID for the queue to disassociate from a member. +- `principal_id`: A member's principal ID to disassociate from a queue. +- `queue_id`: The queue ID of the queue in which you're disassociating from a member. + +""" +function disassociate_member_from_queue( + farmId, principalId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/members/$(principalId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_member_from_queue( + farmId, + principalId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "DELETE", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/members/$(principalId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_budget(budget_id, farm_id) + get_budget(budget_id, farm_id, params::Dict{String,<:Any}) + +Get a budget. + +# Arguments +- `budget_id`: The budget ID. +- `farm_id`: The farm ID of the farm connected to the budget. 
+ +""" +function get_budget(budgetId, farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/budgets/$(budgetId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_budget( + budgetId, + farmId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/budgets/$(budgetId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_farm(farm_id) + get_farm(farm_id, params::Dict{String,<:Any}) + +Get a farm. + +# Arguments +- `farm_id`: The farm ID of the farm. + +""" +function get_farm(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_farm( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_fleet(farm_id, fleet_id) + get_fleet(farm_id, fleet_id, params::Dict{String,<:Any}) + +Get a fleet. + +# Arguments +- `farm_id`: The farm ID of the farm in the fleet. +- `fleet_id`: The fleet ID of the fleet to get. + +""" +function get_fleet(farmId, fleetId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_fleet( + farmId, + fleetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_job(farm_id, job_id, queue_id) + get_job(farm_id, job_id, queue_id, params::Dict{String,<:Any}) + +Gets a Deadline Cloud job. + +# Arguments +- `farm_id`: The farm ID of the farm in the job. +- `job_id`: The job ID. +- `queue_id`: The queue ID associated with the job. + +""" +function get_job(farmId, jobId, queueId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_job( + farmId, + jobId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_license_endpoint(license_endpoint_id) + get_license_endpoint(license_endpoint_id, params::Dict{String,<:Any}) + +Gets a licence endpoint. + +# Arguments +- `license_endpoint_id`: The license endpoint ID. 
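+
+# Example
+A sketch of supplying a non-default configuration through the `aws_config` keyword; the
+region and license endpoint ID are placeholders.
+```julia
+using AWS
+@service Deadline
+
+# Target a specific region; credentials are still resolved from the usual sources.
+cfg = AWSConfig(; region="us-west-2")
+Deadline.get_license_endpoint("le-0123456789abcdef"; aws_config=cfg)
+```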
+ +""" +function get_license_endpoint( + licenseEndpointId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/license-endpoints/$(licenseEndpointId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_license_endpoint( + licenseEndpointId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/license-endpoints/$(licenseEndpointId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_monitor(monitor_id) + get_monitor(monitor_id, params::Dict{String,<:Any}) + +Gets information about the specified monitor. + +# Arguments +- `monitor_id`: The unique identifier for the monitor. This ID is returned by the + CreateMonitor operation. + +""" +function get_monitor(monitorId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/monitors/$(monitorId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_monitor( + monitorId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/monitors/$(monitorId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_queue(farm_id, queue_id) + get_queue(farm_id, queue_id, params::Dict{String,<:Any}) + +Gets a queue. + +# Arguments +- `farm_id`: The farm ID of the farm in the queue. +- `queue_id`: The queue ID for the queue to retrieve. + +""" +function get_queue(farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_queue( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_queue_environment(farm_id, queue_environment_id, queue_id) + get_queue_environment(farm_id, queue_environment_id, queue_id, params::Dict{String,<:Any}) + +Gets a queue environment. + +# Arguments +- `farm_id`: The farm ID for the queue environment. +- `queue_environment_id`: The queue environment ID. +- `queue_id`: The queue ID for the queue environment. + +""" +function get_queue_environment( + farmId, queueEnvironmentId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments/$(queueEnvironmentId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_queue_environment( + farmId, + queueEnvironmentId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments/$(queueEnvironmentId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_queue_fleet_association(farm_id, fleet_id, queue_id) + get_queue_fleet_association(farm_id, fleet_id, queue_id, params::Dict{String,<:Any}) + +Gets a queue-fleet association. + +# Arguments +- `farm_id`: The farm ID of the farm that contains the queue-fleet association. +- `fleet_id`: The fleet ID for the queue-fleet association. +- `queue_id`: The queue ID for the queue-fleet association. 
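+
+# Example
+A placeholder-ID sketch. Note that the positional arguments are ordered farm, fleet,
+queue, while the request path places the queue ID before the fleet ID.
+```julia
+using AWS
+@service Deadline
+
+Deadline.get_queue_fleet_association("farm-1234", "fleet-5678", "queue-9012")
+```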
+ +""" +function get_queue_fleet_association( + farmId, fleetId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations/$(queueId)/$(fleetId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_queue_fleet_association( + farmId, + fleetId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations/$(queueId)/$(fleetId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_session(farm_id, job_id, queue_id, session_id) + get_session(farm_id, job_id, queue_id, session_id, params::Dict{String,<:Any}) + +Gets a session. + +# Arguments +- `farm_id`: The farm ID for the session. +- `job_id`: The job ID for the session. +- `queue_id`: The queue ID for the session. +- `session_id`: The session ID. + +""" +function get_session( + farmId, jobId, queueId, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/sessions/$(sessionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_session( + farmId, + jobId, + queueId, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/sessions/$(sessionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_session_action(farm_id, job_id, queue_id, session_action_id) + get_session_action(farm_id, job_id, queue_id, session_action_id, params::Dict{String,<:Any}) + +Gets a session action for the job. + +# Arguments +- `farm_id`: The farm ID for the session action. +- `job_id`: The job ID for the session. +- `queue_id`: The queue ID for the session action. +- `session_action_id`: The session action ID for the session. + +""" +function get_session_action( + farmId, + jobId, + queueId, + sessionActionId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/session-actions/$(sessionActionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_session_action( + farmId, + jobId, + queueId, + sessionActionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/session-actions/$(sessionActionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_sessions_statistics_aggregation(aggregation_id, farm_id) + get_sessions_statistics_aggregation(aggregation_id, farm_id, params::Dict{String,<:Any}) + +Gets a set of statistics for queues or farms. Before you can call the +GetSessionStatisticsAggregation operation, you must first call the +StartSessionsStatisticsAggregation operation. Statistics are available for 1 hour after you +call the StartSessionsStatisticsAggregation operation. + +# Arguments +- `aggregation_id`: The identifier returned by the StartSessionsStatisticsAggregation + operation that identifies the aggregated statistics. +- `farm_id`: The identifier of the farm to include in the statistics. 
This should be the
+  same as the farm ID used in the call to the StartSessionsStatisticsAggregation operation.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of results to return. Use this parameter with
+  NextToken to get results as a set of sequential pages.
+- `"nextToken"`: The token for the next set of results, or null to start from the beginning.
+"""
+function get_sessions_statistics_aggregation(
+    aggregationId, farmId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return deadline(
+        "GET",
+        "/2023-10-12/farms/$(farmId)/sessions-statistics-aggregation",
+        Dict{String,Any}("aggregationId" => aggregationId);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_sessions_statistics_aggregation(
+    aggregationId,
+    farmId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return deadline(
+        "GET",
+        "/2023-10-12/farms/$(farmId)/sessions-statistics-aggregation",
+        Dict{String,Any}(
+            mergewith(_merge, Dict{String,Any}("aggregationId" => aggregationId), params)
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_step(farm_id, job_id, queue_id, step_id)
+    get_step(farm_id, job_id, queue_id, step_id, params::Dict{String,<:Any})
+
+Gets a step.
+
+# Arguments
+- `farm_id`: The farm ID for the step.
+- `job_id`: The job ID for the step.
+- `queue_id`: The queue ID for the step.
+- `step_id`: The step ID.
+
+"""
+function get_step(
+    farmId, jobId, queueId, stepId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return deadline(
+        "GET",
+        "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_step(
+    farmId,
+    jobId,
+    queueId,
+    stepId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return deadline(
+        "GET",
+        "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_storage_profile(farm_id, storage_profile_id)
+    get_storage_profile(farm_id, storage_profile_id, params::Dict{String,<:Any})
+
+Gets a storage profile.
+
+# Arguments
+- `farm_id`: The farm ID for the storage profile.
+- `storage_profile_id`: The storage profile ID.
+
+"""
+function get_storage_profile(
+    farmId, storageProfileId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return deadline(
+        "GET",
+        "/2023-10-12/farms/$(farmId)/storage-profiles/$(storageProfileId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_storage_profile(
+    farmId,
+    storageProfileId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return deadline(
+        "GET",
+        "/2023-10-12/farms/$(farmId)/storage-profiles/$(storageProfileId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_storage_profile_for_queue(farm_id, queue_id, storage_profile_id)
+    get_storage_profile_for_queue(farm_id, queue_id, storage_profile_id, params::Dict{String,<:Any})
+
+Gets a storage profile for a queue.
+
+# Arguments
+- `farm_id`: The farm ID of the farm that contains the queue.
+- `queue_id`: The queue ID of the queue that uses the storage profile.
+- `storage_profile_id`: The storage profile ID for the storage profile in the queue.
+ +""" +function get_storage_profile_for_queue( + farmId, queueId, storageProfileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/storage-profiles/$(storageProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_storage_profile_for_queue( + farmId, + queueId, + storageProfileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/storage-profiles/$(storageProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_task(farm_id, job_id, queue_id, step_id, task_id) + get_task(farm_id, job_id, queue_id, step_id, task_id, params::Dict{String,<:Any}) + +Gets a task. + +# Arguments +- `farm_id`: The farm ID of the farm connected to the task. +- `job_id`: The job ID of the job connected to the task. +- `queue_id`: The queue ID for the queue connected to the task. +- `step_id`: The step ID for the step connected to the task. +- `task_id`: The task ID. + +""" +function get_task( + farmId, + jobId, + queueId, + stepId, + taskId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/tasks/$(taskId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_task( + farmId, + jobId, + queueId, + stepId, + taskId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/tasks/$(taskId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_worker(farm_id, fleet_id, worker_id) + get_worker(farm_id, fleet_id, worker_id, params::Dict{String,<:Any}) + +Gets a worker. + +# Arguments +- `farm_id`: The farm ID for the worker. +- `fleet_id`: The fleet ID of the worker. +- `worker_id`: The worker ID. + +""" +function get_worker( + farmId, fleetId, workerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_worker( + farmId, + fleetId, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_available_metered_products() + list_available_metered_products(params::Dict{String,<:Any}) + +A list of the available metered products. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. 
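+
+# Example
+A sketch of passing the optional paging parameters through the `params` dictionary; the
+page size shown is arbitrary.
+```julia
+using AWS
+@service Deadline
+
+# Request at most 25 metered products per page.
+Deadline.list_available_metered_products(Dict("maxResults" => 25))
+```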
+""" +function list_available_metered_products(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/metered-products"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_available_metered_products( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/metered-products", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_budgets(farm_id) + list_budgets(farm_id, params::Dict{String,<:Any}) + +A list of budgets in a farm. + +# Arguments +- `farm_id`: The farm ID associated with the budgets. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +- `"status"`: The status to list for the budgets. +""" +function list_budgets(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/budgets"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_budgets( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/budgets", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_farm_members(farm_id) + list_farm_members(farm_id, params::Dict{String,<:Any}) + +Lists the members of a farm. + +# Arguments +- `farm_id`: The farm ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_farm_members(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/members"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_farm_members( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/members", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_farms() + list_farms(params::Dict{String,<:Any}) + +Lists farms. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +- `"principalId"`: The principal ID of the member to list on the farm. 
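+
+# Example
+A paging sketch, assuming the parsed response behaves like a dictionary with `farms` and
+`nextToken` entries as in the Deadline Cloud ListFarms response.
+```julia
+using AWS
+@service Deadline
+
+farms = Any[]
+params = Dict{String,Any}("maxResults" => 100)
+while true
+    page = Deadline.list_farms(params)
+    append!(farms, get(page, "farms", []))    # accumulate this page of farm summaries
+    token = get(page, "nextToken", nothing)   # a missing token means there are no more pages
+    token === nothing && break
+    params["nextToken"] = token
+end
+```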
+""" +function list_farms(; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", "/2023-10-12/farms"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_farms( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_fleet_members(farm_id, fleet_id) + list_fleet_members(farm_id, fleet_id, params::Dict{String,<:Any}) + +Lists fleet members. + +# Arguments +- `farm_id`: The farm ID of the fleet. +- `fleet_id`: The fleet ID to include on the list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_fleet_members( + farmId, fleetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/members"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_fleet_members( + farmId, + fleetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/members", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_fleets(farm_id) + list_fleets(farm_id, params::Dict{String,<:Any}) + +Lists fleets. + +# Arguments +- `farm_id`: The farm ID of the fleets. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"displayName"`: The display names of a list of fleets. +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +- `"principalId"`: The principal ID of the members to include in the fleet. +- `"status"`: The status of the fleet. +""" +function list_fleets(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_fleets( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_job_members(farm_id, job_id, queue_id) + list_job_members(farm_id, job_id, queue_id, params::Dict{String,<:Any}) + +Lists members on a job. + +# Arguments +- `farm_id`: The farm ID of the job to list. +- `job_id`: The job ID to include on the list. +- `queue_id`: The queue ID to include on the list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. 
+""" +function list_job_members( + farmId, jobId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/members"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_job_members( + farmId, + jobId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/members", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_jobs(farm_id, queue_id) + list_jobs(farm_id, queue_id, params::Dict{String,<:Any}) + +Lists jobs. + +# Arguments +- `farm_id`: The farm ID for the jobs. +- `queue_id`: The queue ID for the job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +- `"principalId"`: The principal ID of the members on the jobs. +""" +function list_jobs(farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_jobs( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_license_endpoints() + list_license_endpoints(params::Dict{String,<:Any}) + +Lists license endpoints. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_license_endpoints(; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/license-endpoints"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_license_endpoints( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/license-endpoints", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_metered_products(license_endpoint_id) + list_metered_products(license_endpoint_id, params::Dict{String,<:Any}) + +Lists metered products. + +# Arguments +- `license_endpoint_id`: The license endpoint ID to include on the list of metered products. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. 
+""" +function list_metered_products( + licenseEndpointId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/license-endpoints/$(licenseEndpointId)/metered-products"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_metered_products( + licenseEndpointId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/license-endpoints/$(licenseEndpointId)/metered-products", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_monitors() + list_monitors(params::Dict{String,<:Any}) + +Gets a list of your monitors in Deadline Cloud. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_monitors(; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/monitors"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_monitors( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/monitors", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_queue_environments(farm_id, queue_id) + list_queue_environments(farm_id, queue_id, params::Dict{String,<:Any}) + +Lists queue environments. + +# Arguments +- `farm_id`: The farm ID for the queue environment list. +- `queue_id`: The queue ID for the queue environment list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_queue_environments( + farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_queue_environments( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_queue_fleet_associations(farm_id) + list_queue_fleet_associations(farm_id, params::Dict{String,<:Any}) + +Lists queue-fleet associations. + +# Arguments +- `farm_id`: The farm ID for the queue-fleet association list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"fleetId"`: The fleet ID for the queue-fleet association list. +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +- `"queueId"`: The queue ID for the queue-fleet association list. 
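+
+# Example
+A sketch of narrowing the listing with one of the optional filters (placeholder IDs).
+```julia
+using AWS
+@service Deadline
+
+# Return only the associations that involve this queue.
+Deadline.list_queue_fleet_associations("farm-1234", Dict("queueId" => "queue-9012"))
+```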
+""" +function list_queue_fleet_associations( + farmId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_queue_fleet_associations( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_queue_members(farm_id, queue_id) + list_queue_members(farm_id, queue_id, params::Dict{String,<:Any}) + +Lists the members in a queue. + +# Arguments +- `farm_id`: The farm ID for the queue. +- `queue_id`: The queue ID to include on the list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_queue_members( + farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/members"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_queue_members( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/members", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_queues(farm_id) + list_queues(farm_id, params::Dict{String,<:Any}) + +Lists queues. + +# Arguments +- `farm_id`: The farm ID of the queue. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +- `"principalId"`: The principal ID. This filter is only valid when using Nimble Studio + credentials and should match the user ID in the credentials of the caller. +- `"status"`: The status of the queues listed. ACTIVE–The queues are active. + SCHEDULING–The queues are scheduling. SCHEDULING_BLOCKED–The queue scheduling is + blocked for these queues. +""" +function list_queues(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_queues( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_session_actions(farm_id, job_id, queue_id) + list_session_actions(farm_id, job_id, queue_id, params::Dict{String,<:Any}) + +Lists session actions. + +# Arguments +- `farm_id`: The farm ID for the session actions list. +- `job_id`: The job ID for the session actions list. +- `queue_id`: The queue ID for the session actions list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +- `"sessionId"`: The session ID to include on the sessions action list. +- `"taskId"`: The task ID for the session actions list. +""" +function list_session_actions( + farmId, jobId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/session-actions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_session_actions( + farmId, + jobId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/session-actions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_sessions(farm_id, job_id, queue_id) + list_sessions(farm_id, job_id, queue_id, params::Dict{String,<:Any}) + +Lists sessions. + +# Arguments +- `farm_id`: The farm ID for the list of sessions. +- `job_id`: The job ID for the list of sessions. +- `queue_id`: The queue ID for the list of sessions + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_sessions( + farmId, jobId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/sessions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_sessions( + farmId, + jobId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/sessions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_sessions_for_worker(farm_id, fleet_id, worker_id) + list_sessions_for_worker(farm_id, fleet_id, worker_id, params::Dict{String,<:Any}) + +Lists sessions for a worker. + +# Arguments +- `farm_id`: The farm ID for the session. +- `fleet_id`: The fleet ID for the session. +- `worker_id`: The worker ID for the session. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. 
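+
+# Example
+A sketch that chains this call with `list_session_actions`, assuming the parsed responses
+are dictionaries exposing `sessions`, `sessionId`, and `sessionActions` keys; every ID is
+a placeholder.
+```julia
+using AWS
+@service Deadline
+
+resp = Deadline.list_sessions_for_worker("farm-1234", "fleet-5678", "worker-9012")
+for session in get(resp, "sessions", [])
+    # Look up the actions that ran in each of this worker's sessions; the job and queue
+    # IDs below stand in for the job and queue the session belongs to.
+    actions = Deadline.list_session_actions(
+        "farm-1234", "job-3456", "queue-7890", Dict("sessionId" => session["sessionId"])
+    )
+    @info "session" id=session["sessionId"] n_actions=length(get(actions, "sessionActions", []))
+end
+```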
+""" +function list_sessions_for_worker( + farmId, fleetId, workerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/sessions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_sessions_for_worker( + farmId, + fleetId, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/sessions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_step_consumers(farm_id, job_id, queue_id, step_id) + list_step_consumers(farm_id, job_id, queue_id, step_id, params::Dict{String,<:Any}) + +Lists step consumers. + +# Arguments +- `farm_id`: The farm ID for the list of step consumers. +- `job_id`: The job ID for the step consumer. +- `queue_id`: The queue ID for the step consumer. +- `step_id`: The step ID to include on the list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_step_consumers( + farmId, jobId, queueId, stepId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/consumers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_step_consumers( + farmId, + jobId, + queueId, + stepId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/consumers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_step_dependencies(farm_id, job_id, queue_id, step_id) + list_step_dependencies(farm_id, job_id, queue_id, step_id, params::Dict{String,<:Any}) + +Lists the dependencies for a step. + +# Arguments +- `farm_id`: The farm ID for the step dependencies list. +- `job_id`: The job ID for the step dependencies list. +- `queue_id`: The queue ID for the step dependencies list. +- `step_id`: The step ID to include on the list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. 
+""" +function list_step_dependencies( + farmId, jobId, queueId, stepId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/dependencies"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_step_dependencies( + farmId, + jobId, + queueId, + stepId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/dependencies", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_steps(farm_id, job_id, queue_id) + list_steps(farm_id, job_id, queue_id, params::Dict{String,<:Any}) + +Lists steps for a job. + +# Arguments +- `farm_id`: The farm ID to include on the list of steps. +- `job_id`: The job ID to include on the list of steps. +- `queue_id`: The queue ID to include on the list of steps. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_steps( + farmId, jobId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_steps( + farmId, + jobId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_storage_profiles(farm_id) + list_storage_profiles(farm_id, params::Dict{String,<:Any}) + +Lists storage profiles. + +# Arguments +- `farm_id`: The farm ID of the storage profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_storage_profiles(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/storage-profiles"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_storage_profiles( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/storage-profiles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_storage_profiles_for_queue(farm_id, queue_id) + list_storage_profiles_for_queue(farm_id, queue_id, params::Dict{String,<:Any}) + +Lists storage profiles for a queue. + +# Arguments +- `farm_id`: The farm ID of the queue's storage profile. +- `queue_id`: The queue ID for the storage profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. 
Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_storage_profiles_for_queue( + farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/storage-profiles"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_storage_profiles_for_queue( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/storage-profiles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists tags for a resource. + +# Arguments +- `resource_arn`: The resource ARN to list tags for. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tasks(farm_id, job_id, queue_id, step_id) + list_tasks(farm_id, job_id, queue_id, step_id, params::Dict{String,<:Any}) + +Lists tasks for a job. + +# Arguments +- `farm_id`: The farm ID connected to the tasks. +- `job_id`: The job ID for the tasks. +- `queue_id`: The queue ID connected to the tasks. +- `step_id`: The step ID for the tasks. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. +""" +function list_tasks( + farmId, jobId, queueId, stepId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/tasks"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tasks( + farmId, + jobId, + queueId, + stepId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/tasks", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_workers(farm_id, fleet_id) + list_workers(farm_id, fleet_id, params::Dict{String,<:Any}) + +Lists workers. + +# Arguments +- `farm_id`: The farm ID connected to the workers. +- `fleet_id`: The fleet ID of the workers. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. Use this parameter with + NextToken to get results as a set of sequential pages. +- `"nextToken"`: The token for the next set of results, or null to start from the beginning. 
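+
+# Example
+A drill-down sketch, assuming the parsed response exposes a `workers` array whose entries
+carry a `workerId` (placeholder farm and fleet IDs).
+```julia
+using AWS
+@service Deadline
+
+resp = Deadline.list_workers("farm-1234", "fleet-5678")
+for worker in get(resp, "workers", [])
+    # Fetch the full record for each worker in the fleet.
+    detail = Deadline.get_worker("farm-1234", "fleet-5678", worker["workerId"])
+    @info "worker" id=worker["workerId"] status=get(detail, "status", "unknown")
+end
+```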
+""" +function list_workers(farmId, fleetId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workers( + farmId, + fleetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "GET", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_metered_product(license_endpoint_id, product_id) + put_metered_product(license_endpoint_id, product_id, params::Dict{String,<:Any}) + +Adds a metered product. + +# Arguments +- `license_endpoint_id`: The license endpoint ID to add to the metered product. +- `product_id`: The product ID to add to the metered product. + +""" +function put_metered_product( + licenseEndpointId, productId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PUT", + "/2023-10-12/license-endpoints/$(licenseEndpointId)/metered-products/$(productId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_metered_product( + licenseEndpointId, + productId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PUT", + "/2023-10-12/license-endpoints/$(licenseEndpointId)/metered-products/$(productId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_jobs(farm_id, item_offset, queue_ids) + search_jobs(farm_id, item_offset, queue_ids, params::Dict{String,<:Any}) + +Searches for jobs. + +# Arguments +- `farm_id`: The farm ID of the job. +- `item_offset`: Defines how far into the scrollable list to start the return of results. +- `queue_ids`: The queue ID to use in the job search. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterExpressions"`: The filter expression, AND or OR, to use when searching among a + group of search strings in a resource. You can use two groupings per search each within + parenthesis (). +- `"pageSize"`: Specifies the number of items per page for the resource. +- `"sortExpressions"`: The search terms for a resource. +""" +function search_jobs( + farmId, itemOffset, queueIds; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/jobs", + Dict{String,Any}("itemOffset" => itemOffset, "queueIds" => queueIds); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_jobs( + farmId, + itemOffset, + queueIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("itemOffset" => itemOffset, "queueIds" => queueIds), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_steps(farm_id, item_offset, queue_ids) + search_steps(farm_id, item_offset, queue_ids, params::Dict{String,<:Any}) + +Searches for steps. + +# Arguments +- `farm_id`: The farm ID to use for the step search. +- `item_offset`: Defines how far into the scrollable list to start the return of results. +- `queue_ids`: The queue IDs in the step search. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"filterExpressions"`: The filter expression, AND or OR, to use when searching among a + group of search strings in a resource. You can use two groupings per search each within + parenthesis (). +- `"jobId"`: The job ID to use in the step search. +- `"pageSize"`: Specifies the number of items per page for the resource. +- `"sortExpressions"`: The search terms for a resource. +""" +function search_steps( + farmId, itemOffset, queueIds; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/steps", + Dict{String,Any}("itemOffset" => itemOffset, "queueIds" => queueIds); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_steps( + farmId, + itemOffset, + queueIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/steps", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("itemOffset" => itemOffset, "queueIds" => queueIds), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_tasks(farm_id, item_offset, queue_ids) + search_tasks(farm_id, item_offset, queue_ids, params::Dict{String,<:Any}) + +Searches for tasks. + +# Arguments +- `farm_id`: The farm ID of the task. +- `item_offset`: Defines how far into the scrollable list to start the return of results. +- `queue_ids`: The queue IDs to include in the search. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterExpressions"`: The filter expression, AND or OR, to use when searching among a + group of search strings in a resource. You can use two groupings per search each within + parenthesis (). +- `"jobId"`: The job ID for the task search. +- `"pageSize"`: Specifies the number of items per page for the resource. +- `"sortExpressions"`: The search terms for a resource. +""" +function search_tasks( + farmId, itemOffset, queueIds; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/tasks", + Dict{String,Any}("itemOffset" => itemOffset, "queueIds" => queueIds); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_tasks( + farmId, + itemOffset, + queueIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/tasks", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("itemOffset" => itemOffset, "queueIds" => queueIds), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_workers(farm_id, fleet_ids, item_offset) + search_workers(farm_id, fleet_ids, item_offset, params::Dict{String,<:Any}) + +Searches for workers. + +# Arguments +- `farm_id`: The farm ID in the workers search. +- `fleet_ids`: The fleet ID of the workers to search for. +- `item_offset`: Defines how far into the scrollable list to start the return of results. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterExpressions"`: The filter expression, AND or OR, to use when searching among a + group of search strings in a resource. You can use two groupings per search each within + parenthesis (). +- `"pageSize"`: Specifies the number of items per page for the resource. 
+- `"sortExpressions"`: The search terms for a resource. +""" +function search_workers( + farmId, fleetIds, itemOffset; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/workers", + Dict{String,Any}("fleetIds" => fleetIds, "itemOffset" => itemOffset); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_workers( + farmId, + fleetIds, + itemOffset, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/search/workers", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("fleetIds" => fleetIds, "itemOffset" => itemOffset), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_sessions_statistics_aggregation(end_time, farm_id, group_by, resource_ids, start_time, statistics) + start_sessions_statistics_aggregation(end_time, farm_id, group_by, resource_ids, start_time, statistics, params::Dict{String,<:Any}) + +Starts an asynchronous request for getting aggregated statistics about queues and farms. +Get the statistics using the GetSessionsStatisticsAggregation operation. Statistics are +available for 1 hour after you call the StartSessionsStatisticsAggregation operation. + +# Arguments +- `end_time`: The Linux timestamp of the date and time that the statistics end. +- `farm_id`: The identifier of the farm that contains queues or fleets to return statistics + for. +- `group_by`: The field to use to group the statistics. +- `resource_ids`: A list of fleet IDs or queue IDs to gather statistics for. +- `start_time`: The Linux timestamp of the date and time that the statistics start. +- `statistics`: One to four statistics to return. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"period"`: The period to aggregate the statistics. +- `"timezone"`: The timezone to use for the statistics. Use UTC notation such as \"UTC+8.\" +""" +function start_sessions_statistics_aggregation( + endTime, + farmId, + groupBy, + resourceIds, + startTime, + statistics; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/sessions-statistics-aggregation", + Dict{String,Any}( + "endTime" => endTime, + "groupBy" => groupBy, + "resourceIds" => resourceIds, + "startTime" => startTime, + "statistics" => statistics, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_sessions_statistics_aggregation( + endTime, + farmId, + groupBy, + resourceIds, + startTime, + statistics, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "POST", + "/2023-10-12/farms/$(farmId)/sessions-statistics-aggregation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endTime" => endTime, + "groupBy" => groupBy, + "resourceIds" => resourceIds, + "startTime" => startTime, + "statistics" => statistics, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn) + tag_resource(resource_arn, params::Dict{String,<:Any}) + +Tags a resource using the resource's ARN and desired tags. + +# Arguments +- `resource_arn`: The ARN of the resource to apply tags to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
+- `"tags"`: Each tag consists of a tag key and a tag value. Tag keys and values are both
+  required, but tag values can be empty strings.
+"""
+function tag_resource(resourceArn; aws_config::AbstractAWSConfig=global_aws_config())
+    return deadline(
+        "POST",
+        "/2023-10-12/tags/$(resourceArn)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function tag_resource(
+    resourceArn,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return deadline(
+        "POST",
+        "/2023-10-12/tags/$(resourceArn)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    untag_resource(resource_arn, tag_keys)
+    untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any})
+
+Removes a tag from a resource using the resource's ARN and tag to remove.
+
+# Arguments
+- `resource_arn`: The ARN of the resource to remove the tag from.
+- `tag_keys`: The keys of the tags to remove.
+
+"""
+function untag_resource(
+    resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return deadline(
+        "DELETE",
+        "/2023-10-12/tags/$(resourceArn)",
+        Dict{String,Any}("tagKeys" => tagKeys);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function untag_resource(
+    resourceArn,
+    tagKeys,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return deadline(
+        "DELETE",
+        "/2023-10-12/tags/$(resourceArn)",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    update_budget(budget_id, farm_id)
+    update_budget(budget_id, farm_id, params::Dict{String,<:Any})
+
+Updates a budget that sets spending thresholds for rendering activity.
+
+# Arguments
+- `budget_id`: The budget ID to update.
+- `farm_id`: The farm ID of the budget to update.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of
+  the same request.
+- `"actionsToAdd"`: The budget actions to add. Budget actions specify what happens when the
+  budget runs out.
+- `"actionsToRemove"`: The budget actions to remove from the budget.
+- `"approximateDollarLimit"`: The dollar limit to update on the budget. Based on consumed
+  usage.
+- `"description"`: The description of the budget to update.
+- `"displayName"`: The display name of the budget to update.
+- `"schedule"`: The schedule to update.
+- `"status"`: Updates the status of the budget. ACTIVE–The budget is being evaluated.
+  INACTIVE–The budget is inactive. This can include Expired, Canceled, or Deleted
+  statuses.
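+
+# Example
+A sketch of updating a couple of fields; the values are illustrative. A fresh
+`X-Amz-Client-Token` is generated automatically, or you can pass your own (as here) to make
+retries idempotent.
+```julia
+using AWS
+@service Deadline
+
+Deadline.update_budget(
+    "budget-1234",
+    "farm-5678",
+    Dict(
+        "displayName" => "Monthly render budget",
+        "approximateDollarLimit" => 2500.0,
+        "X-Amz-Client-Token" => "my-idempotency-token-001",
+    ),
+)
+```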
+""" +function update_budget(budgetId, farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/budgets/$(budgetId)", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_budget( + budgetId, + farmId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/budgets/$(budgetId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_farm(farm_id) + update_farm(farm_id, params::Dict{String,<:Any}) + +Updates a farm. + +# Arguments +- `farm_id`: The farm ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the farm to update. +- `"displayName"`: The display name of the farm to update. +""" +function update_farm(farmId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_farm( + farmId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_fleet(farm_id, fleet_id) + update_fleet(farm_id, fleet_id, params::Dict{String,<:Any}) + +Updates a fleet. + +# Arguments +- `farm_id`: The farm ID to update. +- `fleet_id`: The fleet ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"configuration"`: The fleet configuration to update. +- `"description"`: The description of the fleet to update. +- `"displayName"`: The display name of the fleet to update. +- `"maxWorkerCount"`: The maximum number of workers in the fleet. +- `"minWorkerCount"`: The minimum number of workers in the fleet. +- `"roleArn"`: The IAM role ARN that the fleet's workers assume while running jobs. +""" +function update_fleet(farmId, fleetId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_fleet( + farmId, + fleetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_job(farm_id, job_id, queue_id) + update_job(farm_id, job_id, queue_id, params::Dict{String,<:Any}) + +Updates a job. + +# Arguments +- `farm_id`: The farm ID of the job to update. +- `job_id`: The job ID to update. +- `queue_id`: The queue ID of the job to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"lifecycleStatus"`: The status of a job in its lifecycle. +- `"maxFailedTasksCount"`: The number of task failures before the job stops running and is + marked as FAILED. +- `"maxRetriesPerTask"`: The maximum number of retries for a job. +- `"priority"`: The job priority to update. +- `"targetTaskRunStatus"`: The task status to update the job's tasks to. +""" +function update_job( + farmId, jobId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_job( + farmId, + jobId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_monitor(monitor_id) + update_monitor(monitor_id, params::Dict{String,<:Any}) + +Modifies the settings for a Deadline Cloud monitor. You can modify one or all of the +settings when you call UpdateMonitor. + +# Arguments +- `monitor_id`: The unique identifier of the monitor to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"displayName"`: The new value to use for the monitor's display name. +- `"roleArn"`: The Amazon Resource Name (ARN) of the new IAM role to use with the monitor. +- `"subdomain"`: The new value of the subdomain to use when forming the monitor URL. +""" +function update_monitor(monitorId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "PATCH", + "/2023-10-12/monitors/$(monitorId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_monitor( + monitorId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/monitors/$(monitorId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_queue(farm_id, queue_id) + update_queue(farm_id, queue_id, params::Dict{String,<:Any}) + +Updates a queue. + +# Arguments +- `farm_id`: The farm ID to update in the queue. +- `queue_id`: The queue ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The idempotency token to update in the queue. +- `"allowedStorageProfileIdsToAdd"`: The storage profile IDs to add. +- `"allowedStorageProfileIdsToRemove"`: The storage profile ID to remove. +- `"defaultBudgetAction"`: The default action to take for a queue update if a budget isn't + configured. +- `"description"`: The description of the queue to update. +- `"displayName"`: The display name of the queue to update. +- `"jobAttachmentSettings"`: The job attachment settings to update for the queue. +- `"jobRunAsUser"`: Update the jobs in the queue to run as a specified POSIX user. +- `"requiredFileSystemLocationNamesToAdd"`: The required file system location names to add + to the queue. 
+- `"requiredFileSystemLocationNamesToRemove"`: The required file system location names to + remove from the queue. +- `"roleArn"`: The IAM role ARN that's used to run jobs from this queue. +""" +function update_queue(farmId, queueId; aws_config::AbstractAWSConfig=global_aws_config()) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_queue( + farmId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_queue_environment(farm_id, queue_environment_id, queue_id) + update_queue_environment(farm_id, queue_environment_id, queue_id, params::Dict{String,<:Any}) + +Updates the queue environment. + +# Arguments +- `farm_id`: The farm ID of the queue environment to update. +- `queue_environment_id`: The queue environment ID to update. +- `queue_id`: The queue ID of the queue environment to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"priority"`: The priority to update. +- `"template"`: The template to update. +- `"templateType"`: The template type to update. +""" +function update_queue_environment( + farmId, queueEnvironmentId, queueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments/$(queueEnvironmentId)", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_queue_environment( + farmId, + queueEnvironmentId, + queueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/environments/$(queueEnvironmentId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_queue_fleet_association(farm_id, fleet_id, queue_id, status) + update_queue_fleet_association(farm_id, fleet_id, queue_id, status, params::Dict{String,<:Any}) + +Updates a queue-fleet association. + +# Arguments +- `farm_id`: The farm ID to update. +- `fleet_id`: The fleet ID to update. +- `queue_id`: The queue ID to update. +- `status`: The status to update. 
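+
+# Example
+An illustrative call showing the positional order of the identifiers; all IDs are
+placeholders and ACTIVE is used only as an example status value:
+
+```julia
+update_queue_fleet_association(
+    "farm-0123456789abcdef0123456789abcdef",
+    "fleet-0123456789abcdef0123456789abcdef",
+    "queue-0123456789abcdef0123456789abcdef",
+    "ACTIVE",
+)
+```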
+ +""" +function update_queue_fleet_association( + farmId, fleetId, queueId, status; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations/$(queueId)/$(fleetId)", + Dict{String,Any}("status" => status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_queue_fleet_association( + farmId, + fleetId, + queueId, + status, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queue-fleet-associations/$(queueId)/$(fleetId)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_session(farm_id, job_id, queue_id, session_id, target_lifecycle_status) + update_session(farm_id, job_id, queue_id, session_id, target_lifecycle_status, params::Dict{String,<:Any}) + +Updates a session. + +# Arguments +- `farm_id`: The farm ID to update in the session. +- `job_id`: The job ID to update in the session. +- `queue_id`: The queue ID to update in the session. +- `session_id`: The session ID to update. +- `target_lifecycle_status`: The life cycle status to update in the session. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +""" +function update_session( + farmId, + jobId, + queueId, + sessionId, + targetLifecycleStatus; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/sessions/$(sessionId)", + Dict{String,Any}( + "targetLifecycleStatus" => targetLifecycleStatus, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_session( + farmId, + jobId, + queueId, + sessionId, + targetLifecycleStatus, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/sessions/$(sessionId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "targetLifecycleStatus" => targetLifecycleStatus, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_step(farm_id, job_id, queue_id, step_id, target_task_run_status) + update_step(farm_id, job_id, queue_id, step_id, target_task_run_status, params::Dict{String,<:Any}) + +Updates a step. + +# Arguments +- `farm_id`: The farm ID to update. +- `job_id`: The job ID to update. +- `queue_id`: The queue ID to update. +- `step_id`: The step ID to update. +- `target_task_run_status`: The task status to update the step's tasks to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. 
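+
+# Example
+An illustrative call; the IDs are placeholders and READY is assumed to be an accepted
+target task run status:
+
+```julia
+update_step(
+    "farm-0123456789abcdef0123456789abcdef",
+    "job-0123456789abcdef0123456789abcdef",
+    "queue-0123456789abcdef0123456789abcdef",
+    "step-0123456789abcdef0123456789abcdef",
+    "READY",
+)
+```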
+""" +function update_step( + farmId, + jobId, + queueId, + stepId, + targetTaskRunStatus; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)", + Dict{String,Any}( + "targetTaskRunStatus" => targetTaskRunStatus, + "X-Amz-Client-Token" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_step( + farmId, + jobId, + queueId, + stepId, + targetTaskRunStatus, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "targetTaskRunStatus" => targetTaskRunStatus, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_storage_profile(farm_id, storage_profile_id) + update_storage_profile(farm_id, storage_profile_id, params::Dict{String,<:Any}) + +Updates a storage profile. + +# Arguments +- `farm_id`: The farm ID to update. +- `storage_profile_id`: The storage profile ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. +- `"displayName"`: The display name of the storage profile to update. +- `"fileSystemLocationsToAdd"`: The file system location names to add. +- `"fileSystemLocationsToRemove"`: The file system location names to remove. +- `"osFamily"`: The OS system to update. +""" +function update_storage_profile( + farmId, storageProfileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/storage-profiles/$(storageProfileId)", + Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_storage_profile( + farmId, + storageProfileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/storage-profiles/$(storageProfileId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_task(farm_id, job_id, queue_id, step_id, target_run_status, task_id) + update_task(farm_id, job_id, queue_id, step_id, target_run_status, task_id, params::Dict{String,<:Any}) + +Updates a task. + +# Arguments +- `farm_id`: The farm ID to update. +- `job_id`: The job ID to update. +- `queue_id`: The queue ID to update. +- `step_id`: The step ID to update. +- `target_run_status`: The run status with which to start the task. +- `task_id`: The task ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of + the same request. 
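+
+# Example
+An illustrative call; note that the task ID is the last positional argument, after the
+target run status. All identifiers are placeholders and SUSPENDED is assumed to be an
+accepted status value:
+
+```julia
+update_task(
+    "farm-0123456789abcdef0123456789abcdef",
+    "job-0123456789abcdef0123456789abcdef",
+    "queue-0123456789abcdef0123456789abcdef",
+    "step-0123456789abcdef0123456789abcdef",
+    "SUSPENDED",
+    "task-0123456789abcdef0123456789abcdef",
+)
+```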
+""" +function update_task( + farmId, + jobId, + queueId, + stepId, + targetRunStatus, + taskId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/tasks/$(taskId)", + Dict{String,Any}( + "targetRunStatus" => targetRunStatus, "X-Amz-Client-Token" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_task( + farmId, + jobId, + queueId, + stepId, + targetRunStatus, + taskId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/queues/$(queueId)/jobs/$(jobId)/steps/$(stepId)/tasks/$(taskId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "targetRunStatus" => targetRunStatus, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_worker(farm_id, fleet_id, worker_id) + update_worker(farm_id, fleet_id, worker_id, params::Dict{String,<:Any}) + +Updates a worker. + +# Arguments +- `farm_id`: The farm ID to update. +- `fleet_id`: The fleet ID to update. +- `worker_id`: The worker ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"capabilities"`: The worker capabilities to update. +- `"hostProperties"`: The host properties to update. +- `"status"`: The worker status to update. +""" +function update_worker( + farmId, fleetId, workerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_worker( + farmId, + fleetId, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_worker_schedule(farm_id, fleet_id, worker_id) + update_worker_schedule(farm_id, fleet_id, worker_id, params::Dict{String,<:Any}) + +Updates the schedule for a worker. + +# Arguments +- `farm_id`: The farm ID to update. +- `fleet_id`: The fleet ID to update. +- `worker_id`: The worker ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"updatedSessionActions"`: The session actions associated with the worker schedule to + update. 
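+
+# Example
+A bare-bones call with placeholder identifiers; pass a params dictionary to include
+`updatedSessionActions`:
+
+```julia
+update_worker_schedule(
+    "farm-0123456789abcdef0123456789abcdef",
+    "fleet-0123456789abcdef0123456789abcdef",
+    "worker-0123456789abcdef0123456789abcdef",
+)
+```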
+""" +function update_worker_schedule( + farmId, fleetId, workerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/schedule"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_worker_schedule( + farmId, + fleetId, + workerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return deadline( + "PATCH", + "/2023-10-12/farms/$(farmId)/fleets/$(fleetId)/workers/$(workerId)/schedule", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/detective.jl b/src/services/detective.jl index dfb89a0081..f99d866562 100644 --- a/src/services/detective.jl +++ b/src/services/detective.jl @@ -129,17 +129,12 @@ end Creates a new behavior graph for the calling account, and sets that account as the administrator account. This operation is called by the account that is enabling Detective. -Before you try to enable Detective, make sure that your account has been enrolled in Amazon -GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable -Detective. If you do meet the GuardDuty prerequisite, then when you make the request to -enable Detective, it checks whether your data volume is within the Detective quota. If it -exceeds the quota, then you cannot enable Detective. The operation also enables Detective -for the calling account in the currently selected Region. It returns the ARN of the new -behavior graph. CreateGraph triggers a process to create the corresponding data tables for -the new behavior graph. An account can only be the administrator account for one behavior -graph within a Region. If the same account calls CreateGraph with the same administrator -account, it always returns the same behavior graph ARN. It does not create a new behavior -graph. +The operation also enables Detective for the calling account in the currently selected +Region. It returns the ARN of the new behavior graph. CreateGraph triggers a process to +create the corresponding data tables for the new behavior graph. An account can only be the +administrator account for one behavior graph within a Region. If the same account calls +CreateGraph with the same administrator account, it always returns the same behavior graph +ARN. It does not create a new behavior graph. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -488,6 +483,55 @@ function enable_organization_admin_account( ) end +""" + get_investigation(graph_arn, investigation_id) + get_investigation(graph_arn, investigation_id, params::Dict{String,<:Any}) + +Detective investigations lets you investigate IAM users and IAM roles using indicators of +compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, +system, or environment that can (with a high level of confidence) identify malicious +activity or a security incident. GetInvestigation returns the investigation results of an +investigation for a behavior graph. + +# Arguments +- `graph_arn`: The Amazon Resource Name (ARN) of the behavior graph. +- `investigation_id`: The investigation ID of the investigation report. 
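+
+# Example
+An illustrative call; the graph ARN and investigation ID below are placeholders:
+
+```julia
+get_investigation(
+    "arn:aws:detective:us-east-1:111122223333:graph:123412341234",
+    "25b6c3a7f9d84b4b9a2e0f6d8c1a2b3c",
+)
+```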
+ +""" +function get_investigation( + GraphArn, InvestigationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return detective( + "POST", + "/investigations/getInvestigation", + Dict{String,Any}("GraphArn" => GraphArn, "InvestigationId" => InvestigationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_investigation( + GraphArn, + InvestigationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return detective( + "POST", + "/investigations/getInvestigation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "GraphArn" => GraphArn, "InvestigationId" => InvestigationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_members(account_ids, graph_arn) get_members(account_ids, graph_arn, params::Dict{String,<:Any}) @@ -611,6 +655,110 @@ function list_graphs( ) end +""" + list_indicators(graph_arn, investigation_id) + list_indicators(graph_arn, investigation_id, params::Dict{String,<:Any}) + +Gets the indicators from an investigation. You can use the information from the indicators +to determine if an IAM user and/or IAM role is involved in an unusual activity that could +indicate malicious behavior and its impact. + +# Arguments +- `graph_arn`: The Amazon Resource Name (ARN) of the behavior graph. +- `investigation_id`: The investigation ID of the investigation report. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IndicatorType"`: For the list of indicators of compromise that are generated by + Detective investigations, see Detective investigations. +- `"MaxResults"`: Lists the maximum number of indicators in a page. +- `"NextToken"`: Lists if there are more results available. The value of nextToken is a + unique pagination token for each page. Repeat the call using the returned token to retrieve + the next page. Keep all other arguments unchanged. Each pagination token expires after 24 + hours. Using an expired pagination token will return a Validation Exception error. +""" +function list_indicators( + GraphArn, InvestigationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return detective( + "POST", + "/investigations/listIndicators", + Dict{String,Any}("GraphArn" => GraphArn, "InvestigationId" => InvestigationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_indicators( + GraphArn, + InvestigationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return detective( + "POST", + "/investigations/listIndicators", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "GraphArn" => GraphArn, "InvestigationId" => InvestigationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_investigations(graph_arn) + list_investigations(graph_arn, params::Dict{String,<:Any}) + +Detective investigations lets you investigate IAM users and IAM roles using indicators of +compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, +system, or environment that can (with a high level of confidence) identify malicious +activity or a security incident. ListInvestigations lists all active Detective +investigations. + +# Arguments +- `graph_arn`: The Amazon Resource Name (ARN) of the behavior graph. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
+- `"FilterCriteria"`: Filters the investigation results based on criteria.
+- `"MaxResults"`: Lists the maximum number of investigations in a page.
+- `"NextToken"`: Lists if there are more results available. The value of nextToken is a
+  unique pagination token for each page. Repeat the call using the returned token to retrieve
+  the next page. Keep all other arguments unchanged. Each pagination token expires after 24
+  hours. Using an expired pagination token will return a Validation Exception error.
+- `"SortCriteria"`: Sorts the investigation results based on criteria.
+"""
+function list_investigations(GraphArn; aws_config::AbstractAWSConfig=global_aws_config())
+    return detective(
+        "POST",
+        "/investigations/listInvestigations",
+        Dict{String,Any}("GraphArn" => GraphArn);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function list_investigations(
+    GraphArn,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return detective(
+        "POST",
+        "/investigations/listInvestigations",
+        Dict{String,Any}(
+            mergewith(_merge, Dict{String,Any}("GraphArn" => GraphArn), params)
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     list_invitations()
     list_invitations(params::Dict{String,<:Any})
@@ -803,6 +951,73 @@ function reject_invitation(
     )
 end
 
+"""
+    start_investigation(entity_arn, graph_arn, scope_end_time, scope_start_time)
+    start_investigation(entity_arn, graph_arn, scope_end_time, scope_start_time, params::Dict{String,<:Any})
+
+Detective investigations lets you investigate IAM users and IAM roles using indicators of
+compromise. An indicator of compromise (IOC) is an artifact observed in or on a network,
+system, or environment that can (with a high level of confidence) identify malicious
+activity or a security incident. StartInvestigation initiates an investigation on an entity
+in a behavior graph.
+
+# Arguments
+- `entity_arn`: The unique Amazon Resource Name (ARN) of the IAM user and IAM role.
+- `graph_arn`: The Amazon Resource Name (ARN) of the behavior graph.
+- `scope_end_time`: The date and time when the investigation ended. The value is a UTC
+  ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.
+- `scope_start_time`: The date and time when the investigation began. The value is a UTC
+  ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.
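+
+# Example
+An illustrative call; note that the scope end time comes before the scope start time in
+the argument list. The ARNs are placeholders and the timestamps follow the ISO8601 format
+described above:
+
+```julia
+start_investigation(
+    "arn:aws:iam::111122223333:role/example-role",
+    "arn:aws:detective:us-east-1:111122223333:graph:123412341234",
+    "2021-08-18T16:35:56.284Z",
+    "2021-08-18T00:00:00.000Z",
+)
+```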
+ +""" +function start_investigation( + EntityArn, + GraphArn, + ScopeEndTime, + ScopeStartTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return detective( + "POST", + "/investigations/startInvestigation", + Dict{String,Any}( + "EntityArn" => EntityArn, + "GraphArn" => GraphArn, + "ScopeEndTime" => ScopeEndTime, + "ScopeStartTime" => ScopeStartTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_investigation( + EntityArn, + GraphArn, + ScopeEndTime, + ScopeStartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return detective( + "POST", + "/investigations/startInvestigation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EntityArn" => EntityArn, + "GraphArn" => GraphArn, + "ScopeEndTime" => ScopeEndTime, + "ScopeStartTime" => ScopeStartTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_monitoring_member(account_id, graph_arn) start_monitoring_member(account_id, graph_arn, params::Dict{String,<:Any}) @@ -972,6 +1187,58 @@ function update_datasource_packages( ) end +""" + update_investigation_state(graph_arn, investigation_id, state) + update_investigation_state(graph_arn, investigation_id, state, params::Dict{String,<:Any}) + +Updates the state of an investigation. + +# Arguments +- `graph_arn`: The Amazon Resource Name (ARN) of the behavior graph. +- `investigation_id`: The investigation ID of the investigation report. +- `state`: The current state of the investigation. An archived investigation indicates you + have completed reviewing the investigation. + +""" +function update_investigation_state( + GraphArn, InvestigationId, State; aws_config::AbstractAWSConfig=global_aws_config() +) + return detective( + "POST", + "/investigations/updateInvestigationState", + Dict{String,Any}( + "GraphArn" => GraphArn, "InvestigationId" => InvestigationId, "State" => State + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_investigation_state( + GraphArn, + InvestigationId, + State, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return detective( + "POST", + "/investigations/updateInvestigationState", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "GraphArn" => GraphArn, + "InvestigationId" => InvestigationId, + "State" => State, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_organization_configuration(graph_arn) update_organization_configuration(graph_arn, params::Dict{String,<:Any}) diff --git a/src/services/devops_guru.jl b/src/services/devops_guru.jl index 99dd0e64f4..3138592fa4 100644 --- a/src/services/devops_guru.jl +++ b/src/services/devops_guru.jl @@ -11,16 +11,12 @@ using AWS.UUIDs Adds a notification channel to DevOps Guru. A notification channel is used to notify you about important DevOps Guru events, such as when an insight is generated. If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru -permission to it notifications. DevOps Guru adds the required policy on your behalf to send -notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS -topics. For more information, see Permissions for cross account Amazon SNS topics. If you -use an Amazon SNS topic in another account, you must attach a policy to it that grants -DevOps Guru permission to it notifications. 
DevOps Guru adds the required policy on your -behalf to send notifications using Amazon SNS in your account. For more information, see -Permissions for cross account Amazon SNS topics. If you use an Amazon SNS topic that is -encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then -you must add permissions to the CMK. For more information, see Permissions for Amazon Web -Services KMS–encrypted Amazon SNS topics. +permission to send it notifications. DevOps Guru adds the required policy on your behalf to +send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS +topics. For more information, see Permissions for Amazon SNS topics. If you use an Amazon +SNS topic that is encrypted by an Amazon Web Services Key Management Service +customer-managed key (CMK), then you must add permissions to the CMK. For more information, +see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics. # Arguments - `config`: A NotificationChannelConfig object that specifies what type of notification diff --git a/src/services/dlm.jl b/src/services/dlm.jl index 65d2e8ba02..207e40dec4 100644 --- a/src/services/dlm.jl +++ b/src/services/dlm.jl @@ -5,30 +5,66 @@ using AWS.Compat using AWS.UUIDs """ - create_lifecycle_policy(description, execution_role_arn, policy_details, state) - create_lifecycle_policy(description, execution_role_arn, policy_details, state, params::Dict{String,<:Any}) + create_lifecycle_policy(description, execution_role_arn, state) + create_lifecycle_policy(description, execution_role_arn, state, params::Dict{String,<:Any}) -Creates a policy to manage the lifecycle of the specified Amazon Web Services resources. -You can create up to 100 lifecycle policies. +Creates an Amazon Data Lifecycle Manager lifecycle policy. Amazon Data Lifecycle Manager +supports the following policy types: Custom EBS snapshot policy Custom EBS-backed AMI +policy Cross-account copy event policy Default policy for EBS snapshots Default +policy for EBS-backed AMIs For more information, see Default policies vs custom +policies. If you create a default policy, you can specify the request parameters either in +the request body, or in the PolicyDetails request structure, but not both. # Arguments - `description`: A description of the lifecycle policy. The characters ^[0-9A-Za-z _-]+ are supported. - `execution_role_arn`: The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy. -- `policy_details`: The configuration details of the lifecycle policy. -- `state`: The desired activation state of the lifecycle policy after creation. +- `state`: The activation state of the lifecycle policy after creation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CopyTags"`: [Default policies only] Indicates whether the policy should copy tags from + the source resource to the snapshot or AMI. If you do not specify a value, the default is + false. Default: false +- `"CreateInterval"`: [Default policies only] Specifies how often the policy should run + and create snapshots or AMIs. The creation frequency can range from 1 to 7 days. If you do + not specify a value, the default is 1. Default: 1 +- `"CrossRegionCopyTargets"`: [Default policies only] Specifies destination Regions for + snapshot or AMI copies. You can specify up to 3 destination Regions. If you do not want to + create cross-Region copies, omit this parameter. 
+- `"DefaultPolicy"`: [Default policies only] Specify the type of default policy to create. + To create a default policy for EBS snapshots, that creates snapshots of all volumes in + the Region that do not have recent backups, specify VOLUME. To create a default policy + for EBS-backed AMIs, that creates EBS-backed AMIs from all instances in the Region that do + not have recent backups, specify INSTANCE. +- `"Exclusions"`: [Default policies only] Specifies exclusion parameters for volumes or + instances for which you do not want to create snapshots or AMIs. The policy will not create + snapshots or AMIs for target resources that match any of the specified exclusion parameters. +- `"ExtendDeletion"`: [Default policies only] Defines the snapshot or AMI retention + behavior for the policy if the source volume or instance is deleted, or if the policy + enters the error, disabled, or deleted state. By default (ExtendDeletion=false): If a + source resource is deleted, Amazon Data Lifecycle Manager will continue to delete + previously created snapshots or AMIs, up to but not including the last one, based on the + specified retention period. If you want Amazon Data Lifecycle Manager to delete all + snapshots or AMIs, including the last one, specify true. If a policy enters the error, + disabled, or deleted state, Amazon Data Lifecycle Manager stops deleting snapshots and + AMIs. If you want Amazon Data Lifecycle Manager to continue deleting snapshots or AMIs, + including the last one, if the policy enters one of these states, specify true. If you + enable extended deletion (ExtendDeletion=true), you override both default behaviors + simultaneously. If you do not specify a value, the default is false. Default: false +- `"PolicyDetails"`: The configuration details of the lifecycle policy. If you create a + default policy, you can specify the request parameters either in the request body, or in + the PolicyDetails request structure, but not both. +- `"RetainInterval"`: [Default policies only] Specifies how long the policy should retain + snapshots or AMIs before deleting them. The retention period can range from 2 to 14 days, + but it must be greater than the creation frequency to ensure that the policy retains at + least 1 snapshot or AMI at any given time. If you do not specify a value, the default is 7. + Default: 7 - `"Tags"`: The tags to apply to the lifecycle policy during creation. 
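+
+# Example
+A sketch that creates a default policy for EBS snapshots; the account ID and role name are
+placeholders:
+
+```julia
+create_lifecycle_policy(
+    "Default daily snapshot policy",
+    "arn:aws:iam::111122223333:role/AWSDataLifecycleManagerDefaultRole",
+    "ENABLED",
+    Dict("DefaultPolicy" => "VOLUME"),
+)
+```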
""" function create_lifecycle_policy( - Description, - ExecutionRoleArn, - PolicyDetails, - State; - aws_config::AbstractAWSConfig=global_aws_config(), + Description, ExecutionRoleArn, State; aws_config::AbstractAWSConfig=global_aws_config() ) return dlm( "POST", @@ -36,7 +72,6 @@ function create_lifecycle_policy( Dict{String,Any}( "Description" => Description, "ExecutionRoleArn" => ExecutionRoleArn, - "PolicyDetails" => PolicyDetails, "State" => State, ); aws_config=aws_config, @@ -46,7 +81,6 @@ end function create_lifecycle_policy( Description, ExecutionRoleArn, - PolicyDetails, State, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -60,7 +94,6 @@ function create_lifecycle_policy( Dict{String,Any}( "Description" => Description, "ExecutionRoleArn" => ExecutionRoleArn, - "PolicyDetails" => PolicyDetails, "State" => State, ), params, @@ -87,7 +120,7 @@ function delete_lifecycle_policy( ) return dlm( "DELETE", - "/policies/$(policyId)/"; + "/policies/$(policyId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -99,7 +132,7 @@ function delete_lifecycle_policy( ) return dlm( "DELETE", - "/policies/$(policyId)/", + "/policies/$(policyId)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -115,6 +148,10 @@ complete information about a policy, use GetLifecyclePolicy. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"defaultPolicyType"`: [Default policies only] Specifies the type of default policy to + get. Specify one of the following: VOLUME - To get only the default policy for EBS + snapshots INSTANCE - To get only the default policy for EBS-backed AMIs ALL - To get + all default policies - `"policyIds"`: The identifiers of the data lifecycle policies. - `"resourceTypes"`: The resource type. - `"state"`: The activation state. @@ -147,7 +184,7 @@ Gets detailed information about the specified lifecycle policy. function get_lifecycle_policy(policyId; aws_config::AbstractAWSConfig=global_aws_config()) return dlm( "GET", - "/policies/$(policyId)/"; + "/policies/$(policyId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -159,7 +196,7 @@ function get_lifecycle_policy( ) return dlm( "GET", - "/policies/$(policyId)/", + "/policies/$(policyId)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -284,11 +321,37 @@ Modify lifecycle policies. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CopyTags"`: [Default policies only] Indicates whether the policy should copy tags from + the source resource to the snapshot or AMI. +- `"CreateInterval"`: [Default policies only] Specifies how often the policy should run + and create snapshots or AMIs. The creation frequency can range from 1 to 7 days. +- `"CrossRegionCopyTargets"`: [Default policies only] Specifies destination Regions for + snapshot or AMI copies. You can specify up to 3 destination Regions. If you do not want to + create cross-Region copies, omit this parameter. - `"Description"`: A description of the lifecycle policy. +- `"Exclusions"`: [Default policies only] Specifies exclusion parameters for volumes or + instances for which you do not want to create snapshots or AMIs. The policy will not create + snapshots or AMIs for target resources that match any of the specified exclusion parameters. - `"ExecutionRoleArn"`: The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy. 
+- `"ExtendDeletion"`: [Default policies only] Defines the snapshot or AMI retention + behavior for the policy if the source volume or instance is deleted, or if the policy + enters the error, disabled, or deleted state. By default (ExtendDeletion=false): If a + source resource is deleted, Amazon Data Lifecycle Manager will continue to delete + previously created snapshots or AMIs, up to but not including the last one, based on the + specified retention period. If you want Amazon Data Lifecycle Manager to delete all + snapshots or AMIs, including the last one, specify true. If a policy enters the error, + disabled, or deleted state, Amazon Data Lifecycle Manager stops deleting snapshots and + AMIs. If you want Amazon Data Lifecycle Manager to continue deleting snapshots or AMIs, + including the last one, if the policy enters one of these states, specify true. If you + enable extended deletion (ExtendDeletion=true), you override both default behaviors + simultaneously. Default: false - `"PolicyDetails"`: The configuration of the lifecycle policy. You cannot update the policy type or the resource type. +- `"RetainInterval"`: [Default policies only] Specifies how long the policy should retain + snapshots or AMIs before deleting them. The retention period can range from 2 to 14 days, + but it must be greater than the creation frequency to ensure that the policy retains at + least 1 snapshot or AMI at any given time. - `"State"`: The desired activation state of the lifecycle policy after creation. """ function update_lifecycle_policy( diff --git a/src/services/docdb.jl b/src/services/docdb.jl index a85fcf8b16..373f9fb640 100644 --- a/src/services/docdb.jl +++ b/src/services/docdb.jl @@ -406,6 +406,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. - `"StorageEncrypted"`: Specifies whether the cluster is encrypted. +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the + Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 + Default value is standard When you create a DocumentDB DB cluster with the storage type + set to iopt1, the storage type is returned in the response. The storage type isn't returned + when you set it to standard. - `"Tags"`: The tags to be assigned to the cluster. - `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this cluster. @@ -592,6 +598,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AvailabilityZone"`: The Amazon EC2 Availability Zone that the instance is created in. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. For more information, see Updating Your Amazon DocumentDB TLS + Certificates and Encrypting Data in Transit in the Amazon DocumentDB Developer Guide. - `"CopyTagsToSnapshot"`: A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. 
- `"EnablePerformanceInsights"`: A value that indicates whether to enable Performance @@ -1947,6 +1956,10 @@ configuration parameters by specifying these parameters and the new values in th # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are + allowed. Constraints: You must allow major version upgrades when specifying a value for the + EngineVersion parameter that is a different major version than the DB cluster's current + version. - `"ApplyImmediately"`: A value that specifies whether the changes in this request and any pending changes are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cluster. If this parameter is set to false, @@ -1970,7 +1983,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted. - `"EngineVersion"`: The version number of the database engine to which you want to - upgrade. Modifying engine version is not supported on Amazon DocumentDB. + upgrade. Changing this parameter results in an outage. The change is applied during the + next maintenance window unless ApplyImmediately is enabled. To list all of the available + engine versions for Amazon DocumentDB use the following command: aws docdb + describe-db-engine-versions --engine docdb --query \"DBEngineVersions[].EngineVersion\" - `"MasterUserPassword"`: The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@). Constraints: Must contain from 8 to 100 characters. @@ -1991,6 +2007,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the + Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 + Default value is standard - `"VpcSecurityGroupIds"`: A list of virtual private cloud (VPC) security groups that the cluster will belong to. """ @@ -2183,6 +2203,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DocumentDB does not perform minor version upgrades regardless of the value set. - `"CACertificateIdentifier"`: Indicates the certificate that needs to be associated with the instance. +- `"CertificateRotationRestart"`: Specifies whether the DB instance is restarted when you + rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate + your SSL/TLS certificate. The certificate is not updated until the DB instance is + restarted. Set this parameter only if you are not using SSL/TLS to connect to the DB + instance. If you are using SSL/TLS to connect to the DB instance, see Updating Your Amazon + DocumentDB TLS Certificates and Encrypting Data in Transit in the Amazon DocumentDB + Developer Guide. - `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. 
- `"DBInstanceClass"`: The new compute and memory capacity of the instance; for example, @@ -2701,6 +2728,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DB cluster is not encrypted. - `"Port"`: The port number on which the new cluster accepts connections. Constraints: Must be a value from 1150 to 65535. Default: The same port as the original cluster. +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the + Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 + Default value is standard - `"Tags"`: The tags to be assigned to the restored cluster. - `"VpcSecurityGroupIds"`: A list of virtual private cloud (VPC) security groups that the new cluster will belong to. @@ -2799,6 +2830,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys source DB cluster. Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11. If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster. +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the + Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 + Default value is standard - `"Tags"`: The tags to be assigned to the restored cluster. - `"UseLatestRestorableTime"`: A value that is set to true to restore the cluster to the latest restorable backup time, and false otherwise. Default: false Constraints: Cannot be @@ -2925,3 +2960,62 @@ function stop_dbcluster( feature_set=SERVICE_FEATURE_SET, ) end + +""" + switchover_global_cluster(global_cluster_identifier, target_db_cluster_identifier) + switchover_global_cluster(global_cluster_identifier, target_db_cluster_identifier, params::Dict{String,<:Any}) + +Switches over the specified secondary Amazon DocumentDB cluster to be the new primary +Amazon DocumentDB cluster in the global database cluster. + +# Arguments +- `global_cluster_identifier`: The identifier of the Amazon DocumentDB global database + cluster to switch over. The identifier is the unique key assigned by the user when the + cluster is created. In other words, it's the name of the global cluster. This parameter + isn’t case-sensitive. Constraints: Must match the identifier of an existing global + cluster (Amazon DocumentDB global database). Minimum length of 1. Maximum length of 255. + Pattern: [A-Za-z][0-9A-Za-z-:._]* +- `target_db_cluster_identifier`: The identifier of the secondary Amazon DocumentDB cluster + to promote to the new primary for the global database cluster. Use the Amazon Resource Name + (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web + Services region. Constraints: Must match the identifier of an existing secondary cluster. + Minimum length of 1. Maximum length of 255. 
Pattern: [A-Za-z][0-9A-Za-z-:._]*
+
+"""
+function switchover_global_cluster(
+    GlobalClusterIdentifier,
+    TargetDbClusterIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return docdb(
+        "SwitchoverGlobalCluster",
+        Dict{String,Any}(
+            "GlobalClusterIdentifier" => GlobalClusterIdentifier,
+            "TargetDbClusterIdentifier" => TargetDbClusterIdentifier,
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function switchover_global_cluster(
+    GlobalClusterIdentifier,
+    TargetDbClusterIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return docdb(
+        "SwitchoverGlobalCluster",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "GlobalClusterIdentifier" => GlobalClusterIdentifier,
+                    "TargetDbClusterIdentifier" => TargetDbClusterIdentifier,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
diff --git a/src/services/docdb_elastic.jl b/src/services/docdb_elastic.jl
index c5b7802e84..a18273bf07 100644
--- a/src/services/docdb_elastic.jl
+++ b/src/services/docdb_elastic.jl
@@ -4,47 +4,119 @@ using AWS.AWSServices: docdb_elastic
 using AWS.Compat
 using AWS.UUIDs
 
+"""
+    copy_cluster_snapshot(snapshot_arn, target_snapshot_name)
+    copy_cluster_snapshot(snapshot_arn, target_snapshot_name, params::Dict{String,<:Any})
+
+Copies a snapshot of an elastic cluster.
+
+# Arguments
+- `snapshot_arn`: The Amazon Resource Name (ARN) identifier of the elastic cluster snapshot.
+- `target_snapshot_name`: The identifier of the new elastic cluster snapshot to create from
+  the source cluster snapshot. This parameter is not case sensitive. Constraints: Must
+  contain from 1 to 63 letters, numbers, or hyphens. The first character must be a letter.
+  Cannot end with a hyphen or contain two consecutive hyphens. Example:
+  elastic-cluster-snapshot-5
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"copyTags"`: Set to true to copy all tags from the source cluster snapshot to the target
+  elastic cluster snapshot. The default is false.
+- `"kmsKeyId"`: The Amazon Web Services KMS key ID for an encrypted elastic cluster
+  snapshot. The Amazon Web Services KMS key ID is the Amazon Resource Name (ARN), Amazon Web
+  Services KMS key identifier, or the Amazon Web Services KMS key alias for the Amazon Web
+  Services KMS encryption key. If you copy an encrypted elastic cluster snapshot from your
+  Amazon Web Services account, you can specify a value for KmsKeyId to encrypt the copy with
+  a new Amazon Web Services KMS encryption key. If you don't specify a value for KmsKeyId,
+  then the copy of the elastic cluster snapshot is encrypted with the same AWS KMS key as the
+  source elastic cluster snapshot. To copy an encrypted elastic cluster snapshot to another
+  Amazon Web Services region, set KmsKeyId to the Amazon Web Services KMS key ID that you
+  want to use to encrypt the copy of the elastic cluster snapshot in the destination region.
+  Amazon Web Services KMS encryption keys are specific to the Amazon Web Services region that
+  they are created in, and you can't use encryption keys from one Amazon Web Services region
+  in another Amazon Web Services region. If you copy an unencrypted elastic cluster snapshot
+  and specify a value for the KmsKeyId parameter, an error is returned.
+- `"tags"`: The tags to be assigned to the elastic cluster snapshot.
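+
+# Example
+An illustrative call; the snapshot ARN is a placeholder and the target name reuses the
+example given above:
+
+```julia
+copy_cluster_snapshot(
+    "arn:aws:docdb-elastic:us-east-1:111122223333:cluster-snapshot/example-snapshot",
+    "elastic-cluster-snapshot-5",
+    Dict("copyTags" => true),
+)
+```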
+""" +function copy_cluster_snapshot( + snapshotArn, targetSnapshotName; aws_config::AbstractAWSConfig=global_aws_config() +) + return docdb_elastic( + "POST", + "/cluster-snapshot/$(snapshotArn)/copy", + Dict{String,Any}("targetSnapshotName" => targetSnapshotName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function copy_cluster_snapshot( + snapshotArn, + targetSnapshotName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return docdb_elastic( + "POST", + "/cluster-snapshot/$(snapshotArn)/copy", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("targetSnapshotName" => targetSnapshotName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_cluster(admin_user_name, admin_user_password, auth_type, cluster_name, shard_capacity, shard_count) create_cluster(admin_user_name, admin_user_password, auth_type, cluster_name, shard_capacity, shard_count, params::Dict{String,<:Any}) -Creates a new Elastic DocumentDB cluster and returns its Cluster structure. +Creates a new Amazon DocumentDB elastic cluster and returns its cluster structure. # Arguments -- `admin_user_name`: The name of the Elastic DocumentDB cluster administrator. +- `admin_user_name`: The name of the Amazon DocumentDB elastic clusters administrator. Constraints: Must be from 1 to 63 letters or numbers. The first character must be a letter. Cannot be a reserved word. -- `admin_user_password`: The password for the Elastic DocumentDB cluster administrator and - can contain any printable ASCII characters. Constraints: Must contain from 8 to 100 - characters. Cannot contain a forward slash (/), double quote (\"), or the \"at\" symbol - (@). -- `auth_type`: The authentication type for the Elastic DocumentDB cluster. -- `cluster_name`: The name of the new Elastic DocumentDB cluster. This parameter is stored - as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or - hyphens. The first character must be a letter. Cannot end with a hyphen or contain two - consecutive hyphens. Example: my-cluster -- `shard_capacity`: The capacity of each shard in the new Elastic DocumentDB cluster. -- `shard_count`: The number of shards to create in the new Elastic DocumentDB cluster. +- `admin_user_password`: The password for the Amazon DocumentDB elastic clusters + administrator. The password can contain any printable ASCII characters. Constraints: + Must contain from 8 to 100 characters. Cannot contain a forward slash (/), double quote + (\"), or the \"at\" symbol (@). +- `auth_type`: The authentication type used to determine where to fetch the password used + for accessing the elastic cluster. Valid types are PLAIN_TEXT or SECRET_ARN. +- `cluster_name`: The name of the new elastic cluster. This parameter is stored as a + lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. + The first character must be a letter. Cannot end with a hyphen or contain two consecutive + hyphens. Example: my-cluster +- `shard_capacity`: The number of vCPUs assigned to each elastic cluster shard. Maximum is + 64. Allowed values are 2, 4, 8, 16, 32, 64. +- `shard_count`: The number of shards assigned to the elastic cluster. Maximum is 32. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: The client token for the Elastic DocumentDB cluster. 
-- `"kmsKeyId"`: The KMS key identifier to use to encrypt the new Elastic DocumentDB - cluster. The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption - key. If you are creating a cluster using the same Amazon account that owns this KMS - encryption key, you can use the KMS key alias instead of the ARN as the KMS encryption key. - If an encryption key is not specified, Elastic DocumentDB uses the default encryption key - that KMS creates for your account. Your account has a different default encryption key for - each Amazon Region. +- `"backupRetentionPeriod"`: The number of days for which automatic snapshots are retained. +- `"clientToken"`: The client token for the elastic cluster. +- `"kmsKeyId"`: The KMS key identifier to use to encrypt the new elastic cluster. The KMS + key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are + creating a cluster using the same Amazon account that owns this KMS encryption key, you can + use the KMS key alias instead of the ARN as the KMS encryption key. If an encryption key is + not specified, Amazon DocumentDB uses the default encryption key that KMS creates for your + account. Your account has a different default encryption key for each Amazon Region. +- `"preferredBackupWindow"`: The daily time range during which automated backups are + created if automated backups are enabled, as determined by the backupRetentionPeriod. - `"preferredMaintenanceWindow"`: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi Default: a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. -- `"subnetIds"`: The Amazon EC2 subnet IDs for the new Elastic DocumentDB cluster. -- `"tags"`: The tags to be assigned to the new Elastic DocumentDB cluster. +- `"shardInstanceCount"`: The number of replica instances applying to all shards in the + elastic cluster. A shardInstanceCount value of 1 means there is one writer instance, and + any additional instances are replicas that can be used for reads and to improve + availability. +- `"subnetIds"`: The Amazon EC2 subnet IDs for the new elastic cluster. +- `"tags"`: The tags to be assigned to the new elastic cluster. - `"vpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with the new - Elastic DocumentDB cluster. + elastic cluster. """ function create_cluster( adminUserName, @@ -108,16 +180,16 @@ end create_cluster_snapshot(cluster_arn, snapshot_name) create_cluster_snapshot(cluster_arn, snapshot_name, params::Dict{String,<:Any}) -Creates a snapshot of a cluster. +Creates a snapshot of an elastic cluster. # Arguments -- `cluster_arn`: The arn of the Elastic DocumentDB cluster that the snapshot will be taken - from. -- `snapshot_name`: The name of the Elastic DocumentDB snapshot. +- `cluster_arn`: The ARN identifier of the elastic cluster of which you want to create a + snapshot. +- `snapshot_name`: The name of the new elastic cluster snapshot. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"tags"`: The tags to be assigned to the new Elastic DocumentDB snapshot. +- `"tags"`: The tags to be assigned to the new elastic cluster snapshot. 
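+
+# Example
+A minimal, hand-written call sketch, not part of the generated service definition; the
+cluster ARN and snapshot name below are hypothetical placeholders:
+
+    create_cluster_snapshot(
+        "arn:aws:docdb-elastic:us-east-1:111122223333:cluster/sample-cluster",  # hypothetical ARN
+        "sample-cluster-snapshot",
+    )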
""" function create_cluster_snapshot( clusterArn, snapshotName; aws_config::AbstractAWSConfig=global_aws_config() @@ -157,10 +229,10 @@ end delete_cluster(cluster_arn) delete_cluster(cluster_arn, params::Dict{String,<:Any}) -Delete a Elastic DocumentDB cluster. +Delete an elastic cluster. # Arguments -- `cluster_arn`: The arn of the Elastic DocumentDB cluster that is to be deleted. +- `cluster_arn`: The ARN identifier of the elastic cluster that is to be deleted. """ function delete_cluster(clusterArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -189,10 +261,10 @@ end delete_cluster_snapshot(snapshot_arn) delete_cluster_snapshot(snapshot_arn, params::Dict{String,<:Any}) -Delete a Elastic DocumentDB snapshot. +Delete an elastic cluster snapshot. # Arguments -- `snapshot_arn`: The arn of the Elastic DocumentDB snapshot that is to be deleted. +- `snapshot_arn`: The ARN identifier of the elastic cluster snapshot that is to be deleted. """ function delete_cluster_snapshot( @@ -223,10 +295,10 @@ end get_cluster(cluster_arn) get_cluster(cluster_arn, params::Dict{String,<:Any}) -Returns information about a specific Elastic DocumentDB cluster. +Returns information about a specific elastic cluster. # Arguments -- `cluster_arn`: The arn of the Elastic DocumentDB cluster. +- `cluster_arn`: The ARN identifier of the elastic cluster. """ function get_cluster(clusterArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -255,10 +327,10 @@ end get_cluster_snapshot(snapshot_arn) get_cluster_snapshot(snapshot_arn, params::Dict{String,<:Any}) -Returns information about a specific Elastic DocumentDB snapshot +Returns information about a specific elastic cluster snapshot # Arguments -- `snapshot_arn`: The arn of the Elastic DocumentDB snapshot. +- `snapshot_arn`: The ARN identifier of the elastic cluster snapshot. """ function get_cluster_snapshot( @@ -289,13 +361,21 @@ end list_cluster_snapshots() list_cluster_snapshots(params::Dict{String,<:Any}) -Returns information about Elastic DocumentDB snapshots for a specified cluster. +Returns information about snapshots for a specified elastic cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clusterArn"`: The arn of the Elastic DocumentDB cluster. -- `"maxResults"`: The maximum number of entries to recieve in the response. -- `"nextToken"`: The nextToken which is used the get the next page of data. +- `"clusterArn"`: The ARN identifier of the elastic cluster. +- `"maxResults"`: The maximum number of elastic cluster snapshot results to receive in the + response. +- `"nextToken"`: A pagination token provided by a previous request. If this parameter is + specified, the response includes only records beyond this token, up to the value specified + by max-results. If there is no more data in the responce, the nextToken will not be + returned. +- `"snapshotType"`: The type of cluster snapshots to be returned. You can specify one of + the following values: automated - Return all cluster snapshots that Amazon DocumentDB + has automatically created for your Amazon Web Services account. manual - Return all + cluster snapshots that you have manually created for your Amazon Web Services account. """ function list_cluster_snapshots(; aws_config::AbstractAWSConfig=global_aws_config()) return docdb_elastic( @@ -318,12 +398,16 @@ end list_clusters() list_clusters(params::Dict{String,<:Any}) -Returns information about provisioned Elastic DocumentDB clusters. 
+Returns information about provisioned Amazon DocumentDB elastic clusters.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"maxResults"`: The maximum number of entries to recieve in the response.
-- `"nextToken"`: The nextToken which is used the get the next page of data.
+- `"maxResults"`: The maximum number of elastic cluster snapshot results to receive in the
+  response.
+- `"nextToken"`: A pagination token provided by a previous request. If this parameter is
+  specified, the response includes only records beyond this token, up to the value specified
+  by max-results. If there is no more data in the response, the nextToken will not be
+  returned.
 """
 function list_clusters(; aws_config::AbstractAWSConfig=global_aws_config())
     return docdb_elastic(
@@ -342,10 +426,10 @@ end
     list_tags_for_resource(resource_arn)
     list_tags_for_resource(resource_arn, params::Dict{String,<:Any})
 
-Lists all tags on a Elastic DocumentDB resource
+Lists all tags on an elastic cluster resource
 
 # Arguments
-- `resource_arn`: The arn of the Elastic DocumentDB resource.
+- `resource_arn`: The ARN identifier of the elastic cluster resource.
 
 """
 function list_tags_for_resource(
@@ -376,27 +460,32 @@ end
     restore_cluster_from_snapshot(cluster_name, snapshot_arn)
     restore_cluster_from_snapshot(cluster_name, snapshot_arn, params::Dict{String,<:Any})
 
-Restores a Elastic DocumentDB cluster from a snapshot.
+Restores an elastic cluster from a snapshot.
 
 # Arguments
-- `cluster_name`: The name of the Elastic DocumentDB cluster.
-- `snapshot_arn`: The arn of the Elastic DocumentDB snapshot.
+- `cluster_name`: The name of the elastic cluster.
+- `snapshot_arn`: The ARN identifier of the elastic cluster snapshot.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"kmsKeyId"`: The KMS key identifier to use to encrypt the new Elastic DocumentDB
-  cluster. The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption
-  key. If you are creating a cluster using the same Amazon account that owns this KMS
-  encryption key, you can use the KMS key alias instead of the ARN as the KMS encryption key.
-  If an encryption key is not specified here, Elastic DocumentDB uses the default encryption
-  key that KMS creates for your account. Your account has a different default encryption key
-  for each Amazon Region.
-- `"subnetIds"`: The Amazon EC2 subnet IDs for the Elastic DocumentDB cluster.
-- `"tags"`: A list of the tag names to be assigned to the restored DB cluster, in the form
-  of an array of key-value pairs in which the key is the tag name and the value is the key
-  value.
-- `"vpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with the Elastic
-  DocumentDB cluster.
+- `"kmsKeyId"`: The KMS key identifier to use to encrypt the new Amazon DocumentDB elastic
+  cluster. The KMS key identifier is the Amazon Resource Name (ARN) for the KMS
+  encryption key. If you are creating a cluster using the same Amazon account that owns this
+  KMS encryption key, you can use the KMS key alias instead of the ARN as the KMS encryption
+  key. If an encryption key is not specified here, Amazon DocumentDB uses the default
+  encryption key that KMS creates for your account. Your account has a different default
+  encryption key for each Amazon Region.
+- `"shardCapacity"`: The capacity of each shard in the new restored elastic cluster.
+- `"shardInstanceCount"`: The number of replica instances applying to all shards in the + elastic cluster. A shardInstanceCount value of 1 means there is one writer instance, and + any additional instances are replicas that can be used for reads and to improve + availability. +- `"subnetIds"`: The Amazon EC2 subnet IDs for the elastic cluster. +- `"tags"`: A list of the tag names to be assigned to the restored elastic cluster, in the + form of an array of key-value pairs in which the key is the tag name and the value is the + key value. +- `"vpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with the elastic + cluster. """ function restore_cluster_from_snapshot( clusterName, snapshotArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -426,15 +515,80 @@ function restore_cluster_from_snapshot( ) end +""" + start_cluster(cluster_arn) + start_cluster(cluster_arn, params::Dict{String,<:Any}) + +Restarts the stopped elastic cluster that is specified by clusterARN. + +# Arguments +- `cluster_arn`: The ARN identifier of the elastic cluster. + +""" +function start_cluster(clusterArn; aws_config::AbstractAWSConfig=global_aws_config()) + return docdb_elastic( + "POST", + "/cluster/$(clusterArn)/start"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_cluster( + clusterArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return docdb_elastic( + "POST", + "/cluster/$(clusterArn)/start", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_cluster(cluster_arn) + stop_cluster(cluster_arn, params::Dict{String,<:Any}) + +Stops the running elastic cluster that is specified by clusterArn. The elastic cluster must +be in the available state. + +# Arguments +- `cluster_arn`: The ARN identifier of the elastic cluster. + +""" +function stop_cluster(clusterArn; aws_config::AbstractAWSConfig=global_aws_config()) + return docdb_elastic( + "POST", + "/cluster/$(clusterArn)/stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_cluster( + clusterArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return docdb_elastic( + "POST", + "/cluster/$(clusterArn)/stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Adds metadata tags to a Elastic DocumentDB resource +Adds metadata tags to an elastic cluster resource # Arguments -- `resource_arn`: The arn of the Elastic DocumentDB resource. -- `tags`: The tags to be assigned to the Elastic DocumentDB resource. +- `resource_arn`: The ARN identifier of the elastic cluster resource. +- `tags`: The tags that are assigned to the elastic cluster resource. """ function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -465,11 +619,11 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes metadata tags to a Elastic DocumentDB resource +Removes metadata tags from an elastic cluster resource # Arguments -- `resource_arn`: The arn of the Elastic DocumentDB resource. -- `tag_keys`: The tag keys to be removed from the Elastic DocumentDB resource. +- `resource_arn`: The ARN identifier of the elastic cluster resource. +- `tag_keys`: The tag keys to be removed from the elastic cluster resource. 
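+
+# Example
+A minimal, hand-written call sketch, not part of the generated service definition; the
+resource ARN and tag keys below are hypothetical placeholders:
+
+    untag_resource(
+        "arn:aws:docdb-elastic:us-east-1:111122223333:cluster/sample-cluster",  # hypothetical ARN
+        ["environment", "team"],  # tag keys to remove
+    )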
""" function untag_resource( @@ -502,29 +656,38 @@ end update_cluster(cluster_arn) update_cluster(cluster_arn, params::Dict{String,<:Any}) -Modifies a Elastic DocumentDB cluster. This includes updating admin-username/password, -upgrading API version setting up a backup window and maintenance window +Modifies an elastic cluster. This includes updating admin-username/password, upgrading the +API version, and setting up a backup window and maintenance window # Arguments -- `cluster_arn`: The arn of the Elastic DocumentDB cluster. +- `cluster_arn`: The ARN identifier of the elastic cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"adminUserPassword"`: The password for the Elastic DocumentDB cluster administrator. +- `"adminUserPassword"`: The password associated with the elastic cluster administrator. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@). Constraints: Must contain from 8 to 100 characters. -- `"authType"`: The authentication type for the Elastic DocumentDB cluster. -- `"clientToken"`: The client token for the Elastic DocumentDB cluster. +- `"authType"`: The authentication type used to determine where to fetch the password used + for accessing the elastic cluster. Valid types are PLAIN_TEXT or SECRET_ARN. +- `"backupRetentionPeriod"`: The number of days for which automatic snapshots are retained. +- `"clientToken"`: The client token for the elastic cluster. +- `"preferredBackupWindow"`: The daily time range during which automated backups are + created if automated backups are enabled, as determined by the backupRetentionPeriod. - `"preferredMaintenanceWindow"`: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi Default: a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. -- `"shardCapacity"`: The capacity of each shard in the Elastic DocumentDB cluster. -- `"shardCount"`: The number of shards to create in the Elastic DocumentDB cluster. -- `"subnetIds"`: The number of shards to create in the Elastic DocumentDB cluster. -- `"vpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with the new - Elastic DocumentDB cluster. +- `"shardCapacity"`: The number of vCPUs assigned to each elastic cluster shard. Maximum is + 64. Allowed values are 2, 4, 8, 16, 32, 64. +- `"shardCount"`: The number of shards assigned to the elastic cluster. Maximum is 32. +- `"shardInstanceCount"`: The number of replica instances applying to all shards in the + elastic cluster. A shardInstanceCount value of 1 means there is one writer instance, and + any additional instances are replicas that can be used for reads and to improve + availability. +- `"subnetIds"`: The Amazon EC2 subnet IDs for the elastic cluster. +- `"vpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with the elastic + cluster. """ function update_cluster(clusterArn; aws_config::AbstractAWSConfig=global_aws_config()) return docdb_elastic( diff --git a/src/services/drs.jl b/src/services/drs.jl index dba20eceb0..1d51fc753c 100644 --- a/src/services/drs.jl +++ b/src/services/drs.jl @@ -108,7 +108,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"copyTags"`: Copy tags. 
- `"exportBucketArn"`: S3 bucket ARN to export Source Network templates. - `"launchDisposition"`: Launch disposition. +- `"launchIntoSourceInstance"`: DRS will set the 'launch into instance ID' of any source + server when performing a drill, recovery or failback to the previous region or availability + zone, using the instance ID of the source instance. - `"licensing"`: Licensing. +- `"postLaunchEnabled"`: Whether we want to activate post-launch actions. - `"tags"`: Request to associate tags during creation of a Launch Configuration Template. - `"targetInstanceTypeRightSizingMethod"`: Target instance type right-sizing method. """ @@ -337,6 +341,49 @@ function delete_job( ) end +""" + delete_launch_action(action_id, resource_id) + delete_launch_action(action_id, resource_id, params::Dict{String,<:Any}) + +Deletes a resource launch action. + +# Arguments +- `action_id`: +- `resource_id`: + +""" +function delete_launch_action( + actionId, resourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return drs( + "POST", + "/DeleteLaunchAction", + Dict{String,Any}("actionId" => actionId, "resourceId" => resourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_launch_action( + actionId, + resourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return drs( + "POST", + "/DeleteLaunchAction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("actionId" => actionId, "resourceId" => resourceId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_launch_configuration_template(launch_configuration_template_id) delete_launch_configuration_template(launch_configuration_template_id, params::Dict{String,<:Any}) @@ -1141,6 +1188,46 @@ function list_extensible_source_servers( ) end +""" + list_launch_actions(resource_id) + list_launch_actions(resource_id, params::Dict{String,<:Any}) + +Lists resource launch actions. + +# Arguments +- `resource_id`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: Filters to apply when listing resource launch actions. +- `"maxResults"`: Maximum amount of items to return when listing resource launch actions. +- `"nextToken"`: Next token to use when listing resource launch actions. +""" +function list_launch_actions(resourceId; aws_config::AbstractAWSConfig=global_aws_config()) + return drs( + "POST", + "/ListLaunchActions", + Dict{String,Any}("resourceId" => resourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_launch_actions( + resourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return drs( + "POST", + "/ListLaunchActions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("resourceId" => resourceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_staging_accounts() list_staging_accounts(params::Dict{String,<:Any}) @@ -1206,6 +1293,100 @@ function list_tags_for_resource( ) end +""" + put_launch_action(action_code, action_id, action_version, active, category, description, name, optional, order, resource_id) + put_launch_action(action_code, action_id, action_version, active, category, description, name, optional, order, resource_id, params::Dict{String,<:Any}) + +Puts a resource launch action. + +# Arguments +- `action_code`: Launch action code. 
+- `action_id`: +- `action_version`: +- `active`: Whether the launch action is active. +- `category`: +- `description`: +- `name`: +- `optional`: Whether the launch will not be marked as failed if this action fails. +- `order`: +- `resource_id`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"parameters"`: +""" +function put_launch_action( + actionCode, + actionId, + actionVersion, + active, + category, + description, + name, + optional, + order, + resourceId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return drs( + "POST", + "/PutLaunchAction", + Dict{String,Any}( + "actionCode" => actionCode, + "actionId" => actionId, + "actionVersion" => actionVersion, + "active" => active, + "category" => category, + "description" => description, + "name" => name, + "optional" => optional, + "order" => order, + "resourceId" => resourceId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_launch_action( + actionCode, + actionId, + actionVersion, + active, + category, + description, + name, + optional, + order, + resourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return drs( + "POST", + "/PutLaunchAction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "actionCode" => actionCode, + "actionId" => actionId, + "actionVersion" => actionVersion, + "active" => active, + "category" => category, + "description" => description, + "name" => name, + "optional" => optional, + "order" => order, + "resourceId" => resourceId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ retry_data_replication(source_server_id) retry_data_replication(source_server_id, params::Dict{String,<:Any}) @@ -1796,8 +1977,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the Recovery Instance. - `"launchDisposition"`: The state of the Recovery Instance in EC2 after the recovery operation. +- `"launchIntoInstanceProperties"`: Launch into existing instance properties. - `"licensing"`: The licensing configuration to be used for this launch configuration. - `"name"`: The name of the launch configuration. +- `"postLaunchEnabled"`: Whether we want to enable post-launch actions for the Source + Server. - `"targetInstanceTypeRightSizingMethod"`: Whether Elastic Disaster Recovery should try to automatically choose the instance type that best matches the OS, CPU, and RAM of your Source Server. @@ -1844,7 +2028,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"copyTags"`: Copy tags. - `"exportBucketArn"`: S3 bucket ARN to export Source Network templates. - `"launchDisposition"`: Launch disposition. +- `"launchIntoSourceInstance"`: DRS will set the 'launch into instance ID' of any source + server when performing a drill, recovery or failback to the previous region or availability + zone, using the instance ID of the source instance. - `"licensing"`: Licensing. +- `"postLaunchEnabled"`: Whether we want to activate post-launch actions. - `"targetInstanceTypeRightSizingMethod"`: Target instance type right-sizing method. """ function update_launch_configuration_template( diff --git a/src/services/dynamodb.jl b/src/services/dynamodb.jl index dc524e7e66..c7c65bfb39 100644 --- a/src/services/dynamodb.jl +++ b/src/services/dynamodb.jl @@ -87,36 +87,37 @@ minimum read capacity units according to the type of read. 
For more information, Working with Tables in the Amazon DynamoDB Developer Guide. # Arguments -- `request_items`: A map of one or more table names and, for each table, a map that - describes one or more items to retrieve from that table. Each table name can be used only - once per BatchGetItem request. Each element in the map of items to retrieve consists of the - following: ConsistentRead - If true, a strongly consistent read is used; if false (the - default), an eventually consistent read is used. ExpressionAttributeNames - One or more - substitution tokens for attribute names in the ProjectionExpression parameter. The - following are some use cases for using ExpressionAttributeNames: To access an attribute - whose name conflicts with a DynamoDB reserved word. To create a placeholder for repeating - occurrences of an attribute name in an expression. To prevent special characters in an - attribute name from being misinterpreted in an expression. Use the # character in an - expression to dereference an attribute name. For example, consider the following attribute - name: Percentile The name of this attribute conflicts with a reserved word, so it - cannot be used directly in an expression. (For the complete list of reserved words, see - Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could - specify the following for ExpressionAttributeNames: {\"#P\":\"Percentile\"} You could - then use this substitution in an expression, as in this example: #P = :val Tokens - that begin with the : character are expression attribute values, which are placeholders for - the actual value at runtime. For more information about expression attribute names, see - Accessing Item Attributes in the Amazon DynamoDB Developer Guide. Keys - An array of - primary key attribute values that define specific items in the table. For each primary key, - you must provide all of the key attributes. For example, with a simple primary key, you - only need to provide the partition key value. For a composite key, you must provide both - the partition key value and the sort key value. ProjectionExpression - A string that - identifies one or more attributes to retrieve from the table. These attributes can include - scalars, sets, or elements of a JSON document. The attributes in the expression must be - separated by commas. If no attribute names are specified, then all attributes are returned. - If any of the requested attributes are not found, they do not appear in the result. For - more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. - AttributesToGet - This is a legacy parameter. Use ProjectionExpression instead. For more - information, see AttributesToGet in the Amazon DynamoDB Developer Guide. +- `request_items`: A map of one or more table names or table ARNs and, for each table, a + map that describes one or more items to retrieve from that table. Each table name or ARN + can be used only once per BatchGetItem request. Each element in the map of items to + retrieve consists of the following: ConsistentRead - If true, a strongly consistent read + is used; if false (the default), an eventually consistent read is used. + ExpressionAttributeNames - One or more substitution tokens for attribute names in the + ProjectionExpression parameter. The following are some use cases for using + ExpressionAttributeNames: To access an attribute whose name conflicts with a DynamoDB + reserved word. 
To create a placeholder for repeating occurrences of an attribute name in + an expression. To prevent special characters in an attribute name from being + misinterpreted in an expression. Use the # character in an expression to dereference an + attribute name. For example, consider the following attribute name: Percentile The + name of this attribute conflicts with a reserved word, so it cannot be used directly in an + expression. (For the complete list of reserved words, see Reserved Words in the Amazon + DynamoDB Developer Guide). To work around this, you could specify the following for + ExpressionAttributeNames: {\"#P\":\"Percentile\"} You could then use this + substitution in an expression, as in this example: #P = :val Tokens that begin with + the : character are expression attribute values, which are placeholders for the actual + value at runtime. For more information about expression attribute names, see Accessing + Item Attributes in the Amazon DynamoDB Developer Guide. Keys - An array of primary key + attribute values that define specific items in the table. For each primary key, you must + provide all of the key attributes. For example, with a simple primary key, you only need to + provide the partition key value. For a composite key, you must provide both the partition + key value and the sort key value. ProjectionExpression - A string that identifies one or + more attributes to retrieve from the table. These attributes can include scalars, sets, or + elements of a JSON document. The attributes in the expression must be separated by commas. + If no attribute names are specified, then all attributes are returned. If any of the + requested attributes are not found, they do not appear in the result. For more information, + see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. AttributesToGet - + This is a legacy parameter. Use ProjectionExpression instead. For more information, see + AttributesToGet in the Amazon DynamoDB Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -195,25 +196,26 @@ BatchWriteItem request. For example, you cannot put and delete the same item in BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request -size exceeds 16 MB. +size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a +partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes. # Arguments -- `request_items`: A map of one or more table names and, for each table, a list of - operations to be performed (DeleteRequest or PutRequest). Each element in the map consists - of the following: DeleteRequest - Perform a DeleteItem operation on the specified item. - The item to be deleted is identified by a Key subelement: Key - A map of primary key - attribute values that uniquely identify the item. Each entry in this map consists of an - attribute name and an attribute value. For each primary key, you must provide all of the - key attributes. For example, with a simple primary key, you only need to provide a value - for the partition key. For a composite primary key, you must provide values for both the - partition key and the sort key. PutRequest - Perform a PutItem operation on the - specified item. 
The item to be put is identified by an Item subelement: Item - A map of - attributes and their values. Each entry in this map consists of an attribute name and an - attribute value. Attribute values must not be null; string and binary type attributes must - have lengths greater than zero; and set type attributes must not be empty. Requests that - contain empty values are rejected with a ValidationException exception. If you specify any - attributes that are part of an index key, then the data types for those attributes must - match those of the schema in the table's attribute definition. +- `request_items`: A map of one or more table names or table ARNs and, for each table, a + list of operations to be performed (DeleteRequest or PutRequest). Each element in the map + consists of the following: DeleteRequest - Perform a DeleteItem operation on the + specified item. The item to be deleted is identified by a Key subelement: Key - A map of + primary key attribute values that uniquely identify the item. Each entry in this map + consists of an attribute name and an attribute value. For each primary key, you must + provide all of the key attributes. For example, with a simple primary key, you only need to + provide a value for the partition key. For a composite primary key, you must provide values + for both the partition key and the sort key. PutRequest - Perform a PutItem operation + on the specified item. The item to be put is identified by an Item subelement: Item - A + map of attributes and their values. Each entry in this map consists of an attribute name + and an attribute value. Attribute values must not be null; string and binary type + attributes must have lengths greater than zero; and set type attributes must not be empty. + Requests that contain empty values are rejected with a ValidationException exception. If + you specify any attributes that are part of an index key, then the data types for those + attributes must match those of the schema in the table's attribute definition. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -267,7 +269,8 @@ write capacity # Arguments - `backup_name`: Specified name for the backup. -- `table_name`: The name of the table. +- `table_name`: The name of the table. You can also provide the Amazon Resource Name (ARN) + of the table in this parameter. """ function create_backup( @@ -306,28 +309,29 @@ end Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided -Regions. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We -recommend using Version 2019.11.21 (Current) when creating new global tables, as it -provides greater flexibility, higher efficiency and consumes less write capacity than -2017.11.29 (Legacy). To determine which version you are using, see Determining the version. -To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 -(Current), see Updating global tables. If you want to add a new replica table to a -global table, each of the following conditions must be true: The table must have the same -primary key as all of the other replicas. The table must have the same name as all of the -other replicas. The table must have DynamoDB Streams enabled, with the stream containing -both the new and the old images of the item. None of the replica tables in the global -table can contain any data. 
If global secondary indexes are specified, then the -following conditions must also be met: The global secondary indexes must have the same -name. The global secondary indexes must have the same hash key and sort key (if -present). If local secondary indexes are specified, then the following conditions must -also be met: The local secondary indexes must have the same name. The local -secondary indexes must have the same hash key and sort key (if present). Write -capacity settings should be set consistently across your replica tables and secondary -indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity -settings for all of your global tables replicas and indexes. If you prefer to manage -write capacity settings manually, you should provision equal replicated write capacity -units to your replica tables. You should also provision equal replicated write capacity -units to matching secondary indexes across your global table. +Regions. This documentation is for version 2017.11.29 (Legacy) of global tables, which +should be avoided for new global tables. Customers should use Global Tables version +2019.11.21 (Current) when possible, because it provides greater flexibility, higher +efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which +version you're using, see Determining the global table version you are using. To update +existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), +see Upgrading global tables. If you want to add a new replica table to a global table, +each of the following conditions must be true: The table must have the same primary key +as all of the other replicas. The table must have the same name as all of the other +replicas. The table must have DynamoDB Streams enabled, with the stream containing both +the new and the old images of the item. None of the replica tables in the global table +can contain any data. If global secondary indexes are specified, then the following +conditions must also be met: The global secondary indexes must have the same name. +The global secondary indexes must have the same hash key and sort key (if present). If +local secondary indexes are specified, then the following conditions must also be met: +The local secondary indexes must have the same name. The local secondary indexes must +have the same hash key and sort key (if present). Write capacity settings should be +set consistently across your replica tables and secondary indexes. DynamoDB strongly +recommends enabling auto scaling to manage the write capacity settings for all of your +global tables replicas and indexes. If you prefer to manage write capacity settings +manually, you should provision equal replicated write capacity units to your replica +tables. You should also provision equal replicated write capacity units to matching +secondary indexes across your global table. # Arguments - `global_table_name`: The global table name. @@ -404,15 +408,16 @@ status. in this order: The first element must have a KeyType of HASH, and the second element must have a KeyType of RANGE. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide. -- `table_name`: The name of the table to create. +- `table_name`: The name of the table to create. You can also provide the Amazon Resource + Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"BillingMode"`: Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later. PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned - Mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. - PAY_PER_REQUEST sets the billing mode to On-Demand Mode. + capacity mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode. - `"DeletionProtectionEnabled"`: Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table. - `"GlobalSecondaryIndexes"`: One or more global secondary indexes (the maximum is 20) to @@ -450,12 +455,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total. +- `"OnDemandThroughput"`: Sets the maximum number of read and write units for the specified + table in on-demand capacity mode. If you use this parameter, you must specify + MaxReadRequestUnits, MaxWriteRequestUnits, or both. - `"ProvisionedThroughput"`: Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. +- `"ResourcePolicy"`: An Amazon Web Services resource-based policy document in JSON format + that will be attached to the table. When you attach a resource-based policy while creating + a table, the policy application is strongly consistent. The maximum size supported for a + resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the + size of a policy against this limit. For a full list of all considerations that apply for + resource-based policies, see Resource-based policy considerations. You need to specify the + CreateTable and PutResourcePolicy IAM actions for authorizing a user to create a table with + a resource-based policy. - `"SSESpecification"`: Represents the settings used to enable server-side encryption. - `"StreamSpecification"`: The settings for DynamoDB Streams on the table. These settings consist of: StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) @@ -566,7 +582,8 @@ Otherwise, the item is not deleted. example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key. -- `table_name`: The name of the table from which to delete the item. +- `table_name`: The name of the table from which to delete the item. You can also provide + the Amazon Resource Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -618,6 +635,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys from the small network and processing overhead of receiving a larger response. No read capacity units are consumed. 
The ReturnValues parameter is used by several DynamoDB operations; however, DeleteItem does not recognize any values other than NONE or ALL_OLD. +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for a DeleteItem operation that failed a condition check. There is no additional + cost associated with requesting a return value aside from the small network and processing + overhead of receiving a larger response. No read capacity units are consumed. """ function delete_item(Key, TableName; aws_config::AbstractAWSConfig=global_aws_config()) return dynamodb( @@ -645,6 +666,61 @@ function delete_item( ) end +""" + delete_resource_policy(resource_arn) + delete_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Deletes the resource-based policy attached to the resource, which can be a table or stream. + DeleteResourcePolicy is an idempotent operation; running it multiple times on the same +resource doesn't result in an error response, unless you specify an ExpectedRevisionId, +which will then return a PolicyNotFoundException. To make sure that you don't +inadvertently lock yourself out of your own resources, the root principal in your Amazon +Web Services account can perform DeleteResourcePolicy requests, even if your resource-based +policy explicitly denies the root principal's access. DeleteResourcePolicy is an +asynchronous operation. If you issue a GetResourcePolicy request immediately after running +the DeleteResourcePolicy request, DynamoDB might still return the deleted policy. This is +because the policy for your resource might not have been deleted yet. Wait for a few +seconds, and then try the GetResourcePolicy request again. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the DynamoDB resource from which the + policy will be removed. The resources you can specify include tables and streams. If you + remove the policy of a table, it will also remove the permissions for the table's indexes + defined in that policy document. This is because index permissions are defined in the + table's policy. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExpectedRevisionId"`: A string value that you can use to conditionally delete your + policy. When you provide an expected revision ID, if the revision ID of the existing policy + on the resource doesn't match or if there's no policy attached to the resource, the request + will fail and return a PolicyNotFoundException. +""" +function delete_resource_policy( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return dynamodb( + "DeleteResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return dynamodb( + "DeleteResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_table(table_name) delete_table(table_name, params::Dict{String,<:Any}) @@ -654,16 +730,18 @@ request, the specified table is in the DELETING state until DynamoDB completes t deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. 
If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the -DELETING state, no error is returned. This operation only applies to Version 2019.11.21 -(Current) of global tables. DynamoDB might continue to accept data read and write -operations, such as GetItem and PutItem, on a table in the DELETING state until the table -deletion is complete. When you delete a table, any indexes on that table are also deleted. -If you have DynamoDB Streams enabled on the table, then the corresponding stream on that -table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. -Use the DescribeTable action to check the status of the table. +DELETING state, no error is returned. For global tables, this operation only applies to +global tables using Version 2019.11.21 (Current version). DynamoDB might continue to +accept data read and write operations, such as GetItem and PutItem, on a table in the +DELETING state until the table deletion is complete. When you delete a table, any indexes +on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the +corresponding stream on that table goes into the DISABLED state, and the stream is +automatically deleted after 24 hours. Use the DescribeTable action to check the status of +the table. # Arguments -- `table_name`: The name of the table to delete. +- `table_name`: The name of the table to delete. You can also provide the Amazon Resource + Name (ARN) of the table in this parameter. """ function delete_table(TableName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -738,7 +816,8 @@ time during the last 35 days. You can call DescribeContinuousBackups at a maxim # Arguments - `table_name`: Name of the table for which the customer wants to check the continuous - backups and point in time recovery settings. + backups and point in time recovery settings. You can also provide the Amazon Resource Name + (ARN) of the table in this parameter. """ function describe_continuous_backups( @@ -773,7 +852,8 @@ end Returns information about contributor insights for a given table or global secondary index. # Arguments -- `table_name`: The name of the table to describe. +- `table_name`: The name of the table to describe. You can also provide the Amazon Resource + Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -808,9 +888,8 @@ end describe_endpoints() describe_endpoints(params::Dict{String,<:Any}) -Returns the regional endpoint information. This action must be included in your VPC -endpoint policies, or access to the DescribeEndpoints API will be denied. For more -information on policy permissions, please see Internetwork traffic privacy. +Returns the regional endpoint information. For more information on policy permissions, +please see Internetwork traffic privacy. """ function describe_endpoints(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -863,12 +942,13 @@ end describe_global_table(global_table_name) describe_global_table(global_table_name, params::Dict{String,<:Any}) -Returns information about the specified global table. This operation only applies to -Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 -(Current) when creating new global tables, as it provides greater flexibility, higher -efficiency and consumes less write capacity than 2017.11.29 (Legacy). 
To determine which -version you are using, see Determining the version. To update existing global tables from -version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. +Returns information about the specified global table. This documentation is for version +2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. +Customers should use Global Tables version 2019.11.21 (Current) when possible, because it +provides greater flexibility, higher efficiency, and consumes less write capacity than +2017.11.29 (Legacy). To determine which version you're using, see Determining the global +table version you are using. To update existing global tables from version 2017.11.29 +(Legacy) to version 2019.11.21 (Current), see Upgrading global tables. # Arguments - `global_table_name`: The name of the global table. @@ -905,12 +985,13 @@ end describe_global_table_settings(global_table_name) describe_global_table_settings(global_table_name, params::Dict{String,<:Any}) -Describes Region-specific settings for a global table. This operation only applies to -Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 -(Current) when creating new global tables, as it provides greater flexibility, higher -efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which -version you are using, see Determining the version. To update existing global tables from -version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. +Describes Region-specific settings for a global table. This documentation is for version +2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. +Customers should use Global Tables version 2019.11.21 (Current) when possible, because it +provides greater flexibility, higher efficiency, and consumes less write capacity than +2017.11.29 (Legacy). To determine which version you're using, see Determining the global +table version you are using. To update existing global tables from version 2017.11.29 +(Legacy) to version 2019.11.21 (Current), see Upgrading global tables. # Arguments - `global_table_name`: The name of the global table to describe. @@ -984,7 +1065,8 @@ end Returns information about the status of Kinesis streaming. # Arguments -- `table_name`: The name of the table being described. +- `table_name`: The name of the table being described. You can also provide the Amazon + Resource Name (ARN) of the table in this parameter. """ function describe_kinesis_streaming_destination( @@ -1066,15 +1148,16 @@ end describe_table(table_name, params::Dict{String,<:Any}) Returns information about the table, including the current status of the table, when it was -created, the primary key schema, and any indexes on the table. This operation only applies -to Version 2019.11.21 (Current) of global tables. If you issue a DescribeTable request -immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. -This is because DescribeTable uses an eventually consistent query, and the metadata for -your table might not be available at that moment. Wait for a few seconds, and then try the -DescribeTable request again. +created, the primary key schema, and any indexes on the table. For global tables, this +operation only applies to global tables using Version 2019.11.21 (Current version). If +you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might +return a ResourceNotFoundException. 
This is because DescribeTable uses an eventually +consistent query, and the metadata for your table might not be available at that moment. +Wait for a few seconds, and then try the DescribeTable request again. # Arguments -- `table_name`: The name of the table to describe. +- `table_name`: The name of the table to describe. You can also provide the Amazon Resource + Name (ARN) of the table in this parameter. """ function describe_table(TableName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1104,11 +1187,13 @@ end describe_table_replica_auto_scaling(table_name) describe_table_replica_auto_scaling(table_name, params::Dict{String,<:Any}) -Describes auto scaling settings across replicas of the global table at once. This -operation only applies to Version 2019.11.21 (Current) of global tables. +Describes auto scaling settings across replicas of the global table at once. For global +tables, this operation only applies to global tables using Version 2019.11.21 (Current +version). # Arguments -- `table_name`: The name of the table. +- `table_name`: The name of the table. You can also provide the Amazon Resource Name (ARN) + of the table in this parameter. """ function describe_table_replica_auto_scaling( @@ -1143,7 +1228,8 @@ end Gives a description of the Time to Live (TTL) status on the specified table. # Arguments -- `table_name`: The name of the table to be described. +- `table_name`: The name of the table to be described. You can also provide the Amazon + Resource Name (ARN) of the table in this parameter. """ function describe_time_to_live(TableName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1178,8 +1264,13 @@ deleting either of the resources. # Arguments - `stream_arn`: The ARN for a Kinesis data stream. -- `table_name`: The name of the DynamoDB table. +- `table_name`: The name of the DynamoDB table. You can also provide the Amazon Resource + Name (ARN) of the table in this parameter. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EnableKinesisStreamingConfiguration"`: The source for the Kinesis streaming information + that is being enabled. """ function disable_kinesis_streaming_destination( StreamArn, TableName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1222,8 +1313,13 @@ ACTIVE. # Arguments - `stream_arn`: The ARN for a Kinesis data stream. -- `table_name`: The name of the DynamoDB table. +- `table_name`: The name of the DynamoDB table. You can also provide the Amazon Resource + Name (ARN) of the table in this parameter. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EnableKinesisStreamingConfiguration"`: The source for the Kinesis streaming information + that is being enabled. """ function enable_kinesis_streaming_destination( StreamArn, TableName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1288,6 +1384,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys statement response. - `"Parameters"`: The parameters for the PartiQL statement, if any. - `"ReturnConsumedCapacity"`: +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for an ExecuteStatement operation that failed a condition check. There is no + additional cost associated with requesting a return value aside from the small network and + processing overhead of receiving a larger response. No read capacity units are consumed. 
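+
+# Example
+A minimal, hand-written PartiQL sketch, not part of the generated service definition; the
+table name, attribute, and parameter value below are hypothetical placeholders:
+
+    execute_statement(
+        "SELECT * FROM Music WHERE Artist = ?",
+        Dict{String,Any}("Parameters" => [Dict("S" => "Acme Band")]),  # one AttributeValue per ? marker
+    )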
""" function execute_statement(Statement; aws_config::AbstractAWSConfig=global_aws_config()) return dynamodb( @@ -1394,8 +1494,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ExportTime"`: Time in the past from which to export table data, counted in seconds from the start of the Unix epoch. The table export will be a snapshot of the table's state at this point in time. +- `"ExportType"`: Choice of whether to execute as a full export or incremental export. + Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. If + INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also be used. +- `"IncrementalExportSpecification"`: Optional object containing the parameters specific to + an incremental export. - `"S3BucketOwner"`: The ID of the Amazon Web Services account that owns the bucket the - export will be stored in. + export will be stored in. S3BucketOwner is a required parameter when exporting to a S3 + bucket in another account. - `"S3Prefix"`: The Amazon S3 bucket prefix to use as the file name and path of the exported snapshot. - `"S3SseAlgorithm"`: Type of encryption used on the bucket where export data will be @@ -1457,7 +1563,8 @@ always returns the last updated value. example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key. -- `table_name`: The name of the table containing the requested item. +- `table_name`: The name of the table containing the requested item. You can also provide + the Amazon Resource Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1515,6 +1622,56 @@ function get_item( ) end +""" + get_resource_policy(resource_arn) + get_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Returns the resource-based policy document attached to the resource, which can be a table +or stream, in JSON format. GetResourcePolicy follows an eventually consistent model. The +following list describes the outcomes when you issue the GetResourcePolicy request +immediately after issuing another request: If you issue a GetResourcePolicy request +immediately after a PutResourcePolicy request, DynamoDB might return a +PolicyNotFoundException. If you issue a GetResourcePolicyrequest immediately after a +DeleteResourcePolicy request, DynamoDB might return the policy that was present before the +deletion request. If you issue a GetResourcePolicy request immediately after a +CreateTable request, which includes a resource-based policy, DynamoDB might return a +ResourceNotFoundException or a PolicyNotFoundException. Because GetResourcePolicy uses an +eventually consistent query, the metadata for your policy or table might not be available +at that moment. Wait for a few seconds, and then retry the GetResourcePolicy request. After +a GetResourcePolicy request returns a policy created using the PutResourcePolicy request, +the policy will be applied in the authorization of requests to the resource. Because this +process is eventually consistent, it will take some time to apply the policy to all +requests to a resource. Policies that you attach while creating a table using the +CreateTable request will always be applied to all requests for that table. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the DynamoDB resource to which the + policy is attached. 
The resources you can specify include tables and streams. + +""" +function get_resource_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()) + return dynamodb( + "GetResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return dynamodb( + "GetResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ import_table(input_format, s3_bucket_source, table_creation_parameters) import_table(input_format, s3_bucket_source, table_creation_parameters, params::Dict{String,<:Any}) @@ -1589,12 +1746,14 @@ end list_backups() list_backups(params::Dict{String,<:Any}) -List backups associated with an Amazon Web Services account. To list backups for a given -table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB -worth of items in a page. You can also specify a maximum number of entries to be returned -in a page. In the request, start time is inclusive, but end time is exclusive. Note that -these boundaries are for the time at which the original backup was requested. You can call -ListBackups a maximum of five times per second. +List DynamoDB backups that are associated with an Amazon Web Services account and weren't +made with Amazon Web Services Backup. To list these backups for a given table, specify +TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items +in a page. You can also specify a maximum number of entries to be returned in a page. In +the request, start time is inclusive, but end time is exclusive. Note that these boundaries +are for the time at which the original backup was requested. You can call ListBackups a +maximum of five times per second. If you want to retrieve the complete list of backups made +with Amazon Web Services Backup, use the Amazon Web Services Backup list API. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1607,7 +1766,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results. - `"Limit"`: Maximum number of backups to return at once. -- `"TableName"`: The backups from the table specified by TableName are listed. +- `"TableName"`: Lists the backups from the table specified in TableName. You can also + provide the Amazon Resource Name (ARN) of the table in this parameter. - `"TimeRangeLowerBound"`: Only backups created after this time are listed. TimeRangeLowerBound is inclusive. - `"TimeRangeUpperBound"`: Only backups created before this time are listed. @@ -1635,7 +1795,8 @@ indexes. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: Maximum number of results to return per page. - `"NextToken"`: A token to for the desired page, if there is one. -- `"TableName"`: The name of the table. +- `"TableName"`: The name of the table. You can also provide the Amazon Resource Name (ARN) + of the table in this parameter. 
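A minimal sketch of the Contributor Insights listing described above, assuming the wrapper is loaded with `@service` and using a hypothetical table name:

```julia
using AWS
@service DynamoDB

# List Contributor Insights for one table (or table ARN) by passing the
# optional TableName key through the params dictionary.
DynamoDB.list_contributor_insights(Dict("TableName" => "Music"))
```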
""" function list_contributor_insights(; aws_config::AbstractAWSConfig=global_aws_config()) return dynamodb( @@ -1682,13 +1843,13 @@ end list_global_tables() list_global_tables(params::Dict{String,<:Any}) -Lists all global tables that have a replica in the specified Region. This operation only -applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version -2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, -higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine -which version you are using, see Determining the version. To update existing global tables -from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global -tables. +Lists all global tables that have a replica in the specified Region. This documentation is +for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global +tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, +because it provides greater flexibility, higher efficiency, and consumes less write +capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining +the global table version you are using. To update existing global tables from version +2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1841,7 +2002,8 @@ information about PutItem, see Working with Items in the Amazon DynamoDB Develop have a length greater than zero if the attribute is used as a key attribute for a table or index. For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide. Each element in the Item map is an AttributeValue object. -- `table_name`: The name of the table to contain the item. +- `table_name`: The name of the table to contain the item. You can also provide the Amazon + Resource Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1895,6 +2057,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys response. No read capacity units are consumed. The ReturnValues parameter is used by several DynamoDB operations; however, PutItem does not recognize any values other than NONE or ALL_OLD. +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for a PutItem operation that failed a condition check. There is no additional + cost associated with requesting a return value aside from the small network and processing + overhead of receiving a larger response. No read capacity units are consumed. """ function put_item(Item, TableName; aws_config::AbstractAWSConfig=global_aws_config()) return dynamodb( @@ -1922,6 +2088,80 @@ function put_item( ) end +""" + put_resource_policy(policy, resource_arn) + put_resource_policy(policy, resource_arn, params::Dict{String,<:Any}) + +Attaches a resource-based policy document to the resource, which can be a table or stream. +When you attach a resource-based policy using this API, the policy application is +eventually consistent . PutResourcePolicy is an idempotent operation; running it multiple +times on the same resource using the same policy document will return the same revision ID. 
+If you specify an ExpectedRevisionId that doesn't match the current policy's RevisionId, +the PolicyNotFoundException will be returned. PutResourcePolicy is an asynchronous +operation. If you issue a GetResourcePolicy request immediately after a PutResourcePolicy +request, DynamoDB might return your previous policy, if there was one, or return the +PolicyNotFoundException. This is because GetResourcePolicy uses an eventually consistent +query, and the metadata for your policy or table might not be available at that moment. +Wait for a few seconds, and then try the GetResourcePolicy request again. + +# Arguments +- `policy`: An Amazon Web Services resource-based policy document in JSON format. The + maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts + whitespaces when calculating the size of a policy against this limit. Within a + resource-based policy, if the action for a DynamoDB service-linked role (SLR) to replicate + data for a global table is denied, adding or deleting a replica will fail with an error. + For a full list of all considerations that apply while attaching a resource-based policy, + see Resource-based policy considerations. +- `resource_arn`: The Amazon Resource Name (ARN) of the DynamoDB resource to which the + policy will be attached. The resources you can specify include tables and streams. You can + control index permissions using the base table's policy. To specify the same permission + level for your table and its indexes, you can provide both the table and index Amazon + Resource Name (ARN)s in the Resource field of a given Statement in your policy document. + Alternatively, to specify different permissions for your table, indexes, or both, you can + define multiple Statement fields in your policy document. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ConfirmRemoveSelfResourceAccess"`: Set this parameter to true to confirm that you want + to remove your permissions to change the policy of this resource in the future. +- `"ExpectedRevisionId"`: A string value that you can use to conditionally update your + policy. You can provide the revision ID of your existing policy to make mutating requests + against that policy. When you provide an expected revision ID, if the revision ID of the + existing policy on the resource doesn't match or if there's no policy attached to the + resource, your request will be rejected with a PolicyNotFoundException. To conditionally + attach a policy when no policy exists for the resource, specify NO_POLICY for the revision + ID. +""" +function put_resource_policy( + Policy, ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return dynamodb( + "PutResourcePolicy", + Dict{String,Any}("Policy" => Policy, "ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_resource_policy( + Policy, + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return dynamodb( + "PutResourcePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Policy" => Policy, "ResourceArn" => ResourceArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ query(table_name) query(table_name, params::Dict{String,<:Any}) @@ -1961,7 +2201,8 @@ secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index. 
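As a rough usage sketch, a Query call that supplies the required KeyConditionExpression and reverses the sort-key order might look like this; the table and attribute names are hypothetical, and the wrapper is assumed to be loaded via `@service`.

```julia
using AWS
@service DynamoDB

# Return up to 10 items for one partition key value from a hypothetical
# "Music" table, newest sort-key values first (ScanIndexForward = false).
# ConsistentRead is left at its default because this reads the base table.
DynamoDB.query(
    "Music",
    Dict(
        "KeyConditionExpression" => "Artist = :artist",
        "ExpressionAttributeValues" => Dict(":artist" => Dict("S" => "Acme Band")),
        "ScanIndexForward" => false,
        "Limit" => 10,
    ),
)
```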
# Arguments -- `table_name`: The name of the table containing the requested items. +- `table_name`: The name of the table containing the requested items. You can also provide + the Amazon Resource Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2146,6 +2387,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"LocalSecondaryIndexOverride"`: List of local secondary indexes for the restored table. The indexes provided should match existing secondary indexes. You can choose to exclude some or all of the indexes at the time of restore. +- `"OnDemandThroughputOverride"`: - `"ProvisionedThroughputOverride"`: Provisioned throughput settings for the restored table. - `"SSESpecificationOverride"`: The new server-side encryption settings for the restored table. @@ -2188,10 +2430,10 @@ end Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any -point in time during the last 35 days. Any number of users can execute up to 4 concurrent -restores (any type of restore) in a given account. When you restore using point in time +point in time during the last 35 days. Any number of users can execute up to 50 concurrent +restores (any type of restore) in a given account. When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and -time (day:hour:minute:second) to a new table. Along with data, the following are also +time (day:hour:minute:second) to a new table. Along with data, the following are also included on the new restored table using point in time recovery: Global secondary indexes (GSIs) Local secondary indexes (LSIs) Provisioned read and write capacity Encryption settings All these settings come from the current settings of the source table @@ -2211,6 +2453,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"LocalSecondaryIndexOverride"`: List of local secondary indexes for the restored table. The indexes provided should match existing secondary indexes. You can choose to exclude some or all of the indexes at the time of restore. +- `"OnDemandThroughputOverride"`: - `"ProvisionedThroughputOverride"`: Provisioned throughput settings for the restored table. - `"RestoreDateTime"`: Time in the past to restore the table to. - `"SSESpecificationOverride"`: The new server-side encryption settings for the restored @@ -2254,26 +2497,37 @@ end The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a -FilterExpression operation. If the total number of scanned items exceeds the maximum -dataset size limit of 1 MB, the scan stops and results are returned to the user as a -LastEvaluatedKey value to continue the scan in a subsequent operation. The results also -include the number of items exceeding the limit. A scan can result in no table data meeting -the filter criteria. A single Scan operation reads up to the maximum number of items set -(if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to -the results using FilterExpression. If LastEvaluatedKey is present in the response, you -need to paginate the result set. For more information, see Paginating the Results in the -Amazon DynamoDB Developer Guide. 
Scan operations proceed sequentially; however, for -faster performance on a large table or secondary index, applications can request a parallel -Scan operation by providing the Segment and TotalSegments parameters. For more information, -see Parallel Scan in the Amazon DynamoDB Developer Guide. Scan uses eventually consistent -reads when accessing the data in a table; therefore, the result set might not include the -changes to data in the table immediately before the operation began. If you need a -consistent copy of the data, as of the time that the Scan begins, you can set the -ConsistentRead parameter to true. +FilterExpression operation. If the total size of scanned items exceeds the maximum dataset +size limit of 1 MB, the scan completes and results are returned to the user. The +LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to +continue the scan in a subsequent operation. Each scan response also includes number of +items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, +a scan result can result in no items meeting the criteria and the Count will result in +zero. If you did not use a FilterExpression in the scan request, then Count is the same as +ScannedCount. Count and ScannedCount only return the count of items specific to a single +scan request and, unless the table is less than 1MB, do not represent the total number of +items in the table. A single Scan operation first reads up to the maximum number of items +set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any +filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present +in the response, pagination is required to complete the full table scan. For more +information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan +operations proceed sequentially; however, for faster performance on a large table or +secondary index, applications can request a parallel Scan operation by providing the +Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon +DynamoDB Developer Guide. By default, a Scan uses eventually consistent reads when +accessing the items in a table. Therefore, the results from an eventually consistent Scan +may not include the latest item changes at the time the scan iterates through each item in +the table. If you require a strongly consistent read of each item as the scan iterates +through the items in the table, you can set the ConsistentRead parameter to true. Strong +consistency only relates to the consistency of the read at the item level. DynamoDB does +not provide snapshot isolation for a scan operation when the ConsistentRead parameter is +set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan +see a consistent snapshot of the table when the scan operation was requested. # Arguments -- `table_name`: The name of the table containing the requested items; or, if you provide - IndexName, the name of the table to which that index belongs. +- `table_name`: The name of the table containing the requested items or if you provide + IndexName, the name of the table to which that index belongs. You can also provide the + Amazon Resource Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2665,7 +2919,8 @@ your table to any point in time during the last 35 days. 
# Arguments - `point_in_time_recovery_specification`: Represents the settings used to enable point in time recovery. -- `table_name`: The name of the table. +- `table_name`: The name of the table. You can also provide the Amazon Resource Name (ARN) + of the table in this parameter. """ function update_continuous_backups( @@ -2720,7 +2975,8 @@ for this table. # Arguments - `contributor_insights_action`: Represents the contributor insights action. -- `table_name`: The name of the table. +- `table_name`: The name of the table. You can also provide the Amazon Resource Name (ARN) + of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2769,20 +3025,21 @@ end Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the -same provisioned and maximum write capacity units. This operation only applies to Version -2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when -creating new global tables, as it provides greater flexibility, higher efficiency and -consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are -using, see Determining the version. To update existing global tables from version -2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. This -operation only applies to Version 2017.11.29 of global tables. If you are using global -tables Version 2019.11.21 you can use DescribeTable instead. Although you can use -UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity -we recommend that you issue separate requests for adding or removing replicas. If global -secondary indexes are specified, then the following conditions must also be met: The -global secondary indexes must have the same name. The global secondary indexes must -have the same hash key and sort key (if present). The global secondary indexes must -have the same provisioned and maximum write capacity units. +same provisioned and maximum write capacity units. This documentation is for version +2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. +Customers should use Global Tables version 2019.11.21 (Current) when possible, because it +provides greater flexibility, higher efficiency, and consumes less write capacity than +2017.11.29 (Legacy). To determine which version you're using, see Determining the global +table version you are using. To update existing global tables from version 2017.11.29 +(Legacy) to version 2019.11.21 (Current), see Upgrading global tables. For global +tables, this operation only applies to global tables using Version 2019.11.21 (Current +version). If you are using global tables Version 2019.11.21 you can use UpdateTable +instead. Although you can use UpdateGlobalTable to add replicas and remove replicas in a +single request, for simplicity we recommend that you issue separate requests for adding or +removing replicas. If global secondary indexes are specified, then the following +conditions must also be met: The global secondary indexes must have the same name. +The global secondary indexes must have the same hash key and sort key (if present). The +global secondary indexes must have the same provisioned and maximum write capacity units. 
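For the 2017.11.29 (Legacy) version described above, adding and removing replicas might look like the following sketch; the table name and Regions are placeholders, and the wrapper is assumed to be loaded via `@service`.

```julia
using AWS
@service DynamoDB

# Legacy global tables only: each ReplicaUpdates entry carries either a
# Create or a Delete action for one Region.
DynamoDB.update_global_table(
    "Music",
    [
        Dict("Create" => Dict("RegionName" => "eu-west-1")),
        Dict("Delete" => Dict("RegionName" => "us-west-2")),
    ],
)
```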
# Arguments - `global_table_name`: The global table name. @@ -2828,12 +3085,13 @@ end update_global_table_settings(global_table_name) update_global_table_settings(global_table_name, params::Dict{String,<:Any}) -Updates settings for a global table. This operation only applies to Version 2017.11.29 -(Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating -new global tables, as it provides greater flexibility, higher efficiency and consumes less -write capacity than 2017.11.29 (Legacy). To determine which version you are using, see -Determining the version. To update existing global tables from version 2017.11.29 (Legacy) -to version 2019.11.21 (Current), see Updating global tables. +Updates settings for a global table. This documentation is for version 2017.11.29 (Legacy) +of global tables, which should be avoided for new global tables. Customers should use +Global Tables version 2019.11.21 (Current) when possible, because it provides greater +flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). +To determine which version you're using, see Determining the global table version you are +using. To update existing global tables from version 2017.11.29 (Legacy) to version +2019.11.21 (Current), see Upgrading global tables. # Arguments - `global_table_name`: The name of the global table @@ -2843,9 +3101,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"GlobalTableBillingMode"`: The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode. PROVISIONED - We recommend using PROVISIONED for predictable workloads. - PROVISIONED sets the billing mode to Provisioned Mode. PAY_PER_REQUEST - We recommend - using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to - On-Demand Mode. + PROVISIONED sets the billing mode to Provisioned capacity mode. PAY_PER_REQUEST - We + recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the + billing mode to On-demand capacity mode. - `"GlobalTableGlobalSecondaryIndexSettingsUpdate"`: Represents the settings of a global secondary index for a global table that will be modified. - `"GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate"`: Auto scaling settings @@ -2899,7 +3157,8 @@ ReturnValues parameter. attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key. -- `table_name`: The name of the table containing the item to update. +- `table_name`: The name of the table containing the item to update. You can also provide + the Amazon Resource Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2956,6 +3215,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys operation. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed. The values returned are strongly consistent. +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for an UpdateItem operation that failed a condition check. 
There is no + additional cost associated with requesting a return value aside from the small network and + processing overhead of receiving a larger response. No read capacity units are consumed. - `"UpdateExpression"`: An expression that defines one or more attributes to be updated, the action to be performed on them, and new values for them. The following action values are available for UpdateExpression. SET - Adds one or more attributes and values to an @@ -3023,22 +3286,70 @@ function update_item( ) end +""" + update_kinesis_streaming_destination(stream_arn, table_name) + update_kinesis_streaming_destination(stream_arn, table_name, params::Dict{String,<:Any}) + +The command to update the Kinesis stream destination. + +# Arguments +- `stream_arn`: The Amazon Resource Name (ARN) for the Kinesis stream input. +- `table_name`: The table name for the Kinesis streaming destination input. You can also + provide the ARN of the table in this parameter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"UpdateKinesisStreamingConfiguration"`: The command to update the Kinesis stream + configuration. +""" +function update_kinesis_streaming_destination( + StreamArn, TableName; aws_config::AbstractAWSConfig=global_aws_config() +) + return dynamodb( + "UpdateKinesisStreamingDestination", + Dict{String,Any}("StreamArn" => StreamArn, "TableName" => TableName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_kinesis_streaming_destination( + StreamArn, + TableName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return dynamodb( + "UpdateKinesisStreamingDestination", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("StreamArn" => StreamArn, "TableName" => TableName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_table(table_name) update_table(table_name, params::Dict{String,<:Any}) Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams -settings for a given table. This operation only applies to Version 2019.11.21 (Current) of -global tables. You can only perform one of the following operations at once: Modify the -provisioned throughput settings of the table. Remove a global secondary index from the -table. Create a new global secondary index on the table. After the index begins -backfilling, you can use UpdateTable to perform other operations. UpdateTable is an -asynchronous operation; while it is executing, the table status changes from ACTIVE to -UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the -table returns to the ACTIVE state, the UpdateTable operation is complete. +settings for a given table. For global tables, this operation only applies to global +tables using Version 2019.11.21 (Current version). You can only perform one of the +following operations at once: Modify the provisioned throughput settings of the table. +Remove a global secondary index from the table. Create a new global secondary index on +the table. After the index begins backfilling, you can use UpdateTable to perform other +operations. UpdateTable is an asynchronous operation; while it's executing, the table +status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another +UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation +is complete. 
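An illustrative sketch of such an update, changing only the provisioned throughput of a hypothetical table and assuming the wrapper is loaded via `@service`; the parameter keys are described in the list that follows.

```julia
using AWS
@service DynamoDB

# Raise the read/write capacity of a hypothetical provisioned-mode "Music"
# table.  The call returns while the table is still in the UPDATING state.
DynamoDB.update_table(
    "Music",
    Dict(
        "ProvisionedThroughput" => Dict(
            "ReadCapacityUnits" => 10,
            "WriteCapacityUnits" => 5,
        ),
    ),
)
```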
# Arguments -- `table_name`: The name of the table to be updated. +- `table_name`: The name of the table to be updated. You can also provide the Amazon + Resource Name (ARN) of the table in this parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3050,9 +3361,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes. PROVISIONED - We recommend using PROVISIONED for - predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. + predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. - PAY_PER_REQUEST sets the billing mode to On-Demand Mode. + PAY_PER_REQUEST sets the billing mode to On-demand capacity mode. - `"DeletionProtectionEnabled"`: Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table. - `"GlobalSecondaryIndexUpdates"`: An array of one or more global secondary indexes for the @@ -3062,14 +3373,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys table. You can create or delete only one global secondary index per UpdateTable operation. For more information, see Managing Global Secondary Indexes in the Amazon DynamoDB Developer Guide. +- `"OnDemandThroughput"`: Updates the maximum number of read and write units for the + specified table in on-demand capacity mode. If you use this parameter, you must specify + MaxReadRequestUnits, MaxWriteRequestUnits, or both. - `"ProvisionedThroughput"`: The new provisioned throughput settings for the specified table or index. - `"ReplicaUpdates"`: A list of replica update actions (create, delete, or update) for the - table. This property only applies to Version 2019.11.21 (Current) of global tables. + table. For global tables, this property only applies to global tables using Version + 2019.11.21 (Current version). - `"SSESpecification"`: The new server-side encryption settings for the specified table. - `"StreamSpecification"`: Represents the DynamoDB Streams configuration for the table. - You receive a ResourceInUseException if you try to enable a stream on a table that already - has a stream, or if you try to disable a stream on a table that doesn't have a stream. + You receive a ValidationException if you try to enable a stream on a table that already has + a stream, or if you try to disable a stream on a table that doesn't have a stream. - `"TableClass"`: The table class of the table to be updated. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. """ @@ -3100,11 +3415,12 @@ end update_table_replica_auto_scaling(table_name) update_table_replica_auto_scaling(table_name, params::Dict{String,<:Any}) -Updates auto scaling settings on your global tables at once. This operation only applies -to Version 2019.11.21 (Current) of global tables. +Updates auto scaling settings on your global tables at once. For global tables, this +operation only applies to global tables using Version 2019.11.21 (Current version). # Arguments -- `table_name`: The name of the global table to be updated. +- `table_name`: The name of the global table to be updated. You can also provide the Amazon + Resource Name (ARN) of the table in this parameter. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3161,7 +3477,8 @@ delete operation. For more information, see Time To Live in the Amazon DynamoDB Guide. # Arguments -- `table_name`: The name of the table to be configured. +- `table_name`: The name of the table to be configured. You can also provide the Amazon + Resource Name (ARN) of the table in this parameter. - `time_to_live_specification`: Represents the settings used to enable or disable Time to Live for the specified table. diff --git a/src/services/ebs.jl b/src/services/ebs.jl index 713052390c..fc47935585 100644 --- a/src/services/ebs.jl +++ b/src/services/ebs.jl @@ -10,7 +10,10 @@ using AWS.UUIDs Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks -to a snapshot after it has been completed. +to a snapshot after it has been completed. You should always retry requests that receive +server (5xx) error responses, and ThrottlingException and RequestThrottledException client +error responses. For more information see Error retries in the Amazon Elastic Compute Cloud +User Guide. # Arguments - `snapshot_id`: The ID of the snapshot. @@ -71,7 +74,10 @@ end get_snapshot_block(block_index, block_token, snapshot_id) get_snapshot_block(block_index, block_token, snapshot_id, params::Dict{String,<:Any}) -Returns the data in a block in an Amazon Elastic Block Store snapshot. +Returns the data in a block in an Amazon Elastic Block Store snapshot. You should always +retry requests that receive server (5xx) error responses, and ThrottlingException and +RequestThrottledException client error responses. For more information see Error retries in +the Amazon Elastic Compute Cloud User Guide. # Arguments - `block_index`: The block index of the block in which to read the data. A block index is a @@ -120,7 +126,10 @@ end list_changed_blocks(second_snapshot_id, params::Dict{String,<:Any}) Returns information about the blocks that are different between two Amazon Elastic Block -Store snapshots of the same volume/snapshot lineage. +Store snapshots of the same volume/snapshot lineage. You should always retry requests that +receive server (5xx) error responses, and ThrottlingException and RequestThrottledException +client error responses. For more information see Error retries in the Amazon Elastic +Compute Cloud User Guide. # Arguments - `second_snapshot_id`: The ID of the second snapshot to use for the comparison. The @@ -171,7 +180,10 @@ end list_snapshot_blocks(snapshot_id) list_snapshot_blocks(snapshot_id, params::Dict{String,<:Any}) -Returns information about the blocks in an Amazon Elastic Block Store snapshot. +Returns information about the blocks in an Amazon Elastic Block Store snapshot. You should +always retry requests that receive server (5xx) error responses, and ThrottlingException +and RequestThrottledException client error responses. For more information see Error +retries in the Amazon Elastic Compute Cloud User Guide. # Arguments - `snapshot_id`: The ID of the snapshot from which to get block indexes and block tokens. @@ -217,7 +229,10 @@ end Writes a block of data to a snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state. Data written to a -snapshot must be aligned with 512-KiB sectors. +snapshot must be aligned with 512-KiB sectors. 
You should always retry requests that +receive server (5xx) error responses, and ThrottlingException and RequestThrottledException +client error responses. For more information see Error retries in the Amazon Elastic +Compute Cloud User Guide. # Arguments - `block_data`: The data to write to the block. The block data is not signed as part of the @@ -309,7 +324,9 @@ end Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes. After creating the snapshot, use PutSnapshotBlock to write blocks of -data to the snapshot. +data to the snapshot. You should always retry requests that receive server (5xx) error +responses, and ThrottlingException and RequestThrottledException client error responses. +For more information see Error retries in the Amazon Elastic Compute Cloud User Guide. # Arguments - `volume_size`: The size of the volume, in GiB. The maximum size is 65536 GiB (64 TiB). diff --git a/src/services/ec2.jl b/src/services/ec2.jl index 2cd4b2809a..505cf5fd84 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -9,7 +9,7 @@ using AWS.UUIDs accept_address_transfer(address, params::Dict{String,<:Any}) Accepts an Elastic IP address transfer. For more information, see Accept a transferred -Elastic IP address in the Amazon Virtual Private Cloud User Guide. +Elastic IP address in the Amazon VPC User Guide. # Arguments - `address`: The Elastic IP address you are accepting for transfer. @@ -339,9 +339,17 @@ advertising the BYOIP CIDR, use WithdrawByoipCidr. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Asn"`: The public 2-byte or 4-byte ASN that you want to advertise. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: If you have Local Zones enabled, you can choose a network border + group for Local Zones when you provision and advertise a BYOIPv4 CIDR. Choose the network + border group carefully as the EIP and the Amazon Web Services resource it is associated + with must reside in the same network border group. You can provision BYOIP address ranges + to and advertise them in the following Local Zone network border groups: us-east-1-dfw-2 + us-west-2-lax-1 us-west-2-phx-2 You cannot provision or advertise BYOIPv6 address + ranges in Local Zones at this time. """ function advertise_byoip_cidr(Cidr; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -373,14 +381,14 @@ to a different Amazon Web Services account. You can allocate an Elastic IP addre address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see -Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide. If you -release an Elastic IP address, you might be able to recover it. You cannot recover an -Elastic IP address that you released after it is allocated to another Amazon Web Services -account. To attempt to recover an Elastic IP address that you released, specify it in this -operation. For more information, see Elastic IP Addresses in the Amazon Elastic Compute -Cloud User Guide. 
You can allocate a carrier IP address which is a public IP address from a -telecommunication carrier, to a network interface which resides in a subnet in a Wavelength -Zone (for example an EC2 instance). +Bring Your Own IP Addresses (BYOIP) in the Amazon EC2 User Guide. If you release an Elastic +IP address, you might be able to recover it. You cannot recover an Elastic IP address that +you released after it is allocated to another Amazon Web Services account. To attempt to +recover an Elastic IP address that you released, specify it in this operation. For more +information, see Elastic IP Addresses in the Amazon EC2 User Guide. You can allocate a +carrier IP address which is a public IP address from a telecommunication carrier, to a +network interface which resides in a subnet in a Wavelength Zone (for example an EC2 +instance). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -392,9 +400,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NetworkBorderGroup"`: A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. - Use DescribeAvailabilityZones to view the network border groups. You cannot use a network - border group with EC2 Classic. If you attempt this operation on EC2 Classic, you receive an - InvalidParameterCombination error. - `"PublicIpv4Pool"`: The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool. To specify a specific address from the address pool, use the Address parameter instead. @@ -415,8 +420,8 @@ function allocate_address( end """ - allocate_hosts(availability_zone, quantity) - allocate_hosts(availability_zone, quantity, params::Dict{String,<:Any}) + allocate_hosts(availability_zone) + allocate_hosts(availability_zone, params::Dict{String,<:Any}) Allocates a Dedicated Host to your account. At a minimum, specify the supported instance type or instance family, the Availability Zone in which to allocate the host, and the @@ -424,11 +429,16 @@ number of hosts to allocate. # Arguments - `availability_zone`: The Availability Zone in which to allocate the Dedicated Host. -- `quantity`: The number of Dedicated Hosts to allocate to your account with these - parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AssetId"`: The IDs of the Outpost hardware assets on which to allocate the Dedicated + Hosts. Targeting specific hardware assets on an Outpost can help to minimize latency + between your workloads. This parameter is supported only if you specify OutpostArn. If you + are allocating the Dedicated Hosts in a Region, omit this parameter. If you specify this + parameter, you can omit Quantity. In this case, Amazon EC2 allocates a Dedicated Host on + each specified hardware asset. If you specify both AssetIds and Quantity, then the value + for Quantity must be equal to the number of asset IDs specified. - `"HostMaintenance"`: Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide. - `"HostRecovery"`: Indicates whether to enable or disable host recovery for the Dedicated @@ -440,12 +450,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys type only, omit this parameter and specify InstanceType instead. You cannot specify InstanceFamily and InstanceType in the same request. - `"OutpostArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services Outpost on - which to allocate the Dedicated Host. + which to allocate the Dedicated Host. If you specify OutpostArn, you can optionally specify + AssetIds. If you are allocating the Dedicated Host in a Region, omit this parameter. - `"TagSpecification"`: The tags to apply to the Dedicated Host during creation. - `"autoPlacement"`: Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. For more information, see Understanding - auto-placement and affinity in the Amazon EC2 User Guide. Default: on + auto-placement and affinity in the Amazon EC2 User Guide. Default: off - `"clientToken"`: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. - `"instanceType"`: Specifies the instance type to be supported by the Dedicated Hosts. If @@ -453,20 +464,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance type only. If you want the Dedicated Hosts to support multiple instance types in a specific instance family, omit this parameter and specify InstanceFamily instead. You cannot specify InstanceType and InstanceFamily in the same request. +- `"quantity"`: The number of Dedicated Hosts to allocate to your account with these + parameters. If you are allocating the Dedicated Hosts on an Outpost, and you specify + AssetIds, you can omit this parameter. In this case, Amazon EC2 allocates a Dedicated Host + on each specified hardware asset. If you specify both AssetIds and Quantity, then the value + that you specify for Quantity must be equal to the number of asset IDs specified. """ -function allocate_hosts( - availabilityZone, quantity; aws_config::AbstractAWSConfig=global_aws_config() -) +function allocate_hosts(availabilityZone; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( "AllocateHosts", - Dict{String,Any}("availabilityZone" => availabilityZone, "quantity" => quantity); + Dict{String,Any}("availabilityZone" => availabilityZone); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function allocate_hosts( availabilityZone, - quantity, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -474,11 +487,7 @@ function allocate_hosts( "AllocateHosts", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "availabilityZone" => availabilityZone, "quantity" => quantity - ), - params, + _merge, Dict{String,Any}("availabilityZone" => availabilityZone), params ), ); aws_config=aws_config, @@ -502,6 +511,8 @@ with any other allocations from the same pool. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowedCidr"`: Include a particular CIDR range that can be returned by the pool. + Allowed CIDRs are only allowed if using netmask length for allocation. - `"Cidr"`: The CIDR you would like to allocate from the IPAM pool. Note the following: If there is no DefaultNetmaskLength allocation rule set on the pool, you must specify either the NetmaskLength or the CIDR. If the DefaultNetmaskLength allocation rule is set @@ -509,7 +520,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys DefaultNetmaskLength allocation rule will be ignored. Possible values: Any available IPv4 or IPv6 CIDR. - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see Ensuring Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"Description"`: A description for the allocation. - `"DisallowedCidr"`: Exclude a particular CIDR range from being returned by the pool. Disallowed CIDRs are only allowed if using netmask length for allocation. @@ -626,12 +637,11 @@ Assigns one or more IPv6 addresses to the specified network interface. You can s or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and -the limit varies per instance type. For information, see IP Addresses Per Network Interface -Per Instance Type in the Amazon Elastic Compute Cloud User Guide. You must specify either -the IPv6 addresses or the IPv6 address count in the request. You can optionally use Prefix -Delegation on the network interface. You must specify either the IPV6 Prefix Delegation -prefixes, or the IPv6 Prefix Delegation count. For information, see Assigning prefixes to -Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide. +the limit varies per instance type. You must specify either the IPv6 addresses or the IPv6 +address count in the request. You can optionally use Prefix Delegation on the network +interface. You must specify either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix +Delegation count. For information, see Assigning prefixes to network interfaces in the +Amazon EC2 User Guide. # Arguments - `network_interface_id`: The ID of the network interface. @@ -686,18 +696,16 @@ Assigns one or more secondary private IP addresses to the specified network inte can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance -type. For information about instance types, see Instance Types in the Amazon Elastic -Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP -Addresses in the Amazon Elastic Compute Cloud User Guide. When you move a secondary private -IP address to another network interface, any Elastic IP address that is associated with the -IP address is also moved. Remapping an IP address is an asynchronous operation. When you -move an IP address from one network interface to another, check -network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the -remapping is complete. You must specify either the IP addresses or the IP address count in -the request. You can optionally use Prefix Delegation on the network interface. You must -specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. -For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon -Elastic Compute Cloud User Guide. +type. For more information about Elastic IP addresses, see Elastic IP Addresses in the +Amazon EC2 User Guide. 
When you move a secondary private IP address to another network +interface, any Elastic IP address that is associated with the IP address is also moved. +Remapping an IP address is an asynchronous operation. When you move an IP address from one +network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance +metadata to confirm that the remapping is complete. You must specify either the IP +addresses or the IP address count in the request. You can optionally use Prefix Delegation +on the network interface. You must specify either the IPv4 Prefix Delegation prefixes, or +the IPv4 Prefix Delegation count. For information, see Assigning prefixes to network +interfaces in the Amazon EC2 User Guide. # Arguments - `network_interface_id`: The ID of the network interface. @@ -751,11 +759,11 @@ end assign_private_nat_gateway_address(nat_gateway_id) assign_private_nat_gateway_address(nat_gateway_id, params::Dict{String,<:Any}) -Assigns one or more private IPv4 addresses to a private NAT gateway. For more information, -see Work with NAT gateways in the Amazon Virtual Private Cloud User Guide. +Assigns private IPv4 addresses to a private NAT gateway. For more information, see Work +with NAT gateways in the Amazon VPC User Guide. # Arguments -- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -860,7 +868,7 @@ associated with it. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to ensure idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -913,7 +921,7 @@ any existing instances and all new instances that you launch in that VPC use the You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance. For more -information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide. +information, see DHCP option sets in the Amazon VPC User Guide. # Arguments - `dhcp_options_id`: The ID of the DHCP options set, or default to associate no DHCP @@ -1117,6 +1125,50 @@ function associate_instance_event_window( ) end +""" + associate_ipam_byoasn(asn, cidr) + associate_ipam_byoasn(asn, cidr, params::Dict{String,<:Any}) + +Associates your Autonomous System Number (ASN) with a BYOIP CIDR that you own in the same +Amazon Web Services Region. For more information, see Tutorial: Bring your ASN to IPAM in +the Amazon VPC IPAM guide. After the association succeeds, the ASN is eligible for +advertisement. You can view the association with DescribeByoipCidrs. You can advertise the +CIDR with AdvertiseByoipCidr. + +# Arguments +- `asn`: A public 2-byte or 4-byte ASN. +- `cidr`: The BYOIP CIDR you want to associate with an ASN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function associate_ipam_byoasn(Asn, Cidr; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2( + "AssociateIpamByoasn", + Dict{String,Any}("Asn" => Asn, "Cidr" => Cidr); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_ipam_byoasn( + Asn, + Cidr, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "AssociateIpamByoasn", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("Asn" => Asn, "Cidr" => Cidr), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_ipam_resource_discovery(ipam_id, ipam_resource_discovery_id) associate_ipam_resource_discovery(ipam_id, ipam_resource_discovery_id, params::Dict{String,<:Any}) @@ -1180,15 +1232,22 @@ end associate_nat_gateway_address(allocation_id, nat_gateway_id, params::Dict{String,<:Any}) Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT -gateway. For more information, see Work with NAT gateways in the Amazon Virtual Private -Cloud User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT -gateway. You can increase the limit by requesting a quota adjustment. For more information, -see Elastic IP address quotas in the Amazon Virtual Private Cloud User Guide. +gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By +default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can +increase the limit by requesting a quota adjustment. For more information, see Elastic IP +address quotas in the Amazon VPC User Guide. When you associate an EIP or secondary EIPs +with a public NAT gateway, the network border group of the EIPs must match the network +border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not +the same, the EIP will fail to associate. You can see the network border group for the +subnet's AZ by viewing the details of the subnet. Similarly, you can view the network +border group of an EIP by viewing the details of the EIP address. For more information +about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC +User Guide. # Arguments - `allocation_id`: The allocation IDs of EIPs that you want to associate with your NAT gateway. -- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1239,7 +1298,7 @@ to your VPC with a route table in your VPC. This association causes traffic from or gateway to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table later. A route table can be associated with multiple subnets. For more information, see Route tables in -the Amazon Virtual Private Cloud User Guide. +the Amazon VPC User Guide. # Arguments - `route_table_id`: The ID of the route table. 
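A short sketch of the new BYOASN association shown above; the ASN and CIDR are documentation-reserved placeholder values, and the EC2 wrapper is assumed to be loaded with `@service`.

```julia
using AWS
@service EC2

# Associate a previously provisioned public ASN with a BYOIP CIDR in the same
# Region.  64496 and 203.0.113.0/24 are documentation-reserved placeholders.
EC2.associate_ipam_byoasn("64496", "203.0.113.0/24")

# The optional DryRun flag goes through the params dictionary.
EC2.associate_ipam_byoasn("64496", "203.0.113.0/24", Dict("DryRun" => true))
```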
@@ -1278,30 +1337,32 @@ function associate_route_table( end """ - associate_subnet_cidr_block(ipv6_cidr_block, subnet_id) - associate_subnet_cidr_block(ipv6_cidr_block, subnet_id, params::Dict{String,<:Any}) + associate_subnet_cidr_block(subnet_id) + associate_subnet_cidr_block(subnet_id, params::Dict{String,<:Any}) Associates a CIDR block with your subnet. You can only associate a single IPv6 CIDR block -with your subnet. An IPv6 CIDR block must have a prefix length of /64. +with your subnet. # Arguments -- `ipv6_cidr_block`: The IPv6 CIDR block for your subnet. The subnet must have a /64 prefix - length. - `subnet_id`: The ID of your subnet. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Ipv6IpamPoolId"`: An IPv6 IPAM pool ID. +- `"Ipv6NetmaskLength"`: An IPv6 netmask length. +- `"ipv6CidrBlock"`: The IPv6 CIDR block for your subnet. """ function associate_subnet_cidr_block( - ipv6CidrBlock, subnetId; aws_config::AbstractAWSConfig=global_aws_config() + subnetId; aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( "AssociateSubnetCidrBlock", - Dict{String,Any}("ipv6CidrBlock" => ipv6CidrBlock, "subnetId" => subnetId); + Dict{String,Any}("subnetId" => subnetId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function associate_subnet_cidr_block( - ipv6CidrBlock, subnetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1309,11 +1370,7 @@ function associate_subnet_cidr_block( return ec2( "AssociateSubnetCidrBlock", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ipv6CidrBlock" => ipv6CidrBlock, "subnetId" => subnetId), - params, - ), + mergewith(_merge, Dict{String,Any}("subnetId" => subnetId), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1500,11 +1557,10 @@ end associate_trunk_interface(branch_interface_id, trunk_interface_id) associate_trunk_interface(branch_interface_id, trunk_interface_id, params::Dict{String,<:Any}) - This API action is currently in limited preview only. If you are interested in using this -feature, contact your account manager. Associates a branch network interface with a trunk -network interface. Before you create the association, run the create-network-interface -command and set --interface-type to trunk. You must also create a network interface for -each branch network interface that you want to associate with the trunk network interface. +Associates a branch network interface with a trunk network interface. Before you create the +association, use CreateNetworkInterface command and set the interface type to trunk. You +must also create a network interface for each branch network interface that you want to +associate with the trunk network interface. # Arguments - `branch_interface_id`: The ID of the branch network interface. @@ -1513,7 +1569,7 @@ each branch network interface that you want to associate with the trunk network # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to Ensure Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. 
Otherwise, it is UnauthorizedOperation. @@ -1564,11 +1620,10 @@ end Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you -provisioned through bring your own IP addresses (BYOIP). The IPv6 CIDR block size is fixed -at /56. You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 -pool, or an Amazon-provided IPv6 CIDR block. For more information about associating CIDR -blocks with your VPC and applicable restrictions, see VPC and subnet sizing in the Amazon -Virtual Private Cloud User Guide. +provisioned through bring your own IP addresses (BYOIP). You must specify one of the +following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR +block. For more information about associating CIDR blocks with your VPC and applicable +restrictions, see IP addressing for your VPCs and subnets in the Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. @@ -1597,7 +1652,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys What is IPAM? in the Amazon VPC IPAM User Guide. - `"Ipv6Pool"`: The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block. - `"amazonProvidedIpv6CidrBlock"`: Requests an Amazon-provided IPv6 CIDR block with a /56 - prefix length for the VPC. You cannot specify the range of IPv6 addresses, or the size of + prefix length for the VPC. You cannot specify the range of IPv6 addresses or the size of the CIDR block. """ function associate_vpc_cidr_block(vpcId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1623,22 +1678,20 @@ end attach_classic_link_vpc(security_group_id, instance_id, vpc_id) attach_classic_link_vpc(security_group_id, instance_id, vpc_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more -of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC -at a time. You can only link an instance that's in the running state. An instance is -automatically unlinked from a VPC when it's stopped - you can link it to the VPC again when -you restart it. After you've linked an instance, you cannot change the VPC security groups -that are associated with it. To change the security groups, you must first unlink the -instance, and then link it again. Linking your instance to a VPC is sometimes referred to -as attaching your instance. + This action is deprecated. Links an EC2-Classic instance to a ClassicLink-enabled VPC +through one or more of the VPC security groups. You cannot link an EC2-Classic instance to +more than one VPC at a time. You can only link an instance that's in the running state. An +instance is automatically unlinked from a VPC when it's stopped - you can link it to the +VPC again when you restart it. After you've linked an instance, you cannot change the VPC +security groups that are associated with it. To change the security groups, you must first +unlink the instance, and then link it again. Linking your instance to a VPC is sometimes +referred to as attaching your instance. # Arguments -- `security_group_id`: The ID of one or more of the VPC's security groups. You cannot - specify security groups from a different VPC. 
-- `instance_id`: The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. -- `vpc_id`: The ID of a ClassicLink-enabled VPC. +- `security_group_id`: The IDs of the security groups. You cannot specify security groups + from a different VPC. +- `instance_id`: The ID of the EC2-Classic instance. +- `vpc_id`: The ID of the ClassicLink-enabled VPC. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1690,8 +1743,8 @@ end attach_internet_gateway(internet_gateway_id, vpc_id, params::Dict{String,<:Any}) Attaches an internet gateway or a virtual private gateway to a VPC, enabling connectivity -between the internet and the VPC. For more information about your VPC and internet gateway, -see the Amazon Virtual Private Cloud User Guide. +between the internet and the VPC. For more information, see Internet gateways in the Amazon +VPC User Guide. # Arguments - `internet_gateway_id`: The ID of the internet gateway. @@ -1813,7 +1866,7 @@ Amazon Web Services Verified Access instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -1864,16 +1917,15 @@ end Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name. Encrypted EBS volumes must be attached to instances that support -Amazon EBS encryption. For more information, see Amazon EBS encryption in the Amazon -Elastic Compute Cloud User Guide. After you attach an EBS volume, you must make it -available. For more information, see Make an EBS volume available for use. If a volume has -an Amazon Web Services Marketplace product code: The volume can be attached only to a -stopped instance. Amazon Web Services Marketplace product codes are copied from the -volume to the instance. You must be subscribed to the product. The instance type and -operating system of the instance must support the product. For example, you can't detach a -volume from a Windows instance and attach it to a Linux instance. For more information, -see Attach an Amazon EBS volume to an instance in the Amazon Elastic Compute Cloud User -Guide. +Amazon EBS encryption. For more information, see Amazon EBS encryption in the Amazon EBS +User Guide. After you attach an EBS volume, you must make it available. For more +information, see Make an EBS volume available for use. If a volume has an Amazon Web +Services Marketplace product code: The volume can be attached only to a stopped instance. + Amazon Web Services Marketplace product codes are copied from the volume to the instance. + You must be subscribed to the product. The instance type and operating system of the +instance must support the product. For example, you can't detach a volume from a Windows +instance and attach it to a Linux instance. For more information, see Attach an Amazon +EBS volume to an instance in the Amazon EBS User Guide. # Arguments - `device`: The device name (for example, /dev/sdh or xvdh). 
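The `attach_internet_gateway` docstring above shows the same calling convention with two required members. A short sketch with placeholder resource IDs, again assuming `@service EC2` and a configured default AWS config; the dry-run key name used for the optional-parameter variant is an assumption here, not something this hunk shows:

```julia
using AWS
@service EC2

# Placeholder IDs for an internet gateway and the VPC to attach it to.
igw_id = "igw-0123456789abcdef0"
vpc_id = "vpc-0123456789abcdef0"

# Required members only.
EC2.attach_internet_gateway(igw_id, vpc_id)

# Optional members go in a trailing Dict. The "dryRun" key is assumed;
# use whichever key this operation's docstring lists as valid.
EC2.attach_internet_gateway(igw_id, vpc_id, Dict("dryRun" => true))
```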
@@ -1993,7 +2045,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys grant all clients who successfully establish a VPN connection access to the network. Must be set to true if AccessGroupId is not specified. - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to ensure idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"Description"`: A brief description of the authorization rule. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -2043,16 +2095,19 @@ end authorize_security_group_egress(group_id) authorize_security_group_egress(group_id, params::Dict{String,<:Any}) -[VPC only] Adds the specified outbound (egress) rules to a security group for use with a -VPC. An outbound rule permits instances to send traffic to the specified IPv4 or IPv6 CIDR -address ranges, or to the instances that are associated with the specified source security -groups. When specifying an outbound rule for your security group in a VPC, the -IpPermissions must include a destination for the traffic. You specify a protocol for each -rule (for example, TCP). For the TCP and UDP protocols, you must also specify the -destination port or port range. For the ICMP protocol, you must also specify the ICMP type -and code. You can use -1 for the type or code to mean all types or all codes. Rule changes -are propagated to affected instances as quickly as possible. However, a small delay might -occur. For information about VPC security group quotas, see Amazon VPC quotas. +Adds the specified outbound (egress) rules to a security group. An outbound rule permits +instances to send traffic to the specified IPv4 or IPv6 address ranges, the IP address +ranges specified by a prefix list, or the instances that are associated with a source +security group. For more information, see Security group rules. You must specify exactly +one of the following destinations: an IPv4 or IPv6 address range, a prefix list, or a +security group. You must specify a protocol for each rule (for example, TCP). If the +protocol is TCP or UDP, you must also specify a port or port range. If the protocol is ICMP +or ICMPv6, you must also specify the ICMP type and code. Rule changes are propagated to +instances associated with the security group as quickly as possible. However, a small delay +might occur. For examples of rules that you can add to security groups for specific access +scenarios, see Security group rules for different use cases in the Amazon EC2 User Guide. +For information about security group quotas, see Amazon VPC quotas in the Amazon VPC User +Guide. # Arguments - `group_id`: The ID of the security group. @@ -2060,20 +2115,16 @@ occur. For information about VPC security group quotas, see Amazon VPC quotas. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"TagSpecification"`: The tags applied to the security group rule. -- `"cidrIp"`: Not supported. Use a set of IP permissions to specify the CIDR. +- `"cidrIp"`: Not supported. Use IP permissions instead. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. 
Otherwise, it is UnauthorizedOperation. -- `"fromPort"`: Not supported. Use a set of IP permissions to specify the port. -- `"ipPermissions"`: The sets of IP permissions. You can't specify a destination security - group and a CIDR IP address range in the same set of permissions. -- `"ipProtocol"`: Not supported. Use a set of IP permissions to specify the protocol name - or number. -- `"sourceSecurityGroupName"`: Not supported. Use a set of IP permissions to specify a - destination security group. -- `"sourceSecurityGroupOwnerId"`: Not supported. Use a set of IP permissions to specify a - destination security group. -- `"toPort"`: Not supported. Use a set of IP permissions to specify the port. +- `"fromPort"`: Not supported. Use IP permissions instead. +- `"ipPermissions"`: The permissions for the security group rules. +- `"ipProtocol"`: Not supported. Use IP permissions instead. +- `"sourceSecurityGroupName"`: Not supported. Use IP permissions instead. +- `"sourceSecurityGroupOwnerId"`: Not supported. Use IP permissions instead. +- `"toPort"`: Not supported. Use IP permissions instead. """ function authorize_security_group_egress( groupId; aws_config::AbstractAWSConfig=global_aws_config() @@ -2101,57 +2152,49 @@ end authorize_security_group_ingress(params::Dict{String,<:Any}) Adds the specified inbound (ingress) rules to a security group. An inbound rule permits -instances to receive traffic from the specified IPv4 or IPv6 CIDR address range, or from -the instances that are associated with the specified destination security groups. When -specifying an inbound rule for your security group in a VPC, the IpPermissions must include -a source for the traffic. You specify a protocol for each rule (for example, TCP). For TCP -and UDP, you must also specify the destination port or port range. For ICMP/ICMPv6, you -must also specify the ICMP/ICMPv6 type and code. You can use -1 to mean all types or all -codes. Rule changes are propagated to instances within the security group as quickly as -possible. However, a small delay might occur. For more information about VPC security group -quotas, see Amazon VPC quotas. We are retiring EC2-Classic. We recommend that you migrate -from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in -the Amazon Elastic Compute Cloud User Guide. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CidrIp"`: The IPv4 address range, in CIDR format. You can't specify this parameter when - specifying a source security group. To specify an IPv6 address range, use a set of IP - permissions. Alternatively, use a set of IP permissions to specify multiple rules and a - description for the rule. +instances to receive traffic from the specified IPv4 or IPv6 address range, the IP address +ranges that are specified by a prefix list, or the instances that are associated with a +destination security group. For more information, see Security group rules. You must +specify exactly one of the following sources: an IPv4 or IPv6 address range, a prefix list, +or a security group. You must specify a protocol for each rule (for example, TCP). If the +protocol is TCP or UDP, you must also specify a port or port range. If the protocol is ICMP +or ICMPv6, you must also specify the ICMP/ICMPv6 type and code. Rule changes are propagated +to instances associated with the security group as quickly as possible. However, a small +delay might occur. 
For examples of rules that you can add to security groups for specific +access scenarios, see Security group rules for different use cases in the Amazon EC2 User +Guide. For more information about security group quotas, see Amazon VPC quotas in the +Amazon VPC User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CidrIp"`: The IPv4 address range, in CIDR format. To specify an IPv6 address range, use + IP permissions instead. To specify multiple rules and descriptions for the rules, use IP + permissions instead. - `"FromPort"`: If the protocol is TCP or UDP, this is the start of the port range. If the - protocol is ICMP, this is the type number. A value of -1 indicates all ICMP types. If you - specify all ICMP types, you must specify all ICMP codes. Alternatively, use a set of IP - permissions to specify multiple rules and a description for the rule. -- `"GroupId"`: The ID of the security group. You must specify either the security group ID - or the security group name in the request. For security groups in a nondefault VPC, you - must specify the security group ID. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You must - specify either the security group ID or the security group name in the request. For - security groups in a nondefault VPC, you must specify the security group ID. -- `"IpPermissions"`: The sets of IP permissions. + protocol is ICMP, this is the ICMP type or -1 (all ICMP types). To specify multiple rules + and descriptions for the rules, use IP permissions instead. +- `"GroupId"`: The ID of the security group. +- `"GroupName"`: [Default VPC] The name of the security group. For security groups for a + default VPC you can specify either the ID or the name of the security group. For security + groups for a nondefault VPC, you must specify the ID of the security group. +- `"IpPermissions"`: The permissions for the security group rules. - `"IpProtocol"`: The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). - To specify icmpv6, use a set of IP permissions. [VPC only] Use -1 to specify all protocols. - If you specify -1 or a protocol other than tcp, udp, or icmp, traffic on all ports is - allowed, regardless of any ports you specify. Alternatively, use a set of IP permissions to - specify multiple rules and a description for the rule. -- `"SourceSecurityGroupName"`: [EC2-Classic, default VPC] The name of the source security - group. You can't specify this parameter in combination with the following parameters: the - CIDR IP address range, the start of the port range, the IP protocol, and the end of the - port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with - a specific IP protocol and port range, use a set of IP permissions instead. For EC2-VPC, - the source security group must be in the same VPC. -- `"SourceSecurityGroupOwnerId"`: [nondefault VPC] The Amazon Web Services account ID for - the source security group, if the source security group is in a different account. You - can't specify this parameter in combination with the following parameters: the CIDR IP - address range, the IP protocol, the start of the port range, and the end of the port range. - Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific - IP protocol and port range, use a set of IP permissions instead. -- `"TagSpecification"`: [VPC Only] The tags applied to the security group rule. 
+ To specify all protocols, use -1. To specify icmpv6, use IP permissions instead. If you + specify a protocol other than one of the supported values, traffic is allowed on all ports, + regardless of any ports that you specify. To specify multiple rules and descriptions for + the rules, use IP permissions instead. +- `"SourceSecurityGroupName"`: [Default VPC] The name of the source security group. The + rule grants full ICMP, UDP, and TCP access. To create a rule with a specific protocol and + port range, specify a set of IP permissions instead. +- `"SourceSecurityGroupOwnerId"`: The Amazon Web Services account ID for the source + security group, if the source security group is in a different account. The rule grants + full ICMP, UDP, and TCP access. To create a rule with a specific protocol and port range, + use IP permissions instead. +- `"TagSpecification"`: The tags applied to the security group rule. - `"ToPort"`: If the protocol is TCP or UDP, this is the end of the port range. If the - protocol is ICMP, this is the code. A value of -1 indicates all ICMP codes. If you specify - all ICMP types, you must specify all ICMP codes. Alternatively, use a set of IP permissions - to specify multiple rules and a description for the rule. + protocol is ICMP, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 + (all ICMP types), then the end port must be -1 (all ICMP codes). To specify multiple rules + and descriptions for the rules, use IP permissions instead. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -2186,7 +2229,7 @@ action is not applicable for Linux/Unix instances or Windows instances that are Amazon EBS. # Arguments -- `instance_id`: The ID of the instance to bundle. Type: String Default: None Required: Yes +- `instance_id`: The ID of the instance to bundle. Default: None - `storage`: The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error. @@ -2420,8 +2463,8 @@ any partially-created Amazon S3 objects. If the export task is complete or is in process of transferring the final disk image, the command fails and returns an error. # Arguments -- `export_task_id`: The ID of the export task. This is the ID returned by - CreateInstanceExportTask. +- `export_task_id`: The ID of the export task. This is the ID returned by the + CreateInstanceExportTask and ExportImage operations. """ function cancel_export_task(exportTaskId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2515,7 +2558,8 @@ end cancel_reserved_instances_listing(reserved_instances_listing_id, params::Dict{String,<:Any}) Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace. For -more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide. +more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User +Guide. # Arguments - `reserved_instances_listing_id`: The ID of the Reserved Instance listing. @@ -2561,10 +2605,12 @@ Fleet launches no new instances. You must also specify whether a canceled Spot F request should terminate its instances. If you choose to terminate the instances, the Spot Fleet request enters the cancelled_terminating state. 
Otherwise, the Spot Fleet request enters the cancelled_running state and the instances continue to run until they are -interrupted or you terminate them manually. +interrupted or you terminate them manually. Restrictions You can delete up to 100 +fleets in a single request. If you exceed the specified number, no fleets are deleted. # Arguments -- `spot_fleet_request_id`: The IDs of the Spot Fleet requests. +- `spot_fleet_request_id`: The IDs of the Spot Fleet requests. Constraint: You can specify + up to 100 IDs in a single request. - `terminate_instances`: Indicates whether to terminate the associated instances when the Spot Fleet request is canceled. The default is to terminate the instances. To let the instances continue to run after the Spot Fleet request is canceled, specify @@ -2778,7 +2824,7 @@ specify the ARN of the destination Outpost using DestinationOutpostArn. Backing copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, Amazon EBS local snapshots on -Outposts in the Amazon EC2 User Guide. For more information about the prerequisites and +Outposts in the Amazon EBS User Guide. For more information about the prerequisites and limits when copying an AMI, see Copy an AMI in the Amazon EC2 User Guide. # Arguments @@ -2799,8 +2845,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the AMI. Only specify this parameter when copying an AMI from an Amazon Web Services Region to an Outpost. The AMI must be in the Region of the destination Outpost. You cannot copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost. - For more information, see Copy AMIs from an Amazon Web Services Region to an Outpost in - the Amazon EC2 User Guide. + For more information, see Copy AMIs from an Amazon Web Services Region to an Outpost in the + Amazon EBS User Guide. +- `"TagSpecification"`: The tags to apply to the new AMI and new snapshots. You can tag the + AMI, the snapshots, or both. To tag the new AMI, the value for ResourceType must be + image. To tag the new snapshots, the value for ResourceType must be snapshot. The same + tag is applied to all the new snapshots. If you specify other values for ResourceType, + the request fails. To tag an AMI or snapshot after it has been created, see CreateTags. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -2808,7 +2859,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an encrypted snapshot. The default KMS key for Amazon EBS is used unless you specify a non-default Key Management Service (KMS) KMS key using KmsKeyId. For - more information, see Amazon EBS encryption in the Amazon EC2 User Guide. + more information, see Amazon EBS encryption in the Amazon EBS User Guide. - `"kmsKeyId"`: The identifier of the symmetric Key Management Service (KMS) KMS key to use when creating encrypted volumes. If this parameter is not specified, your Amazon Web Services managed KMS key for Amazon EBS is used. 
If you specify a KMS key, you must also @@ -2869,15 +2920,15 @@ another, or within the same Outpost. You can use the snapshot to create EBS volu Amazon Machine Images (AMIs). When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies -use the default Key Management Service (KMS) KMS key; however, you can specify a different -KMS key. To copy an encrypted snapshot that has been shared from another account, you must -have permissions for the KMS key used to encrypt the snapshot. Snapshots copied to an -Outpost are encrypted by default using the default encryption key for the Region, or a -different key that you specify in the request using KmsKeyId. Outposts do not support -unencrypted snapshots. For more information, Amazon EBS local snapshots on Outposts in the -Amazon Elastic Compute Cloud User Guide. Snapshots created by copying another snapshot have -an arbitrary volume ID that should not be used for any purpose. For more information, see -Copy an Amazon EBS snapshot in the Amazon Elastic Compute Cloud User Guide. +use the default KMS key; however, you can specify a different KMS key. To copy an encrypted +snapshot that has been shared from another account, you must have permissions for the KMS +key used to encrypt the snapshot. Snapshots copied to an Outpost are encrypted by default +using the default encryption key for the Region, or a different key that you specify in the +request using KmsKeyId. Outposts do not support unencrypted snapshots. For more +information, Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide. +Snapshots created by copying another snapshot have an arbitrary volume ID that should not +be used for any purpose. For more information, see Copy an Amazon EBS snapshot in the +Amazon EBS User Guide. # Arguments - `source_region`: The ID of the Region that contains the snapshot to be copied. @@ -2891,7 +2942,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services Region to an Outpost. The snapshot must be in the Region for the destination Outpost. You cannot copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost. For more information, see Copy snapshots from an - Amazon Web Services Region to an Outpost in the Amazon Elastic Compute Cloud User Guide. + Amazon Web Services Region to an Outpost in the Amazon EBS User Guide. - `"TagSpecification"`: The tags to apply to the new snapshot. - `"destinationRegion"`: The destination Region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination Region @@ -2906,13 +2957,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys not enabled, enable encryption using this parameter. Otherwise, omit this parameter. Encrypted snapshots are encrypted, even if you omit this parameter and encryption by default is not enabled. You cannot set this parameter to false. For more information, see - Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide. -- `"kmsKeyId"`: The identifier of the Key Management Service (KMS) KMS key to use for - Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is - used. If KmsKeyId is specified, the encrypted state must be true. 
You can specify the KMS - key using any of the following: Key ID. For example, - 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key - ARN. For example, + Amazon EBS encryption in the Amazon EBS User Guide. +- `"kmsKeyId"`: The identifier of the KMS key to use for Amazon EBS encryption. If this + parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, + the encrypted state must be true. You can specify the KMS key using any of the following: + Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, + alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN @@ -2925,9 +2975,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon Web Services Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) - in the Amazon Simple Storage Service API Reference. An invalid or improperly signed - PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will - move to an error state. + in the Amazon S3 API Reference. An invalid or improperly signed PresignedUrl will cause the + copy operation to fail asynchronously, and the snapshot will move to an error state. """ function copy_snapshot( SourceRegion, SourceSnapshotId; aws_config::AbstractAWSConfig=global_aws_config() @@ -3097,7 +3146,7 @@ Reservation Fleet in the Amazon EC2 User Guide. Capacity Reservation Fleet. This value, together with the instance type weights that you assign to each instance type used by the Fleet determine the number of instances for which the Fleet reserves capacity. Both values are based on units that make sense for your - workload. For more information, see Total target capacity in the Amazon EC2 User Guide. + workload. For more information, see Total target capacity in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3245,7 +3294,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ClientLoginBannerOptions"`: Options for enabling a customizable text banner that will be displayed on Amazon Web Services provided clients when a VPN session is established. - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to ensure idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"Description"`: A brief description of the Client VPN endpoint. - `"DnsServers"`: Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address @@ -3340,7 +3389,7 @@ table specifies the path for traffic to specific resources or networks. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to ensure idempotency. 
+ idempotency of the request. For more information, see Ensuring idempotency. - `"Description"`: A brief description of the route. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -3501,12 +3550,21 @@ doesn't create a new customer gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BgpAsn"`: For devices that support BGP, the customer gateway's BGP ASN. Default: 65000 +- `"BgpAsn"`: For customer gateway devices that support BGP, specify the device's ASN. You + must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN + is larger than 2,147,483,647, you must use BgpAsnExtended. Default: 65000 Valid values: 1 + to 2,147,483,647 +- `"BgpAsnExtended"`: For customer gateway devices that support BGP, specify the device's + ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. + If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended. Valid values: + 2,147,483,648 to 4,294,967,295 - `"CertificateArn"`: The Amazon Resource Name (ARN) for the customer gateway certificate. - `"DeviceName"`: A name for the customer gateway device. Length Constraints: Up to 255 characters. -- `"IpAddress"`: IPv4 address for the customer gateway device's outside interface. The - address must be static. +- `"IpAddress"`: IPv4 address for the customer gateway device's outside interface. The + address must be static. If OutsideIpAddressType in your VPN connection options is set to + PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If + OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address. - `"PublicIp"`: This member has been deprecated. The Internet-routable IP address for the customer gateway's outside interface. The address must be static. - `"TagSpecification"`: The tags to apply to the customer gateway. @@ -3539,7 +3597,7 @@ end Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more -information, see Creating a default subnet in the Amazon Virtual Private Cloud User Guide. +information, see Create a default subnet in the Amazon VPC User Guide. # Arguments - `availability_zone`: The Availability Zone in which to create the default subnet. @@ -3586,15 +3644,9 @@ end Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default -VPC and default subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify -the components of the default VPC yourself. If you deleted your previous default VPC, you -can create a default VPC. You cannot have more than one default VPC per Region. If your -account supports EC2-Classic, you cannot use this action to create a default VPC in a -Region that supports EC2-Classic. If you want a default VPC in a Region that supports -EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that -possible?\" in the Default VPCs FAQ. We are retiring EC2-Classic. We recommend that you -migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a -VPC in the Amazon Elastic Compute Cloud User Guide. +VPCs in the Amazon VPC User Guide. 
You cannot specify the components of the default VPC +yourself. If you deleted your previous default VPC, you can create a default VPC. You +cannot have more than one default VPC per Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3617,31 +3669,32 @@ end create_dhcp_options(dhcp_configuration) create_dhcp_options(dhcp_configuration, params::Dict{String,<:Any}) -Creates a set of DHCP options for your VPC. After creating the set, you must associate it -with the VPC, causing all existing and new instances that you launch in the VPC to use this -set of DHCP options. The following are the individual DHCP options you can specify. For -more information about the options, see RFC 2132. domain-name-servers - The IP addresses -of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set -specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP -addresses in a single parameter, separated by commas. To have your instance receive a -custom DNS hostname as specified in domain-name, you must set domain-name-servers to a -custom DNS server. domain-name - If you're using AmazonProvidedDNS in us-east-1, specify -ec2.internal. If you're using AmazonProvidedDNS in another Region, specify -region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify -a domain name (for example, ExampleCompany.com). This value is used to complete unqualified -DNS hostnames. Important: Some Linux operating systems accept multiple domain names -separated by spaces. However, Windows and other Linux operating systems treat the value as -a single domain, which results in unexpected behavior. If your DHCP options set is -associated with a VPC that has instances with multiple operating systems, specify only one -domain name. ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) -servers. netbios-name-servers - The IP addresses of up to four NetBIOS name servers. -netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 -(broadcast and multicast are not currently supported). For more information about these -node types, see RFC 2132. Your VPC automatically starts out with a set of DHCP options -that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of -options, and if your VPC has an internet gateway, make sure to set the domain-name-servers -option either to AmazonProvidedDNS or to a domain name server of your choice. For more -information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide. +Creates a custom set of DHCP options. After you create a DHCP option set, you associate it +with a VPC. After you associate a DHCP option set with a VPC, all existing and newly +launched instances in the VPC use this set of DHCP options. The following are the +individual DHCP options you can specify. For more information, see DHCP option sets in the +Amazon VPC User Guide. domain-name - If you're using AmazonProvidedDNS in us-east-1, +specify ec2.internal. If you're using AmazonProvidedDNS in any other Region, specify +region.compute.internal. Otherwise, specify a custom domain name. This value is used to +complete unqualified DNS hostnames. Some Linux operating systems accept multiple domain +names separated by spaces. However, Windows and other Linux operating systems treat the +value as a single domain, which results in unexpected behavior. 
If your DHCP option set is +associated with a VPC that has instances running operating systems that treat the value as +a single domain, specify only one domain name. domain-name-servers - The IP addresses of +up to four DNS servers, or AmazonProvidedDNS. To specify multiple domain name servers in a +single parameter, separate the IP addresses using commas. To have your instances receive +custom DNS hostnames as specified in domain-name, you must specify a custom DNS server. +ntp-servers - The IP addresses of up to eight Network Time Protocol (NTP) servers (four +IPv4 addresses and four IPv6 addresses). netbios-name-servers - The IP addresses of up +to four NetBIOS name servers. netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). +We recommend that you specify 2. Broadcast and multicast are not supported. For more +information about NetBIOS node types, see RFC 2132. ipv6-address-preferred-lease-time - +A value (in seconds, minutes, hours, or years) for how frequently a running instance with +an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 +and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease +time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase +the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs +when half of the lease time has elapsed. # Arguments - `dhcp_configuration`: A DHCP configuration option. @@ -3695,7 +3748,7 @@ with your instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to ensure idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -3726,9 +3779,11 @@ end create_fleet(target_capacity_specification, item) create_fleet(target_capacity_specification, item, params::Dict{String,<:Any}) -Launches an EC2 Fleet. You can create a single EC2 Fleet that includes multiple launch -specifications that vary by instance type, AMI, Availability Zone, or subnet. For more -information, see EC2 Fleet in the Amazon EC2 User Guide. +Creates an EC2 Fleet that contains the configuration information for On-Demand Instances +and Spot Instances. Instances are launched immediately if there is available capacity. A +single EC2 Fleet can include multiple launch specifications that vary by instance type, +AMI, Availability Zone, or subnet. For more information, see EC2 Fleet in the Amazon EC2 +User Guide. # Arguments - `target_capacity_specification`: The number of units to request. @@ -3752,7 +3807,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys health checks in the Amazon EC2 User Guide. - `"SpotOptions"`: Describes the configuration of Spot Instances in an EC2 Fleet. - `"TagSpecification"`: The key-value pair for tagging the EC2 Fleet request on creation. - For more information, see Tagging your resources. If the fleet type is instant, specify a + For more information, see Tag your resources. 
If the fleet type is instant, specify a resource type of fleet to tag the fleet or instance to tag the instances at launch. If the fleet type is maintain or request, specify a resource type of fleet to tag the fleet. You cannot specify a resource type of instance. To tag instances at launch, specify the tags in @@ -3816,12 +3871,12 @@ end Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC. Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the -traffic flow. For more information, see Flow log records in the Amazon Virtual Private -Cloud User Guide. When publishing to CloudWatch Logs, flow log records are published to a -log group, and each network interface has a unique log stream in the log group. When -publishing to Amazon S3, flow log records for all of the monitored network interfaces are -published to a single log file object that is stored in the specified bucket. For more -information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide. +traffic flow. For more information, see Flow log records in the Amazon VPC User Guide. When +publishing to CloudWatch Logs, flow log records are published to a log group, and each +network interface has a unique log stream in the log group. When publishing to Amazon S3, +flow log records for all of the monitored network interfaces are published to a single log +file object that is stored in the specified bucket. For more information, see VPC Flow Logs +in the Amazon VPC User Guide. # Arguments - `resource_id`: The IDs of the resources to monitor. For example, if the resource type is @@ -3836,8 +3891,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DeliverCrossAccountRole"`: The ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. - `"DeliverLogsPermissionArn"`: The ARN of the IAM role that allows Amazon EC2 to publish - flow logs to a CloudWatch Logs log group in your account. This parameter is required if the - destination type is cloud-watch-logs and unsupported otherwise. + flow logs to the log destination. This parameter is required if the destination type is + cloud-watch-logs, or if the destination type is kinesis-data-firehose and the delivery + stream and the resources to monitor are in different accounts. - `"DestinationOptions"`: The destination options. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -3858,8 +3914,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys default format. If you specify this parameter, you must include at least one field. For more information about the available fields, see Flow log records in the Amazon VPC User Guide or Transit Gateway Flow Log records in the Amazon Web Services Transit Gateway Guide. - Specify the fields using the {field-id} format, separated by spaces. For the CLI, surround - this parameter value with single quotes on Linux or double quotes on Windows. + Specify the fields using the {field-id} format, separated by spaces. - `"LogGroupName"`: The name of a new or existing CloudWatch Logs log group where Amazon EC2 publishes your flow logs. This parameter is valid only if the destination type is cloud-watch-logs. 
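The `create_dhcp_options` description a little further up lists the individual options (domain-name, domain-name-servers, ntp-servers, and so on) that make up the `dhcp_configuration` argument. A hedged sketch of building that argument with the generated wrapper: the nested "Key"/"Value" member names follow the EC2 query API and, together with the option values, are assumptions of this example rather than something this diff specifies:

```julia
using AWS
@service EC2

# Each entry selects one DHCP option; the values are illustrative placeholders.
# The "Key"/"Value" member names are assumed EC2 query-API wire names.
dhcp_configuration = [
    Dict("Key" => "domain-name",         "Value" => ["example.com"]),
    Dict("Key" => "domain-name-servers", "Value" => ["AmazonProvidedDNS"]),
]

# Creates the option set; it still needs to be associated with a VPC
# (the AssociateDhcpOptions operation) before instances pick it up.
EC2.create_dhcp_options(dhcp_configuration)
```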
@@ -3966,18 +4021,11 @@ end create_image(instance_id, name, params::Dict{String,<:Any}) Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running -or stopped. By default, when Amazon EC2 creates the new AMI, it reboots the instance so -that it can take snapshots of the attached volumes while data is at rest, in order to -ensure a consistent state. You can set the NoReboot parameter to true in the API request, -or use the --no-reboot option in the CLI to prevent Amazon EC2 from shutting down and -rebooting the instance. If you choose to bypass the shutdown and reboot process by setting -the NoReboot parameter to true in the API request, or by using the --no-reboot option in -the CLI, we can't guarantee the file system integrity of the created image. If you -customized your instance with instance store volumes or Amazon EBS volumes in addition to -the root device volume, the new AMI contains block device mapping information for those -volumes. When you launch an instance from this new AMI, the instance automatically launches -with those additional volumes. For more information, see Create an Amazon EBS-backed Linux -AMI in the Amazon Elastic Compute Cloud User Guide. +or stopped. If you customized your instance with instance store volumes or Amazon EBS +volumes in addition to the root device volume, the new AMI contains block device mapping +information for those volumes. When you launch an instance from this new AMI, the instance +automatically launches with those additional volumes. For more information, see Create an +Amazon EBS-backed Linux AMI in the Amazon Elastic Compute Cloud User Guide. # Arguments - `instance_id`: The ID of the instance. @@ -3994,21 +4042,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys same tag is applied to all of the snapshots that are created. If you specify other values for ResourceType, the request fails. To tag an AMI or snapshot after it has been created, see CreateTags. -- `"blockDeviceMapping"`: The block device mappings. This parameter cannot be used to +- `"blockDeviceMapping"`: The block device mappings. When using the CreateImage action: + You can't change the volume size using the VolumeSize parameter. If you want a different + volume size, you must first change the volume size of the source instance. You can't modify the encryption status of existing volumes or snapshots. To create an AMI with - encrypted snapshots, use the CopyImage action. + volumes or snapshots that have a different encryption status (for example, where the source + volume and snapshots are unencrypted, and you want to create an AMI with encrypted volumes + or snapshots), use the CopyImage action. The only option that can be changed for existing + mappings or snapshots is DeleteOnTermination. - `"description"`: A description for the new image. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"noReboot"`: By default, when Amazon EC2 creates the new AMI, it reboots the instance so - that it can take snapshots of the attached volumes while data is at rest, in order to - ensure a consistent state. You can set the NoReboot parameter to true in the API request, - or use the --no-reboot option in the CLI to prevent Amazon EC2 from shutting down and - rebooting the instance. 
If you choose to bypass the shutdown and reboot process by setting - the NoReboot parameter to true in the API request, or by using the --no-reboot option in - the CLI, we can't guarantee the file system integrity of the created image. Default: false - (follow standard reboot process) +- `"noReboot"`: Indicates whether or not the instance should be automatically rebooted + before creating the image. Specify one of the following values: true - The instance is + not rebooted before creating the image. This creates crash-consistent snapshots that + include only the data that has been written to the volumes at the time the snapshots are + created. Buffered data and data in memory that has not yet been written to the volumes is + not included in the snapshots. false - The instance is rebooted before creating the + image. This ensures that all buffered data and data in memory is written to the volumes + before the snapshots are created. Default: false """ function create_image(instanceId, name; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -4041,8 +4094,8 @@ end create_instance_connect_endpoint(subnet_id, params::Dict{String,<:Any}) Creates an EC2 Instance Connect Endpoint. An EC2 Instance Connect Endpoint allows you to -connect to a resource, without requiring the resource to have a public IPv4 address. For -more information, see Connect to your resources without requiring a public IPv4 address +connect to an instance, without requiring the instance to have a public IPv4 address. For +more information, see Connect to your instances without requiring a public IPv4 address using EC2 Instance Connect Endpoint in the Amazon EC2 User Guide. # Arguments @@ -4150,9 +4203,9 @@ end create_instance_export_task(export_to_s3, instance_id, target_environment, params::Dict{String,<:Any}) Exports a running or stopped instance to an Amazon S3 bucket. For information about the -supported operating systems, image formats, and known limitations for the types of -instances you can export, see Exporting an instance as a VM Using VM Import/Export in the -VM Import/Export User Guide. +prerequisites for your Amazon S3 bucket, supported operating systems, image formats, and +known limitations for the types of instances you can export, see Exporting an instance as a +VM Using VM Import/Export in the VM Import/Export User Guide. # Arguments - `export_to_s3`: The format and location for an export instance task. @@ -4212,8 +4265,8 @@ end create_internet_gateway(params::Dict{String,<:Any}) Creates an internet gateway for use with a VPC. After creating the internet gateway, you -attach it to a VPC using AttachInternetGateway. For more information about your VPC and -internet gateway, see the Amazon Virtual Private Cloud User Guide. +attach it to a VPC using AttachInternetGateway. For more information, see Internet gateways +in the Amazon VPC User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4251,7 +4304,7 @@ in the Amazon VPC IPAM User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see Ensuring Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"Description"`: A description for the IPAM. 
- `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required @@ -4265,6 +4318,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. +- `"Tier"`: IPAM is offered in a Free Tier and an Advanced Tier. For more information about + the features available in each tier and the costs associated with the tiers, see Amazon VPC + pricing > IPAM tab. """ function create_ipam(; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -4331,7 +4387,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AwsService"`: Limits which service in Amazon Web Services that the pool can be used in. \"ec2\", for example, allows users to use space for Elastic IP addresses and VPCs. - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see Ensuring Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"Description"`: A description for the IPAM pool. - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required @@ -4354,6 +4410,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SourceIpamPoolId"`: The ID of the source IPAM pool. Use this option to create a pool within an existing pool. Note that the CIDR you provision for the pool within the source pool must be available in the source pool's CIDR range. +- `"SourceResource"`: The resource used to provision CIDRs to a resource planning pool. - `"TagSpecification"`: The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for @@ -4455,7 +4512,7 @@ see Add a scope in the Amazon VPC IPAM User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see Ensuring Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"Description"`: A description for the scope you're creating. - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required @@ -4543,11 +4600,10 @@ end Creates a launch template. A launch template contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify a launch template instead of providing the launch parameters in the request. For more information, see Launch an -instance from a launch template in the Amazon Elastic Compute Cloud User Guide. If you want -to clone an existing launch template as the basis for creating a new launch template, you -can use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning a template. 
-For more information, see Create a launch template from an existing launch template in the -Amazon Elastic Compute Cloud User Guide. +instance from a launch template in the Amazon EC2 User Guide. To clone an existing launch +template as the basis for a new launch template, use the Amazon EC2 console. The API, SDKs, +and CLI do not support cloning a template. For more information, see Create a launch +template from an existing launch template in the Amazon EC2 User Guide. # Arguments - `launch_template_data`: The information for the launch template. @@ -4562,7 +4618,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"TagSpecification"`: The tags to apply to the launch template on creation. To tag the - launch template, the resource type must be launch-template. To specify the tags for the + launch template, the resource type must be launch-template. To specify the tags for the resources that are created when an instance is launched, you must use the TagSpecifications parameter in the launch template data structure. - `"VersionDescription"`: A description for the first version of the launch template. @@ -4609,13 +4665,14 @@ end create_launch_template_version(launch_template_data) create_launch_template_version(launch_template_data, params::Dict{String,<:Any}) -Creates a new version of a launch template. You can specify an existing version of launch -template from which to base the new version. Launch template versions are numbered in the -order in which they are created. You cannot specify, change, or replace the numbering of -launch template versions. Launch templates are immutable; after you create a launch -template, you can't modify it. Instead, you can create a new version of the launch template -that includes any changes you require. For more information, see Modify a launch template -(manage launch template versions) in the Amazon Elastic Compute Cloud User Guide. +Creates a new version of a launch template. You must specify an existing launch template, +either by name or ID. You can determine whether the new version inherits parameters from a +source version, and add or overwrite parameters as needed. Launch template versions are +numbered in the order in which they are created. You can't specify, change, or replace the +numbering of launch template versions. Launch templates are immutable; after you create a +launch template, you can't modify it. Instead, you can create a new version of the launch +template that includes the changes that you require. For more information, see Modify a +launch template (manage launch template versions) in the Amazon EC2 User Guide. # Arguments - `launch_template_data`: The information for the launch template. @@ -4628,19 +4685,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the - LaunchTemplateId or the LaunchTemplateName, but not both. -- `"LaunchTemplateName"`: The name of the launch template. 
You must specify the - LaunchTemplateName or the LaunchTemplateId, but not both. +- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the launch + template ID or the launch template name, but not both. +- `"LaunchTemplateName"`: The name of the launch template. You must specify either the + launch template ID or the launch template name, but not both. - `"ResolveAlias"`: If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageID. For more information, see Use a - Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User - Guide. Default: false -- `"SourceVersion"`: The version number of the launch template version on which to base the - new version. The new version inherits the same launch parameters as the source version, - except for parameters that you specify in LaunchTemplateData. Snapshots applied to the - block device mapping are ignored when creating a new version unless they are explicitly - included. + Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. Default: false +- `"SourceVersion"`: The version of the launch template on which to base the new version. + Snapshots applied to the block device mapping are ignored when creating a new version + unless they are explicitly included. If you specify this parameter, the new version + inherits the launch parameters from the source version. If you specify additional launch + parameters for the new version, they overwrite any corresponding launch parameters + inherited from the source version. If you omit this parameter, the new version contains + only the launch parameters that you specify for the new version. - `"VersionDescription"`: A description for the version of the launch template. """ function create_launch_template_version( @@ -4889,7 +4947,7 @@ Each entry consists of a CIDR block and an optional description. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency - of the request. For more information, see Ensuring Idempotency. Constraints: Up to 255 + of the request. For more information, see Ensuring idempotency. Constraints: Up to 255 UTF-8 characters in length. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -4954,7 +5012,13 @@ private communication is routed across VPCs and on-premises networks through a t gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks. For more information, see NAT gateways in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. When you create a public NAT gateway and assign it an EIP or +secondary EIPs, the network border group of the EIPs must match the network border group of +the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the NAT +gateway will fail to launch. You can see the network border group for the subnet's AZ by +viewing the details of the subnet. Similarly, you can view the network border group of an +EIP by viewing the details of the EIP address. 
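For example, a public NAT gateway call might look like the following sketch, assuming AWS.jl's `@service` macro and hypothetical subnet and Elastic IP allocation IDs; the arguments are documented below.

```julia
using AWS
@service EC2

# Sketch: public NAT gateway. The subnet and EIP allocation IDs are illustrative, and the
# EIP's network border group must match the subnet's Availability Zone, as noted above.
EC2.create_nat_gateway(
    "subnet-0123456789abcdef0",
    Dict("AllocationId" => "eipalloc-0123456789abcdef0"),
)
```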
For more information about network border +groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. # Arguments - `subnet_id`: The ID of the subnet in which to create the NAT gateway. @@ -4966,8 +5030,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it. - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to ensure idempotency. - Constraint: Maximum 64 ASCII characters. + idempotency of the request. For more information, see Ensuring idempotency. Constraint: + Maximum 64 ASCII characters. - `"ConnectivityType"`: Indicates whether the NAT gateway supports public or private connectivity. The default is public connectivity. - `"DryRun"`: Checks whether you have the required permissions for the action, without @@ -4975,16 +5039,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"PrivateIpAddress"`: The private IPv4 address to assign to the NAT gateway. If you don't provide an address, a private IPv4 address will be automatically assigned. -- `"SecondaryAllocationId"`: Secondary EIP allocation IDs. For more information about - secondary addresses, see Create a NAT gateway in the Amazon Virtual Private Cloud User - Guide. +- `"SecondaryAllocationId"`: Secondary EIP allocation IDs. For more information, see Create + a NAT gateway in the Amazon VPC User Guide. - `"SecondaryPrivateIpAddress"`: Secondary private IPv4 addresses. For more information - about secondary addresses, see Create a NAT gateway in the Amazon Virtual Private Cloud - User Guide. + about secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide. - `"SecondaryPrivateIpAddressCount"`: [Private NAT gateway only] The number of secondary private IPv4 addresses you want to assign to the NAT gateway. For more information about - secondary addresses, see Create a NAT gateway in the Amazon Virtual Private Cloud User - Guide. + secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide. - `"TagSpecification"`: The tags to assign to the NAT gateway. """ function create_nat_gateway(SubnetId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -5020,13 +5081,15 @@ end Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC. For more information, see -Network ACLs in the Amazon Virtual Private Cloud User Guide. +Network ACLs in the Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Ensuring idempotency. - `"TagSpecification"`: The tags to assign to the network ACL. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -5035,7 +5098,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys function create_network_acl(vpcId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( "CreateNetworkAcl", - Dict{String,Any}("vpcId" => vpcId); + Dict{String,Any}("vpcId" => vpcId, "ClientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -5045,7 +5108,13 @@ function create_network_acl( ) return ec2( "CreateNetworkAcl", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("vpcId" => vpcId), params)); + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("vpcId" => vpcId, "ClientToken" => string(uuid4())), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -5065,7 +5134,7 @@ and not number them one right after the other (for example, 101, 102, 103, ...). it easier to add a rule between existing ones without having to renumber the rules. After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one. For more information about network ACLs, see Network ACLs in the Amazon -Virtual Private Cloud User Guide. +VPC User Guide. # Arguments - `egress`: Indicates whether this is an egress rule (rule is applied to traffic leaving @@ -5267,10 +5336,8 @@ end create_network_interface(subnet_id, params::Dict{String,<:Any}) Creates a network interface in the specified subnet. The number of IP addresses you can -assign to a network interface varies by instance type. For more information, see IP -Addresses Per ENI Per Instance Type in the Amazon Virtual Private Cloud User Guide. For -more information about network interfaces, see Elastic network interfaces in the Amazon -Elastic Compute Cloud User Guide. +assign to a network interface varies by instance type. For more information about network +interfaces, see Elastic network interfaces in the Amazon EC2 User Guide. # Arguments - `subnet_id`: The ID of the subnet to associate with the network interface. @@ -5278,7 +5345,21 @@ Elastic Compute Cloud User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see Ensuring Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. +- `"ConnectionTrackingSpecification"`: A connection tracking specification for the network + interface. +- `"EnablePrimaryIpv6"`: If you’re creating a network interface in a dual-stack or + IPv6-only subnet, you have the option to assign a primary IPv6 IP address. A primary IPv6 + address is an IPv6 GUA address associated with an ENI that you have enabled to use a + primary IPv6 address. Use this option if the instance that this ENI will be attached to + relies on its IPv6 address not changing. Amazon Web Services will automatically assign an + IPv6 address associated with the ENI attached to your instance to be the primary IPv6 + address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. + When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made + the primary IPv6 address until the instance is terminated or the network interface is + detached. If you have multiple IPv6 addresses associated with an ENI attached to your + instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with + the ENI becomes the primary IPv6 address. - `"InterfaceType"`: The type of network interface. The default is interface. 
The only supported values are interface, efa, and trunk. - `"Ipv4Prefix"`: The IPv4 prefixes assigned to the network interface. You can't specify @@ -5494,7 +5575,7 @@ Replaces the EBS-backed root volume for a running instance with a new volume tha restored to the original root volume's launch state, that is restored to a specific snapshot taken from the original root volume, or that is restored from an AMI that has the same key characteristics as that of the instance. For more information, see Replace a root -volume in the Amazon Elastic Compute Cloud User Guide. +volume in the Amazon EC2 User Guide. # Arguments - `instance_id`: The ID of the instance for which to replace the root volume. @@ -5571,7 +5652,8 @@ can create a Reserved Instance Marketplace listing of some or all of your Standa Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation. For -more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide. +more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User +Guide. # Arguments - `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of @@ -5700,8 +5782,8 @@ route table includes the following two IPv4 routes: 192.0.2.0/24 (goes to som 192.0.2.0/28 (goes to some target B) Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the -traffic. For more information about route tables, see Route tables in the Amazon Virtual -Private Cloud User Guide. +traffic. For more information about route tables, see Route tables in the Amazon VPC User +Guide. # Arguments - `route_table_id`: The ID of the route table for the route. @@ -5764,13 +5846,15 @@ end Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet. For more information, see Route tables in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Ensuring idempotency. - `"TagSpecification"`: The tags to assign to the route table. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -5779,7 +5863,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys function create_route_table(vpcId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( "CreateRouteTable", - Dict{String,Any}("vpcId" => vpcId); + Dict{String,Any}("vpcId" => vpcId, "ClientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -5789,7 +5873,13 @@ function create_route_table( ) return ec2( "CreateRouteTable", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("vpcId" => vpcId), params)); + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("vpcId" => vpcId, "ClientToken" => string(uuid4())), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -5803,32 +5893,26 @@ Creates a security group. A security group acts as a virtual firewall for your i control inbound and outbound traffic. For more information, see Amazon EC2 security groups in the Amazon Elastic Compute Cloud User Guide and Security groups for your VPC in the Amazon Virtual Private Cloud User Guide. When you create a security group, you specify a -friendly name of your choice. You can have a security group for use in EC2-Classic with the -same name as a security group for use in a VPC. However, you can't have two security groups -for use in EC2-Classic with the same name or two security groups for use in a VPC with the -same name. You have a default security group for use in EC2-Classic and a default security -group for use in your VPC. If you don't specify a security group when you launch an -instance, the instance is launched into the appropriate default security group. A default -security group includes a default rule that grants instances unrestricted network access to -each other. You can add or remove rules from your security groups using -AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, -and RevokeSecurityGroupEgress. For more information about VPC security group limits, see -Amazon VPC Limits. We are retiring EC2-Classic. We recommend that you migrate from -EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the -Amazon Elastic Compute Cloud User Guide. +friendly name of your choice. You can't have two security groups for the same VPC with the +same name. You have a default security group for use in your VPC. If you don't specify a +security group when you launch an instance, the instance is launched into the appropriate +default security group. A default security group includes a default rule that grants +instances unrestricted network access to each other. You can add or remove rules from your +security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, +RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress. For more information about VPC +security group limits, see Amazon VPC Limits. # Arguments - `group_description`: A description for the security group. Constraints: Up to 255 - characters in length Constraints for EC2-Classic: ASCII characters Constraints for EC2-VPC: - a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!* + characters in length Valid characters: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!* - `group_name`: The name of the security group. Constraints: Up to 255 characters in - length. Cannot start with sg-. Constraints for EC2-Classic: ASCII characters Constraints - for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!* + length. Cannot start with sg-. 
Valid characters: a-z, A-Z, 0-9, spaces, and + ._-:/()#,@[]+=&;{}!* # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"TagSpecification"`: The tags to assign to the security group. -- `"VpcId"`: [EC2-VPC] The ID of the VPC. Required for EC2-VPC. +- `"VpcId"`: The ID of the VPC. Required for a nondefault VPC. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -5889,9 +5973,8 @@ device, we recommend that you stop the instance before taking the snapshot. Snap are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. You can tag your snapshots during creation. -For more information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud -User Guide. For more information, see Amazon Elastic Block Store and Amazon EBS encryption -in the Amazon Elastic Compute Cloud User Guide. +For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide. For +more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide. # Arguments - `volume_id`: The ID of the Amazon EBS volume. @@ -5906,7 +5989,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys in the Region for the Outpost. To create a snapshot of a volume on an Outpost and store the snapshot on an Outpost, specify the ARN of the destination Outpost. The snapshot must be created on the same Outpost as the volume. For more information, see Create local - snapshots from volumes on an Outpost in the Amazon Elastic Compute Cloud User Guide. + snapshots from volumes on an Outpost in the Amazon EBS User Guide. - `"TagSpecification"`: The tags to apply to the snapshot during creation. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -5969,7 +6052,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys on an Outpost and store the snapshots on an Outpost, specify the ARN of the destination Outpost. The snapshots must be created on the same Outpost as the instance. For more information, see Create multi-volume local snapshots from instances on an Outpost in the - Amazon Elastic Compute Cloud User Guide. + Amazon EBS User Guide. - `"TagSpecification"`: Tags to apply to every snapshot specified by the instance. """ function create_snapshots( @@ -6007,7 +6090,7 @@ end Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per Amazon Web Services account. For more information, see Spot -Instance data feed in the Amazon EC2 User Guide for Linux Instances. +Instance data feed in the Amazon EC2 User Guide. # Arguments - `bucket`: The name of the Amazon S3 bucket in which to store the Spot Instance data feed. @@ -6106,12 +6189,11 @@ can't change its CIDR block. The allowed size for an IPv4 subnet is between a /2 (16 IP addresses) and a /16 netmask (65,536 IP addresses). Amazon Web Services reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for your use. 
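As a sketch of CreateSecurityGroup after the EC2-Classic wording above was removed, assuming AWS.jl's `@service` macro and a hypothetical VPC ID (VpcId is required for a nondefault VPC); the description and name are illustrative.

```julia
using AWS
@service EC2

# Sketch: security group in a nondefault VPC; description, name, and VPC ID are illustrative.
EC2.create_security_group(
    "Web tier security group",                  # GroupDescription
    "web-sg",                                   # GroupName
    Dict("VpcId" => "vpc-0123456789abcdef0"),
)
```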
If you've associated an IPv6 CIDR block with your VPC, you can -associate an IPv6 CIDR block with a subnet when you create it. The allowed block size for -an IPv6 subnet is a /64 netmask. If you add more than one subnet to a VPC, they're set up -in a star topology with a logical router in the middle. When you stop an instance in a -subnet, it retains its private IPv4 address. It's therefore possible to have a subnet with -no running instances (they're all stopped), but no remaining IP addresses available. For -more information, see Subnets in the Amazon Virtual Private Cloud User Guide. +associate an IPv6 CIDR block with a subnet when you create it. If you add more than one +subnet to a VPC, they're set up in a star topology with a logical router in the middle. +When you stop an instance in a subnet, it retains its private IPv4 address. It's therefore +possible to have a subnet with no running instances (they're all stopped), but no remaining +IP addresses available. For more information, see Subnets in the Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. @@ -6122,17 +6204,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet. To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about - the Regions that support Local Zones, see Available Regions in the Amazon Elastic Compute - Cloud User Guide. To create a subnet in an Outpost, set this value to the Availability Zone - for the Outpost and specify the Outpost ARN. + the Regions that support Local Zones, see Available Local Zones. To create a subnet in an + Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost + ARN. - `"AvailabilityZoneId"`: The AZ ID or the Local Zone ID of the subnet. - `"CidrBlock"`: The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. This parameter is not supported for an IPv6 only subnet. -- `"Ipv6CidrBlock"`: The IPv6 network range for the subnet, in CIDR notation. The subnet - size must use a /64 prefix length. This parameter is required for an IPv6 only subnet. +- `"Ipv4IpamPoolId"`: An IPv4 IPAM pool ID for the subnet. +- `"Ipv4NetmaskLength"`: An IPv4 netmask length for the subnet. +- `"Ipv6CidrBlock"`: The IPv6 network range for the subnet, in CIDR notation. This + parameter is required for an IPv6 only subnet. +- `"Ipv6IpamPoolId"`: An IPv6 IPAM pool ID for the subnet. - `"Ipv6Native"`: Indicates whether to create an IPv6 only subnet. +- `"Ipv6NetmaskLength"`: An IPv6 netmask length for the subnet. - `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost ARN, you must also specify the Availability Zone of the Outpost subnet. - `"TagSpecification"`: The tags to assign to the subnet. @@ -6163,17 +6249,16 @@ end create_subnet_cidr_reservation(cidr, reservation_type, subnet_id) create_subnet_cidr_reservation(cidr, reservation_type, subnet_id, params::Dict{String,<:Any}) -Creates a subnet CIDR reservation. For information about subnet CIDR reservations, see -Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide. +Creates a subnet CIDR reservation. 
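As a sketch of the new IPAM-based sizing options on CreateSubnet above (Ipv4IpamPoolId and Ipv4NetmaskLength), assuming AWS.jl's `@service` macro and hypothetical VPC and pool IDs:

```julia
using AWS
@service EC2

# Sketch: let an IPAM pool allocate a /24 for the subnet instead of passing CidrBlock.
# The VPC ID, pool ID, and Availability Zone are illustrative.
EC2.create_subnet(
    "vpc-0123456789abcdef0",
    Dict(
        "Ipv4IpamPoolId" => "ipam-pool-0123456789abcdef0",
        "Ipv4NetmaskLength" => 24,
        "AvailabilityZone" => "us-east-1a",
    ),
)
```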
For more information, see Subnet CIDR reservations in +the Amazon VPC User Guide and Assign prefixes to network interfaces in the Amazon EC2 User +Guide. # Arguments - `cidr`: The IPv4 or IPV6 CIDR range to reserve. -- `reservation_type`: The type of reservation. The following are valid values: prefix: - The Amazon EC2 Prefix Delegation feature assigns the IP addresses to network interfaces - that are associated with an instance. For information about Prefix Delegation, see Prefix - Delegation for Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User - Guide. explicit: You manually assign the IP addresses to resources that reside in your - subnet. +- `reservation_type`: The type of reservation. The reservation type determines how the + reserved IP addresses are assigned to resources. prefix - Amazon Web Services assigns + the reserved IP addresses to network interfaces. explicit - You assign the reserved IP + addresses to network interfaces. - `subnet_id`: The ID of the subnet. # Optional Parameters @@ -6343,6 +6428,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information about the protocol value, see Protocol Numbers on the Internet Assigned Numbers Authority (IANA) website. - `"SourcePortRange"`: The source port range. +- `"TagSpecification"`: Traffic Mirroring tags specifications. """ function create_traffic_mirror_filter_rule( DestinationCidrBlock, @@ -6433,11 +6519,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys mirror a subset of the packet, set this to the length (in bytes) that you want to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. If you do not want to mirror the entire packet, use the - PacketLength parameter to specify the number of bytes in each packet to mirror. + PacketLength parameter to specify the number of bytes in each packet to mirror. For + sessions with Network Load Balancer (NLB) Traffic Mirror targets the default PacketLength + will be set to 8500. Valid values are 1-8500. Setting a PacketLength greater than 8500 will + result in an error response. - `"TagSpecification"`: The tags to assign to a Traffic Mirror session. - `"VirtualNetworkId"`: The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an - account-wide unique id is chosen at random. + account-wide unique ID is chosen at random. """ function create_traffic_mirror_session( NetworkInterfaceId, @@ -6643,8 +6732,8 @@ end Creates a Connect peer for a specified transit gateway Connect attachment between a transit gateway and an appliance. The peer address and transit gateway address must be the same IP -address family (IPv4 or IPv6). For more information, see Connect peers in the Transit -Gateways Guide. +address family (IPv4 or IPv6). For more information, see Connect peers in the Amazon Web +Services Transit Gateways Guide. # Arguments - `peer_address`: The peer IP address (GRE outer IP address) on the appliance side of the @@ -7171,7 +7260,7 @@ with an optional endpoint-level access policy. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. 
- `"Description"`: A description for the Verified Access endpoint. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -7182,7 +7271,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the endpoint type is network-interface. - `"PolicyDocument"`: The Verified Access policy document. - `"SecurityGroupId"`: The IDs of the security groups to associate with the Verified Access - endpoint. + endpoint. Required if AttachmentType is set to vpc. +- `"SseSpecification"`: The options for server side encryption. - `"TagSpecification"`: The tags to assign to the Verified Access endpoint. """ function create_verified_access_endpoint( @@ -7257,12 +7347,13 @@ and use one common Verified Access policy. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"Description"`: A description for the Verified Access group. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"PolicyDocument"`: The Verified Access policy document. +- `"SseSpecification"`: The options for server side encryption. - `"TagSpecification"`: The tags to assign to the Verified Access group. """ function create_verified_access_group( @@ -7310,11 +7401,13 @@ application requests and grants access only when your security requirements are # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"Description"`: A description for the Verified Access instance. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"FIPSEnabled"`: Enable or disable support for Federal Information Processing Standards + (FIPS) on the instance. - `"TagSpecification"`: The tags to assign to the Verified Access instance. """ function create_verified_access_instance(; @@ -7356,7 +7449,7 @@ denying the application request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"Description"`: A description for the Verified Access trust provider. - `"DeviceOptions"`: The options for a device-based trust provider. This parameter is required when the provider type is device. @@ -7367,6 +7460,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"OidcOptions"`: The options for a OpenID Connect-compatible user-identity trust provider. This parameter is required when the provider type is user. +- `"SseSpecification"`: The options for server side encryption. - `"TagSpecification"`: The tags to assign to the Verified Access trust provider. - `"UserTrustProviderType"`: The type of user-based trust provider. This parameter is required when the provider type is user. @@ -7420,14 +7514,14 @@ You can create a new empty volume or restore a volume from an EBS snapshot. Any Services Marketplace product codes from the snapshot are propagated to the volume. You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also -automatically encrypted. For more information, see Amazon EBS encryption in the Amazon -Elastic Compute Cloud User Guide. You can tag your volumes during creation. For more -information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User -Guide. For more information, see Create an Amazon EBS volume in the Amazon Elastic Compute -Cloud User Guide. +automatically encrypted. For more information, see Amazon EBS encryption in the Amazon EBS +User Guide. You can tag your volumes during creation. For more information, see Tag your +Amazon EC2 resources in the Amazon EC2 User Guide. For more information, see Create an +Amazon EBS volume in the Amazon EBS User Guide. # Arguments -- `availability_zone`: The Availability Zone in which to create the volume. +- `availability_zone`: The ID of the Availability Zone in which to create the volume. For + example, us-east-1a. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7437,17 +7531,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. The following are the supported values for each - volume type: gp3: 3,000-16,000 IOPS io1: 100-64,000 IOPS io2: 100-64,000 IOPS - io1 and io2 volumes support up to 64,000 IOPS only on Instances built on the Nitro System. - Other instance families support performance up to 32,000 IOPS. This parameter is required - for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not - supported for gp2, st1, sc1, or standard volumes. -- `"KmsKeyId"`: The identifier of the Key Management Service (KMS) KMS key to use for - Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is - used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS - key using any of the following: Key ID. For example, - 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key - ARN. For example, + volume type: gp3: 3,000 - 16,000 IOPS io1: 100 - 64,000 IOPS io2: 100 - 256,000 + IOPS For io2 volumes, you can achieve up to 256,000 IOPS on instances built on the Nitro + System. On other instances, you can achieve performance up to 32,000 IOPS. This parameter + is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This + parameter is not supported for gp2, st1, sc1, or standard volumes. 
+- `"KmsKeyId"`: The identifier of the KMS key to use for Amazon EBS encryption. If this + parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, + the encrypted state must be true. You can specify the KMS key using any of the following: + Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, + alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN @@ -7455,14 +7548,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MultiAttachEnabled"`: Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes - only. For more information, see Amazon EBS Multi-Attach in the Amazon Elastic Compute - Cloud User Guide. + only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide. - `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. - `"Size"`: The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size. The following are the - supported volumes sizes for each volume type: gp2 and gp3: 1-16,384 io1 and io2: - 4-16,384 st1 and sc1: 125-16,384 standard: 1-1,024 + supported volumes sizes for each volume type: gp2 and gp3: 1 - 16,384 GiB io1: 4 - + 16,384 GiB io2: 4 - 65,536 GiB st1 and sc1: 125 - 16,384 GiB standard: 1 - 1024 + GiB - `"SnapshotId"`: The snapshot from which to create the volume. You must specify either a snapshot ID or a volume size. - `"TagSpecification"`: The tags to apply to the volume during creation. @@ -7473,16 +7566,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys General Purpose SSD: gp2 | gp3 Provisioned IOPS SSD: io1 | io2 Throughput Optimized HDD: st1 Cold HDD: sc1 Magnetic: standard Throughput Optimized HDD (st1) and Cold HDD (sc1) volumes can't be used as boot volumes. For more information, see Amazon EBS - volume types in the Amazon Elastic Compute Cloud User Guide. Default: gp2 + volume types in the Amazon EBS User Guide. Default: gp2 - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"encrypted"`: Indicates whether the volume should be encrypted. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For - more information, see Encryption by default in the Amazon Elastic Compute Cloud User Guide. - Encrypted Amazon EBS volumes must be attached to instances that support Amazon EBS - encryption. For more information, see Supported instance types. + more information, see Encryption by default in the Amazon EBS User Guide. Encrypted Amazon + EBS volumes must be attached to instances that support Amazon EBS encryption. For more + information, see Supported instance types. 
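As a sketch of CreateVolume with the updated io2 IOPS range above, assuming AWS.jl's `@service` macro; the zone, size, and IOPS values are illustrative, and note the lowercase encrypted key used by this operation.

```julia
using AWS
@service EC2

# Sketch: 500 GiB encrypted io2 volume with provisioned IOPS; values are illustrative.
EC2.create_volume(
    "us-east-1a",
    Dict(
        "VolumeType" => "io2",
        "Size" => 500,          # GiB
        "Iops" => 64000,        # io2 supports up to 256,000 IOPS on Nitro-based instances
        "encrypted" => true,    # this operation uses the lowercase key
    ),
)
```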
""" function create_volume(AvailabilityZone; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -7519,16 +7612,16 @@ end create_vpc() create_vpc(params::Dict{String,<:Any}) -Creates a VPC with the specified CIDR blocks. For more information, see VPC CIDR blocks in -the Amazon Virtual Private Cloud User Guide. You can optionally request an IPv6 CIDR block -for the VPC. You can request an Amazon-provided IPv6 CIDR block from Amazon's pool of IPv6 -addresses, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through +Creates a VPC with the specified CIDR blocks. For more information, see IP addressing for +your VPCs and subnets in the Amazon VPC User Guide. You can optionally request an IPv6 CIDR +block for the VPC. You can request an Amazon-provided IPv6 CIDR block from Amazon's pool of +IPv6 addresses or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP). By default, each instance that you launch in the VPC has the default DHCP options, which include only a default DNS server that we provide -(AmazonProvidedDNS). For more information, see DHCP option sets in the Amazon Virtual -Private Cloud User Guide. You can specify the instance tenancy value for the VPC when you -create it. You can't change this value for the VPC after you create it. For more -information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide. +(AmazonProvidedDNS). For more information, see DHCP option sets in the Amazon VPC User +Guide. You can specify the instance tenancy value for the VPC when you create it. You can't +change this value for the VPC after you create it. For more information, see Dedicated +Instances in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7582,14 +7675,14 @@ end create_vpc_endpoint(service_name, vpc_id) create_vpc_endpoint(service_name, vpc_id, params::Dict{String,<:Any}) -Creates a VPC endpoint for a specified service. An endpoint enables you to create a private -connection between your VPC and the service. The service may be provided by Amazon Web -Services, an Amazon Web Services Marketplace Partner, or another Amazon Web Services -account. For more information, see the Amazon Web Services PrivateLink Guide. +Creates a VPC endpoint. A VPC endpoint provides a private connection between the specified +VPC and the specified endpoint service. You can use an endpoint service provided by Amazon +Web Services, an Amazon Web Services Marketplace Partner, or another Amazon Web Services +account. For more information, see the Amazon Web Services PrivateLink User Guide. # Arguments -- `service_name`: The service name. -- `vpc_id`: The ID of the VPC for the endpoint. +- `service_name`: The name of the endpoint service. +- `vpc_id`: The ID of the VPC. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7615,11 +7708,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys ModifyVpcAttribute to set the VPC attributes. Default: true - `"RouteTableId"`: (Gateway endpoint) The route table IDs. - `"SecurityGroupId"`: (Interface endpoint) The IDs of the security groups to associate - with the endpoint network interface. If this parameter is not specified, we use the default - security group for the VPC. + with the endpoint network interfaces. 
If this parameter is not specified, we use the + default security group for the VPC. +- `"SubnetConfiguration"`: The subnet configurations for the endpoint. - `"SubnetId"`: (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in - which to create an endpoint network interface. For a Gateway Load Balancer endpoint, you - can specify only one subnet. + which to create endpoint network interfaces. For a Gateway Load Balancer endpoint, you can + specify only one subnet. - `"TagSpecification"`: The tags to associate with the endpoint. - `"VpcEndpointType"`: The type of endpoint. Default: Gateway """ @@ -7659,9 +7753,9 @@ end Creates a connection notification for a specified VPC endpoint or VPC endpoint service. A connection notification notifies you of specific endpoint events. You must create an SNS -topic to receive notifications. For more information, see Create a Topic in the Amazon -Simple Notification Service Developer Guide. You can create a connection notification for -interface endpoints only. +topic to receive notifications. For more information, see Creating an Amazon SNS topic in +the Amazon SNS Developer Guide. You can create a connection notification for interface +endpoints only. # Arguments - `connection_notification_arn`: The ARN of the SNS topic for the notifications. @@ -7773,8 +7867,8 @@ Requests a VPC peering connection between two VPCs: a requester VPC that you own accepter VPC with which to create the connection. The accepter VPC can belong to another Amazon Web Services account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks. Limitations and rules -apply to a VPC peering connection. For more information, see the limitations section in the -VPC Peering Guide. The owner of the accepter VPC must accept the peering request to +apply to a VPC peering connection. For more information, see the VPC peering limitations in +the VPC Peering Guide. The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected. If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status @@ -8343,16 +8437,18 @@ instances. You must also specify whether a deleted EC2 Fleet should terminate it instances. If you choose to terminate the instances, the EC2 Fleet enters the deleted_terminating state. Otherwise, the EC2 Fleet enters the deleted_running state, and the instances continue to run until they are interrupted or you terminate them manually. -For instant fleets, EC2 Fleet must terminate the instances when the fleet is deleted. A -deleted instant fleet with running instances is not supported. Restrictions You can -delete up to 25 instant fleets in a single request. If you exceed this number, no instant -fleets are deleted and an error is returned. There is no restriction on the number of -fleets of type maintain or request that can be deleted in a single request. Up to 1000 -instances can be terminated in a single request to delete instant fleets. For more +For instant fleets, EC2 Fleet must terminate the instances when the fleet is deleted. Up to +1000 instances can be terminated in a single request to delete instant fleets. A deleted +instant fleet with running instances is not supported. Restrictions You can delete up +to 25 fleets of type instant in a single request. 
You can delete up to 100 fleets of type +maintain or request in a single request. You can delete up to 125 fleets in a single +request, provided you do not exceed the quota for each fleet type, as specified above. If +you exceed the specified number of fleets to delete, no fleets are deleted. For more information, see Delete an EC2 Fleet in the Amazon EC2 User Guide. # Arguments -- `fleet_id`: The IDs of the EC2 Fleets. +- `fleet_id`: The IDs of the EC2 Fleets. Constraints: In a single request, you can specify + up to 25 instant fleet IDs and up to 100 maintain or request fleet IDs. - `terminate_instances`: Indicates whether to terminate the associated instances when the EC2 Fleet is deleted. The default is to terminate the instances. To let the instances continue to run after the EC2 Fleet is deleted, specify no-terminate-instances. Supported @@ -8666,6 +8762,11 @@ pool in the Amazon VPC IPAM User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Cascade"`: Enables you to quickly delete an IPAM pool and all resources within that + pool, including provisioned CIDRs, allocations, and other pools. You can only use this + option to delete pools in the private scope or pools in the public scope with a source + resource. A source resource is a resource used to provision CIDRs to a resource planning + pool. - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -8813,10 +8914,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the - LaunchTemplateId or the LaunchTemplateName, but not both. +- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the launch + template ID or the launch template name, but not both. - `"LaunchTemplateName"`: The name of the launch template. You must specify either the - LaunchTemplateName or the LaunchTemplateId, but not both. + launch template ID or the launch template name, but not both. """ function delete_launch_template(; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -8838,24 +8939,27 @@ end delete_launch_template_versions(launch_template_version) delete_launch_template_versions(launch_template_version, params::Dict{String,<:Any}) -Deletes one or more versions of a launch template. You cannot delete the default version of +Deletes one or more versions of a launch template. You can't delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch -template using DeleteLaunchTemplate. +template using DeleteLaunchTemplate. You can delete up to 200 launch template versions in a +single request. To delete more than 200 versions in a single request, use +DeleteLaunchTemplate, which deletes the launch template and all of its versions. For more +information, see Delete a launch template version in the Amazon EC2 User Guide. 
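As a sketch of the 200-version limit described above, assuming AWS.jl's `@service` macro and a hypothetical template ID; the version numbers are passed here as a vector, like other EC2 list parameters in this SDK.

```julia
using AWS
@service EC2

# Sketch: delete three versions of a launch template (up to 200 per request).
# The template ID and version numbers are illustrative.
EC2.delete_launch_template_versions(
    ["3", "4", "7"],
    Dict("LaunchTemplateId" => "lt-0123456789abcdef0"),
)
```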
# Arguments - `launch_template_version`: The version numbers of one or more launch template versions to - delete. + delete. You can specify up to 200 launch template version numbers. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the - LaunchTemplateId or the LaunchTemplateName, but not both. +- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the launch + template ID or the launch template name, but not both. - `"LaunchTemplateName"`: The name of the launch template. You must specify either the - LaunchTemplateName or the LaunchTemplateId, but not both. + launch template ID or the launch template name, but not both. """ function delete_launch_template_versions( LaunchTemplateVersion; aws_config::AbstractAWSConfig=global_aws_config() @@ -9740,17 +9844,15 @@ end delete_security_group(params::Dict{String,<:Any}) Deletes a security group. If you attempt to delete a security group that is associated with -an instance, or is referenced by another security group, the operation fails with -InvalidGroup.InUse in EC2-Classic or DependencyViolation in EC2-VPC. We are retiring -EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, -see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. +an instance or network interface or is referenced by another security group, the operation +fails with DependencyViolation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"GroupId"`: The ID of the security group. Required for a nondefault VPC. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You can specify - either the security group name or the security group ID. For security groups in a - nondefault VPC, you must specify the security group ID. +- `"GroupId"`: The ID of the security group. +- `"GroupName"`: [Default VPC] The name of the security group. You can specify either the + security group name or the security group ID. For security groups in a nondefault VPC, you + must specify the security group ID. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -9782,8 +9884,7 @@ needed for any other snapshot is removed. So regardless of which prior snapshots deleted, all active snapshots will have access to all the information needed to restore the volume. You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot. For -more information, see Delete an Amazon EBS snapshot in the Amazon Elastic Compute Cloud -User Guide. +more information, see Delete an Amazon EBS snapshot in the Amazon EBS User Guide. # Arguments - `snapshot_id`: The ID of the EBS snapshot. 
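As a sketch of the DeleteSecurityGroup changes above (deleting by name now applies only to the default VPC), assuming AWS.jl's `@service` macro and hypothetical identifiers:

```julia
using AWS
@service EC2

# Sketch: in a nondefault VPC, delete by group ID (the ID is illustrative).
EC2.delete_security_group(Dict("GroupId" => "sg-0123456789abcdef0"))

# Sketch: in the default VPC, the group name can be used instead.
EC2.delete_security_group(Dict("GroupName" => "web-sg"))
```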
@@ -10545,8 +10646,10 @@ end delete_transit_gateway_route_table(transit_gateway_route_table_id) delete_transit_gateway_route_table(transit_gateway_route_table_id, params::Dict{String,<:Any}) -Deletes the specified transit gateway route table. You must disassociate the route table -from any transit gateway route tables before you can delete it. +Deletes the specified transit gateway route table. If there are any route tables associated +with the transit gateway route table, you must first run DisassociateRouteTable before you +can delete the transit gateway route table. This removes any route tables associated with +the transit gateway route table. # Arguments - `transit_gateway_route_table_id`: The ID of the transit gateway route table. @@ -10698,7 +10801,7 @@ Delete an Amazon Web Services Verified Access endpoint. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -10750,7 +10853,7 @@ Delete an Amazon Web Services Verified Access group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -10802,7 +10905,7 @@ Delete an Amazon Web Services Verified Access instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -10854,7 +10957,7 @@ Delete an Amazon Web Services Verified Access trust provider. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
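As a sketch of DeleteTransitGatewayRouteTable under the clarified association requirement above, assuming AWS.jl's `@service` macro and a hypothetical route table ID:

```julia
using AWS
@service EC2

# Sketch: any associations must be removed first, as described above, or the call fails.
# The route table ID is illustrative.
EC2.delete_transit_gateway_route_table("tgw-rtb-0123456789abcdef0")
```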
@@ -10900,7 +11003,7 @@ end Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance). The volume can remain in the deleting state for several minutes. For more -information, see Delete an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide. +information, see Delete an Amazon EBS volume in the Amazon EBS User Guide. # Arguments - `volume_id`: The ID of the volume. @@ -10942,7 +11045,8 @@ Deletes the specified VPC. You must detach or delete all gateways and resources associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), -and so on. +and so on. When you delete the VPC, it deletes the VPC's default security group, network +ACL, and route table. # Arguments - `vpc_id`: The ID of the VPC. @@ -11109,7 +11213,7 @@ end Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the accepter VPC can delete the VPC peering connection if it's in the active state. The owner of the requester VPC can delete a VPC peering connection in the pending-acceptance state. -You cannot delete a VPC peering connection that's in the failed state. +You cannot delete a VPC peering connection that's in the failed or rejected state. # Arguments - `vpc_peering_connection_id`: The ID of the VPC peering connection. @@ -11329,6 +11433,51 @@ function deprovision_byoip_cidr( ) end +""" + deprovision_ipam_byoasn(asn, ipam_id) + deprovision_ipam_byoasn(asn, ipam_id, params::Dict{String,<:Any}) + +Deprovisions your Autonomous System Number (ASN) from your Amazon Web Services account. +This action can only be called after any BYOIP CIDR associations are removed from your +Amazon Web Services account with DisassociateIpamByoasn. For more information, see +Tutorial: Bring your ASN to IPAM in the Amazon VPC IPAM guide. + +# Arguments +- `asn`: An ASN. +- `ipam_id`: The IPAM ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function deprovision_ipam_byoasn( + Asn, IpamId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DeprovisionIpamByoasn", + Dict{String,Any}("Asn" => Asn, "IpamId" => IpamId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function deprovision_ipam_byoasn( + Asn, + IpamId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DeprovisionIpamByoasn", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("Asn" => Asn, "IpamId" => IpamId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ deprovision_ipam_pool_cidr(ipam_pool_id) deprovision_ipam_pool_cidr(ipam_pool_id, params::Dict{String,<:Any}) @@ -11581,18 +11730,16 @@ end describe_account_attributes(params::Dict{String,<:Any}) Describes attributes of your Amazon Web Services account. The following are the supported -account attributes: supported-platforms: Indicates whether your account can launch -instances into EC2-Classic and EC2-VPC, or only into EC2-VPC. 
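As a sketch of the new DeprovisionIpamByoasn operation added above, assuming AWS.jl's `@service` macro and hypothetical ASN and IPAM IDs; any BYOIP CIDR associations for the ASN must be removed first with DisassociateIpamByoasn.

```julia
using AWS
@service EC2

# Sketch: deprovision a previously provisioned ASN from the account's IPAM.
# Both values are illustrative.
EC2.deprovision_ipam_byoasn("65000", "ipam-0123456789abcdef0")
```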
default-vpc: The ID of the -default VPC for your account, or none. max-instances: This attribute is no longer -supported. The returned value does not reflect your actual vCPU limit for running On-Demand -Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic -Compute Cloud User Guide. vpc-max-security-groups-per-interface: The maximum number of -security groups that you can assign to a network interface. max-elastic-ips: The maximum -number of Elastic IP addresses that you can allocate for use with EC2-Classic. -vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for -use with EC2-VPC. We are retiring EC2-Classic on August 15, 2022. We recommend that you -migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a -VPC in the Amazon EC2 User Guide. +account attributes: default-vpc: The ID of the default VPC for your account, or none. +max-instances: This attribute is no longer supported. The returned value does not reflect +your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand +Instance Limits in the Amazon Elastic Compute Cloud User Guide. max-elastic-ips: The +maximum number of Elastic IP addresses that you can allocate. supported-platforms: This +attribute is deprecated. vpc-max-elastic-ips: The maximum number of Elastic IP addresses +that you can allocate. vpc-max-security-groups-per-interface: The maximum number of +security groups that you can assign to a network interface. The order of the elements in +the response, including those within nested structures, might vary. Applications should not +assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -11622,13 +11769,13 @@ end describe_address_transfers(params::Dict{String,<:Any}) Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP -addresses in the Amazon Virtual Private Cloud User Guide. When you transfer an Elastic IP -address, there is a two-step handshake between the source and transfer Amazon Web Services -accounts. When the source account starts the transfer, the transfer account has seven days -to accept the Elastic IP address transfer. During those seven days, the source account can -view the pending transfer by using this action. After seven days, the transfer expires and -ownership of the Elastic IP address returns to the source account. Accepted transfers are -visible to the source account for three days after the transfers have been accepted. +addresses in the Amazon VPC User Guide. When you transfer an Elastic IP address, there is a +two-step handshake between the source and transfer Amazon Web Services accounts. When the +source account starts the transfer, the transfer account has seven days to accept the +Elastic IP address transfer. During those seven days, the source account can view the +pending transfer by using this action. After seven days, the transfer expires and ownership +of the Elastic IP address returns to the source account. Accepted transfers are visible to +the source account for three days after the transfers have been accepted. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -11775,8 +11922,9 @@ end Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to you. 
If there is an event impacting a zone, you can use this request to view the state and any provided messages for that zone. For more information about Availability Zones, Local -Zones, and Wavelength Zones, see Regions and zones in the Amazon Elastic Compute Cloud User -Guide. +Zones, and Wavelength Zones, see Regions and zones in the Amazon EC2 User Guide. The order +of the elements in the response, including those within nested structures, might vary. +Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -11786,10 +11934,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Filter"`: The filters. group-name - For Availability Zones, use the Region name. For Local Zones, use the name of the group associated with the Local Zone (for example, us-west-2-lax-1) For Wavelength Zones, use the name of the group associated with the - Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1). message - The Zone message. + Wavelength Zone (for example, us-east-1-wl1). message - The Zone message. opt-in-status - The opt-in status (opted-in | not-opted-in | opt-in-not-required). - parent-zoneID - The ID of the zone that handles some of the Local Zone and Wavelength Zone - control plane operations, such as API calls. parent-zoneName - The ID of the zone that + parent-zone-id - The ID of the zone that handles some of the Local Zone and Wavelength Zone + control plane operations, such as API calls. parent-zone-name - The ID of the zone that handles some of the Local Zone and Wavelength Zone control plane operations, such as API calls. region-name - The name of the Region for the Zone (for example, us-east-1). state - The state of the Availability Zone, the Local Zone, or the Wavelength Zone @@ -11864,7 +12012,9 @@ end Describes the specified bundle tasks or all of your bundle tasks. Completed bundle tasks are listed for only a limited time. If your bundle task is no longer in the list, you can still register an AMI from it. Just use RegisterImage with the Amazon S3 bucket name and -image manifest name you provided to the bundle task. +image manifest name you provided to the bundle task. The order of the elements in the +response, including those within nested structures, might vary. Applications should not +assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -11939,6 +12089,74 @@ function describe_byoip_cidrs( ) end +""" + describe_capacity_block_offerings(capacity_duration_hours, instance_count, instance_type) + describe_capacity_block_offerings(capacity_duration_hours, instance_count, instance_type, params::Dict{String,<:Any}) + +Describes Capacity Block offerings available for purchase in the Amazon Web Services Region +that you're currently using. With Capacity Blocks, you purchase a specific instance type +for a period of time. + +# Arguments +- `capacity_duration_hours`: The number of hours for which to reserve Capacity Block. +- `instance_count`: The number of instances for which to reserve capacity. +- `instance_type`: The type of instance for which the Capacity Block offering reserves + capacity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"EndDateRange"`: The latest end date for the Capacity Block offering. +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. +- `"NextToken"`: The token to use to retrieve the next page of results. +- `"StartDateRange"`: The earliest start date for the Capacity Block offering. +""" +function describe_capacity_block_offerings( + CapacityDurationHours, + InstanceCount, + InstanceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DescribeCapacityBlockOfferings", + Dict{String,Any}( + "CapacityDurationHours" => CapacityDurationHours, + "InstanceCount" => InstanceCount, + "InstanceType" => InstanceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_capacity_block_offerings( + CapacityDurationHours, + InstanceCount, + InstanceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DescribeCapacityBlockOfferings", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CapacityDurationHours" => CapacityDurationHours, + "InstanceCount" => InstanceCount, + "InstanceType" => InstanceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_capacity_reservation_fleets() describe_capacity_reservation_fleets(params::Dict{String,<:Any}) @@ -11957,10 +12175,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys supported. tenancy - The tenancy of the Fleet (default | dedicated). allocation-strategy - The allocation strategy used by the Fleet. Only prioritized is supported. -- `"MaxResults"`: The maximum number of results to return for the request in a single page. - The remaining results can be seen by sending another request with the returned nextToken - value. This value can be between 5 and 500. If maxResults is given a larger value than 500, - you receive an error. +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. - `"NextToken"`: The token to use to retrieve the next page of results. """ function describe_capacity_reservation_fleets(; @@ -12033,10 +12250,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity. placement-group-arn - The ARN of the cluster placement group in which the Capacity Reservation was created. -- `"MaxResults"`: The maximum number of results to return for the request in a single page. - The remaining results can be seen by sending another request with the returned nextToken - value. This value can be between 5 and 500. If maxResults is given a larger value than 500, - you receive an error. +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. 
- `"NextToken"`: The token to use to retrieve the next page of results. """ function describe_capacity_reservations(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -12103,25 +12319,21 @@ end describe_classic_link_instances() describe_classic_link_instances(params::Dict{String,<:Any}) -Describes one or more of your linked EC2-Classic instances. This request only returns -information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot use -this request to return information about other instances. We are retiring EC2-Classic. We -recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate -from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Describes your linked EC2-Classic instances. This request only +returns information about EC2-Classic instances linked to a VPC through ClassicLink. You +cannot use this request to return information about other instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. group-id - The ID of a VPC security group that's - associated with the instance. instance-id - The ID of the instance. tag:<key> - - The key/value combination of a tag assigned to the resource. Use the tag key in the filter - name and the tag value as the filter value. For example, to find all resources that have a - tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA - for the filter value. tag-key - The key of a tag assigned to the resource. Use this - filter to find all resources assigned a tag with a specific key, regardless of the tag - value. vpc-id - The ID of the VPC to which the instance is linked. vpc-id - The ID of - the VPC that the instance is linked to. -- `"InstanceId"`: One or more instance IDs. Must be instances linked to a VPC through - ClassicLink. +- `"Filter"`: The filters. group-id - The ID of a VPC security group that's associated + with the instance. instance-id - The ID of the instance. tag:<key> - The + key/value combination of a tag assigned to the resource. Use the tag key in the filter name + and the tag value as the filter value. For example, to find all resources that have a tag + with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for + the filter value. tag-key - The key of a tag assigned to the resource. Use this filter + to find all resources assigned a tag with a specific key, regardless of the tag value. + vpc-id - The ID of the VPC to which the instance is linked. +- `"InstanceId"`: The instance IDs. Must be instances linked to a VPC through ClassicLink. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -12499,22 +12711,23 @@ end describe_dhcp_options() describe_dhcp_options(params::Dict{String,<:Any}) -Describes one or more of your DHCP options sets. For more information, see DHCP options -sets in the Amazon Virtual Private Cloud User Guide. +Describes your DHCP option sets. The default is to describe all your DHCP option sets. +Alternatively, you can specify specific DHCP option set IDs or filter the results to +include only the DHCP option sets that match specific criteria. For more information, see +DHCP option sets in the Amazon VPC User Guide. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DhcpOptionsId"`: The IDs of one or more DHCP options sets. Default: Describes all your - DHCP options sets. -- `"Filter"`: One or more filters. dhcp-options-id - The ID of a DHCP options set. - key - The key for one of the options (for example, domain-name). value - The value for - one of the options. owner-id - The ID of the Amazon Web Services account that owns the - DHCP options set. tag:<key> - The key/value combination of a tag assigned to the - resource. Use the tag key in the filter name and the tag value as the filter value. For - example, to find all resources that have a tag with the key Owner and the value TeamA, - specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key - of a tag assigned to the resource. Use this filter to find all resources assigned a tag - with a specific key, regardless of the tag value. +- `"DhcpOptionsId"`: The IDs of DHCP option sets. +- `"Filter"`: The filters. dhcp-options-id - The ID of a DHCP options set. key - The + key for one of the options (for example, domain-name). value - The value for one of the + options. owner-id - The ID of the Amazon Web Services account that owns the DHCP options + set. tag:<key> - The key/value combination of a tag assigned to the resource. Use + the tag key in the filter name and the tag value as the filter value. For example, to find + all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for + the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to + the resource. Use this filter to find all resources assigned a tag with a specific key, + regardless of the tag value. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -12544,20 +12757,23 @@ end describe_egress_only_internet_gateways() describe_egress_only_internet_gateways(params::Dict{String,<:Any}) -Describes one or more of your egress-only internet gateways. +Describes your egress-only internet gateways. The default is to describe all your +egress-only internet gateways. Alternatively, you can specify specific egress-only internet +gateway IDs or filter the results to include only the egress-only internet gateways that +match specific criteria. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"EgressOnlyInternetGatewayId"`: One or more egress-only internet gateway IDs. -- `"Filter"`: One or more filters. tag:<key> - The key/value combination of a tag - assigned to the resource. Use the tag key in the filter name and the tag value as the - filter value. For example, to find all resources that have a tag with the key Owner and the - value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. - tag-key - The key of a tag assigned to the resource. Use this filter to find all resources - assigned a tag with a specific key, regardless of the tag value. +- `"EgressOnlyInternetGatewayId"`: The IDs of the egress-only internet gateways. +- `"Filter"`: The filters. 
tag:<key> - The key/value combination of a tag assigned + to the resource. Use the tag key in the filter name and the tag value as the filter value. + For example, to find all resources that have a tag with the key Owner and the value TeamA, + specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key + of a tag assigned to the resource. Use this filter to find all resources assigned a tag + with a specific key, regardless of the tag value. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -12588,8 +12804,9 @@ end describe_elastic_gpus() describe_elastic_gpus(params::Dict{String,<:Any}) -Describes the Elastic Graphics accelerator associated with your instances. For more -information about Elastic Graphics, see Amazon Elastic Graphics. + Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require +graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances. +Describes the Elastic Graphics accelerator associated with your instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -12688,7 +12905,7 @@ end describe_fast_launch_images() describe_fast_launch_images(params::Dict{String,<:Any}) -Describe details for Windows AMIs that are configured for faster launching. +Describe details for Windows AMIs that are configured for Windows fast launch. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -12696,11 +12913,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"Filter"`: Use the following filters to streamline results. resource-type - The - resource type for pre-provisioning. launch-template - The launch template that is - associated with the pre-provisioned Windows AMI. owner-id - The owner ID for the - pre-provisioning resource. state - The current state of fast launching for the Windows - AMI. -- `"ImageId"`: Details for one or more Windows AMI image IDs. + resource type for pre-provisioning. owner-id - The owner ID for the pre-provisioning + resource. state - The current state of fast launching for the Windows AMI. +- `"ImageId"`: Specify one or more Windows AMI image IDs for the request. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -12826,8 +13041,10 @@ end describe_fleet_instances(fleet_id) describe_fleet_instances(fleet_id, params::Dict{String,<:Any}) -Describes the running instances for the specified EC2 Fleet. For more information, see -Monitor your EC2 Fleet in the Amazon EC2 User Guide. +Describes the running instances for the specified EC2 Fleet. Currently, +DescribeFleetInstances does not support fleets of type instant. Instead, use +DescribeFleets, specifying the instant fleet ID in the request. For more information, see +Describe your EC2 Fleet in the Amazon EC2 User Guide. # Arguments - `fleet_id`: The ID of the EC2 Fleet. 
@@ -12869,8 +13086,10 @@ end describe_fleets() describe_fleets(params::Dict{String,<:Any}) -Describes the specified EC2 Fleets or all of your EC2 Fleets. For more information, see -Monitor your EC2 Fleet in the Amazon EC2 User Guide. +Describes the specified EC2 Fleet or all of your EC2 Fleets. If a fleet is of type +instant, you must specify the fleet ID in the request, otherwise the fleet does not appear +in the response. For more information, see Describe your EC2 Fleet in the Amazon EC2 User +Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -13308,7 +13527,9 @@ end describe_image_attribute(attribute, image_id, params::Dict{String,<:Any}) Describes the specified attribute of the specified AMI. You can specify only one attribute -at a time. +at a time. The order of the elements in the response, including those within nested +structures, might vary. Applications should not assume the elements appear in a particular +order. # Arguments - `attribute`: The AMI attribute. Note: The blockDeviceMapping attribute is deprecated. @@ -13362,7 +13583,10 @@ you own, and private images owned by other Amazon Web Services accounts for whic explicit launch permissions. Recently deregistered images appear in the returned results for a short interval and then return empty results. After all instances that reference a deregistered AMI are terminated, specifying the ID of the image will eventually return an -error indicating that the AMI ID cannot be found. +error indicating that the AMI ID cannot be found. We strongly recommend using only +paginated requests. Unpaginated requests are susceptible to throttling and timeouts. The +order of the elements in the response, including those within nested structures, might +vary. Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -13374,9 +13598,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys you specify self or your own Amazon Web Services account ID, AMIs shared with your account are returned. In addition, AMIs that are shared with the organization or OU of which you are member are also returned. If you specify all, all public AMIs are returned. -- `"Filter"`: The filters. architecture - The image architecture (i386 | x86_64 | - arm64). block-device-mapping.delete-on-termination - A Boolean value that indicates - whether the Amazon EBS volume is deleted on instance termination. +- `"Filter"`: The filters. architecture - The image architecture (i386 | x86_64 | arm64 + | x86_64_mac | arm64_mac). block-device-mapping.delete-on-termination - A Boolean value + that indicates whether the Amazon EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume. block-device-mapping.volume-size - The volume @@ -13401,20 +13625,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys product-code.type - The type of the product code (marketplace). ramdisk-id - The RAM disk ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | - instance-store). state - The state of the image (available | pending | failed). 
- state-reason-code - The reason code for the state change. state-reason-message - The - message for the state change. sriov-net-support - A value of simple indicates that - enhanced networking with the Intel 82599 VF interface is enabled. tag:<key> - The - key/value combination of a tag assigned to the resource. Use the tag key in the filter name - and the tag value as the filter value. For example, to find all resources that have a tag - with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for - the filter value. tag-key - The key of a tag assigned to the resource. Use this filter - to find all resources assigned a tag with a specific key, regardless of the tag value. - virtualization-type - The virtualization type (paravirtual | hvm). + instance-store). source-instance-id - The ID of the instance that the AMI was created + from if the AMI was created using CreateImage. This filter is applicable only if the AMI + was created using CreateImage. state - The state of the image (available | pending | + failed). state-reason-code - The reason code for the state change. + state-reason-message - The message for the state change. sriov-net-support - A value of + simple indicates that enhanced networking with the Intel 82599 VF interface is enabled. + tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag + key in the filter name and the tag value as the filter value. For example, to find all + resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the + filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the + resource. Use this filter to find all resources assigned a tag with a specific key, + regardless of the tag value. virtualization-type - The virtualization type (paravirtual + | hvm). - `"ImageId"`: The image IDs. Default: Describes all images available to you. - `"IncludeDeprecated"`: Specifies whether to include deprecated AMIs. Default: No deprecated AMIs are included in the response. If you are the AMI owner, all deprecated AMIs appear in the response regardless of what you specify for this parameter. +- `"IncludeDisabled"`: Specifies whether to include disabled AMIs. Default: No disabled + AMIs are included in the response. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -13770,7 +13999,9 @@ Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your ins related to hardware issues, software updates, or system maintenance. For more information, see Scheduled events for your instances in the Amazon EC2 User Guide. Instance state - You can manage your instances from the moment you launch them through their termination. -For more information, see Instance lifecycle in the Amazon EC2 User Guide. +For more information, see Instance lifecycle in the Amazon EC2 User Guide. The order of +the elements in the response, including those within nested structures, might vary. +Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -13824,24 +14055,79 @@ function describe_instance_status( ) end +""" + describe_instance_topology() + describe_instance_topology(params::Dict{String,<:Any}) + +Describes a tree-based hierarchy that represents the physical host placement of your EC2 +instances within an Availability Zone or Local Zone. You can use this information to +determine the relative proximity of your EC2 instances within the Amazon Web Services +network to support your tightly coupled workloads. Limitations Supported zones +Availability Zone Local Zone Supported instance types hpc6a.48xlarge | +hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | +hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | +p4de.24xlarge | p5.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more +information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: The filters. availability-zone - The name of the Availability Zone (for + example, us-west-2a) or Local Zone (for example, us-west-2-lax-1b) that the instance is in. + instance-type - The instance type (for example, p4d.24xlarge) or instance family (for + example, p4d*). You can use the * wildcard to match zero or more characters, or the ? + wildcard to match zero or one character. zone-id - The ID of the Availability Zone (for + example, usw2-az2) or Local Zone (for example, usw2-lax1-az1) that the instance is in. +- `"GroupName"`: The name of the placement group that each instance is in. Constraints: + Maximum 100 explicitly specified placement group names. +- `"InstanceId"`: The instance IDs. Default: Describes all your instances. Constraints: + Maximum 100 explicitly specified instance IDs. +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. You can't specify this parameter and the instance IDs + parameter in the same request. Default: 20 +- `"NextToken"`: The token returned from a previous paginated request. Pagination continues + from the end of the items returned by the previous request. +""" +function describe_instance_topology(; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2( + "DescribeInstanceTopology"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_instance_topology( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeInstanceTopology", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_instance_type_offerings() describe_instance_type_offerings(params::Dict{String,<:Any}) -Returns a list of all instance types offered. The results can be filtered by location -(Region or Availability Zone). If no location is specified, the instance types offered in -the current Region are returned. +Lists the instance types that are offered for the specified location. If no location is +specified, the default is to list the instance types that are offered in the current Region. 
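A short sketch of the two call shapes implied by the rewritten description above, assuming the `@service` binding; `LocationType` is one of the scalar keys listed under Optional Parameters below, and narrowing to a specific zone would additionally use the `location` filter described there.

using AWS
@service EC2   # assumption: AWS.jl's high-level binding for this service module

# Default: instance types offered in the current Region.
EC2.describe_instance_type_offerings()

# Group offerings by Availability Zone via the scalar LocationType key.
EC2.describe_instance_type_offerings(Dict("LocationType" => "availability-zone"))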
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"Filter"`: One or more filters. Filter names and values are case-sensitive. location - - This depends on the location type. For example, if the location type is region (default), - the location is the Region code (for example, us-east-2.) instance-type - The instance - type. For example, c5.2xlarge. -- `"LocationType"`: The location type. +- `"Filter"`: One or more filters. Filter names and values are case-sensitive. + instance-type - The instance type. For a list of possible values, see Instance. location + - The location. For a list of possible identifiers, see Regions and Zones. +- `"LocationType"`: The location type. availability-zone - The Availability Zone. When + you specify a location filter, it must be an Availability Zone for the current Region. + availability-zone-id - The AZ ID. When you specify a location filter, it must be an AZ ID + for the current Region. outpost - The Outpost ARN. When you specify a location filter, + it must be an Outpost ARN for the current Region. region - The current Region. If you + specify a location filter, it must match the current Region. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -13872,8 +14158,8 @@ end describe_instance_types() describe_instance_types(params::Dict{String,<:Any}) -Describes the details of the instance types that are offered in a location. The results can -be filtered by the attributes of the instance types. +Describes the specified instance types. By default, all instance types for the current +Region are described. Alternatively, you can filter the results. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -13883,11 +14169,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Filter"`: One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance - type (true | false). burstable-performance-supported - Indicates whether it is a - burstable performance instance type (true | false). current-generation - Indicates - whether this instance type is the latest generation instance type of an instance family - (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline - bandwidth performance for an EBS-optimized instance type, in Mbps. + type (true | false). burstable-performance-supported - Indicates whether the instance + type is a burstable performance T instance type (true | false). current-generation - + Indicates whether this instance type is the latest generation instance type of an instance + family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The + baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. 
ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput @@ -13929,8 +14215,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, \"25 - Gigabit\"). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | - x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. + Gigabit\"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported + (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported + (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM + version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | + i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in + GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot). supported-virtualization-type - The virtualization type (hvm | @@ -13940,8 +14230,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\". -- `"InstanceType"`: The instance types. For more information, see Instance types in the - Amazon EC2 User Guide. +- `"InstanceType"`: The instance types. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -13980,7 +14269,10 @@ is usually less than one hour. If you describe instances in the rare case where Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call -works normally. +works normally. We strongly recommend using only paginated requests. Unpaginated requests +are susceptible to throttling and timeouts. The order of the elements in the response, +including those within nested structures, might vary. Applications should not assume the +elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -13988,23 +14280,39 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the - instance, for example, 2010-09-15T17:15:20.000Z. + instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. 
block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. - capacity-reservation-id - The ID of the Capacity Reservation into which the instance was - launched. client-token - The idempotency token you provided when you launched the - instance. dns-name - The public DNS name of the instance. + boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | + uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which + the instance was launched. + capacity-reservation-specification.capacity-reservation-preference - The instance's + Capacity Reservation preference (open | none). + capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - + The ID of the targeted Capacity Reservation. + capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource + -group-arn - The ARN of the targeted Capacity Reservation group. client-token - The + idempotency token you provided when you launched the instance. + current-instance-boot-mode - The boot mode that is used to launch the instance at launch or + start (legacy-bios | uefi). dns-name - The public DNS name of the instance. + ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS + I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced + networking with ENA. enclave-options.enabled - A Boolean that indicates whether the + instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile - associated with the instance. Specified as an ARN. image-id - The ID of the image used - to launch the instance. instance-id - The ID of the instance. instance-lifecycle - - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled). + associated with the instance. Specified as an ARN. iam-instance-profile.id - The + instance profile associated with the instance. Specified as an ID. + iam-instance-profile.name - The instance profile associated with the instance. Specified as + an name. image-id - The ID of the image used to launch the instance. instance-id - + The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, + a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 @@ -14013,51 +14321,76 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. 
ip-address - The public IPv4 address of the instance. - kernel-id - The kernel ID. key-name - The name of the key pair used when the instance - was launched. launch-index - When launching multiple instances, this is the index for - the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The - time when the instance was launched, in the ISO 8601 format in the UTC time zone - (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard - (*), for example, 2021-09-29T*, which matches an entire day. - metadata-options.http-tokens - The metadata request authorization state (optional | - required) metadata-options.http-put-response-hop-limit - The HTTP metadata request put - response hop limit (integer, possible values 1 to 64) metadata-options.http-endpoint - + ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name + - The name of the key pair used when the instance was launched. launch-index - When + launching multiple instances, this is the index for the instance in the launch group (for + example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in + the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, + 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which + matches an entire day. maintenance-options.auto-recovery - The current automatic + recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) - metadata-options.instance-metadata-tags - The status of access to instance tags from the - instance metadata (enabled | disabled) monitoring-state - Indicates whether detailed - monitoring is enabled (disabled | enabled). - network-interface.addresses.private-ip-address - The private IPv4 address associated with - the network interface. network-interface.addresses.primary - Specifies whether the IPv4 - address of the network interface is the primary private IPv4 address. + metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled + (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 + endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - + The HTTP metadata request put response hop limit (integer, possible values 1 to 64) + metadata-options.http-tokens - The metadata request authorization state (optional | + required) metadata-options.instance-metadata-tags - The status of access to instance + tags from the instance metadata (enabled | disabled) metadata-options.state - The state + of the metadata option changes (pending | applied). monitoring-state - Indicates whether + detailed monitoring is enabled (disabled | enabled). + network-interface.addresses.association.allocation-id - The allocation ID. + network-interface.addresses.association.association-id - The association ID. + network-interface.addresses.association.carrier-ip - The carrier IP address. + network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. + network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 + address associated with the network interface. + network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic - IP address (IPv4) with a network interface. 
- network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 - address associated with the network interface. network-interface.association.public-ip - - The address of the Elastic IP address (IPv4) bound to the network interface. - network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) - associated with the network interface. network-interface.association.allocation-id - The - allocation ID returned when you allocated the Elastic IP address (IPv4) for your network - interface. network-interface.association.association-id - The association ID returned - when the network interface was associated with an IPv4 address. + IP address (IPv4) with a network interface. network-interface.addresses.primary - + Specifies whether the IPv4 address of the network interface is the primary private IPv4 + address. network-interface.addresses.private-dns-name - The private DNS name. + network-interface.addresses.private-ip-address - The private IPv4 address associated with + the network interface. network-interface.association.allocation-id - The allocation ID + returned when you allocated the Elastic IP address (IPv4) for your network interface. + network-interface.association.association-id - The association ID returned when the network + interface was associated with an IPv4 address. network-interface.association.carrier-ip + - The customer-owned IP address. network-interface.association.customer-owned-ip - The + customer-owned IP address. network-interface.association.ip-owner-id - The owner of the + Elastic IP address (IPv4) associated with the network interface. + network-interface.association.public-dns-name - The public DNS name. + network-interface.association.public-ip - The address of the Elastic IP address (IPv4) + bound to the network interface. network-interface.attachment.attach-time - The time that + the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. + network-interface.attachment.delete-on-termination - Specifies whether the attachment is + deleted when an instance is terminated. network-interface.attachment.device-index - The + device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. - network-interface.attachment.device-index - The device index to which the network interface - is attached. network-interface.attachment.status - The status of the attachment - (attaching | attached | detaching | detached). network-interface.attachment.attach-time - - The time that the network interface was attached to an instance. - network-interface.attachment.delete-on-termination - Specifies whether the attachment is - deleted when an instance is terminated. network-interface.availability-zone - The - Availability Zone for the network interface. network-interface.description - The - description of the network interface. network-interface.group-id - The ID of a security - group associated with the network interface. network-interface.group-name - The name of - a security group associated with the network interface. + network-interface.attachment.network-card-index - The index of the network card. + network-interface.attachment.status - The status of the attachment (attaching | attached | + detaching | detached). 
network-interface.availability-zone - The Availability Zone for + the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates + whether a network interface with an IPv6 address is unreachable from the public internet. + network-interface.description - The description of the network interface. + network-interface.group-id - The ID of a security group associated with the network + interface. network-interface.group-name - The name of a security group associated with + the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes + that are assigned to the network interface. network-interface.ipv6-address - The IPv6 + address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the - network interface. network-interface.mac-address - The MAC address of the network - interface. network-interface.network-interface-id - The ID of the network interface. - network-interface.owner-id - The ID of the owner of the network interface. - network-interface.private-dns-name - The private DNS name of the network interface. + network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that + indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A + Boolean that indicates whether this is an IPv6 only network interface. + network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network + interface. network-interface.mac-address - The MAC address of the network interface. + network-interface.network-interface-id - The ID of the network interface. + network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The + ID of the owner of the network interface. network-interface.private-dns-name - The + private DNS name of the network interface. network-interface.private-ip-address - The + private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network @@ -14066,41 +14399,65 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. + network-interface.tag-key - The key of a tag assigned to the network interface. + network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. - private-dns-name - The private IPv4 DNS name of the instance. private-ip-address - The - private IPv4 address of the instance. product-code - The product code associated with - the AMI used to launch the instance. product-code.type - The type of product code - (devpay | marketplace). ramdisk-id - The RAM disk ID. 
reason - The reason for the - current state of the instance (for example, shows \"User Initiated [date]\" when you stop - or terminate the instance). Similar to the state-reason-code filter. requester-id - The - ID of the entity that launched the instance on your behalf (for example, Amazon Web - Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the - instance's reservation. A reservation ID is created any time you launch an instance. A - reservation ID has a one-to-one relationship with an instance launch request, but can be - associated with more than one instance if you launch multiple instances using the same - launch request. For example, if you launch one instance, you get one reservation ID. If you - launch ten instances using the same launch request, you also get one reservation ID. - root-device-name - The device name of the root device volume (for example, /dev/sda1). - root-device-type - The type of the root device volume (ebs | instance-store). - source-dest-check - Indicates whether the instance performs source/destination checking. A - value of true means that checking is enabled, and false means that checking is disabled. - The value must be false for the instance to perform network address translation (NAT) in - your VPC. spot-instance-request-id - The ID of the Spot Instance request. - state-reason-code - The reason code for the state change. state-reason-message - A - message that describes the state change. subnet-id - The ID of the subnet for the - instance. tag:<key> - The key/value combination of a tag assigned to the resource. - Use the tag key in the filter name and the tag value as the filter value. For example, to - find all resources that have a tag with the key Owner and the value TeamA, specify - tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag - assigned to the resource. Use this filter to find all resources that have a tag with a - specific key, regardless of the tag value. tenancy - The tenancy of an instance - (dedicated | default | host). virtualization-type - The virtualization type of the - instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running - in. + platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise + Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server + Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat + Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | + Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server + Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with + SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). + private-dns-name - The private IPv4 DNS name of the instance. + private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates + whether to respond to DNS queries for instance hostnames with DNS A records. + private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates + whether to respond to DNS queries for instance hostnames with DNS AAAA records. + private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). + private-ip-address - The private IPv4 address of the instance. This can only be used to + filter by the primary IP address of the network interface attached to the instance. 
To + filter by additional IP addresses assigned to the network interface, use the filter + network-interface.addresses.private-ip-address. product-code - The product code + associated with the AMI used to launch the instance. product-code.type - The type of + product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The + reason for the current state of the instance (for example, shows \"User Initiated [date]\" + when you stop or terminate the instance). Similar to the state-reason-code filter. + requester-id - The ID of the entity that launched the instance on your behalf (for example, + Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The + ID of the instance's reservation. A reservation ID is created any time you launch an + instance. A reservation ID has a one-to-one relationship with an instance launch request, + but can be associated with more than one instance if you launch multiple instances using + the same launch request. For example, if you launch one instance, you get one reservation + ID. If you launch ten instances using the same launch request, you also get one reservation + ID. root-device-name - The device name of the root device volume (for example, + /dev/sda1). root-device-type - The type of the root device volume (ebs | + instance-store). source-dest-check - Indicates whether the instance performs + source/destination checking. A value of true means that checking is enabled, and false + means that checking is disabled. The value must be false for the instance to perform + network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the + Spot Instance request. state-reason-code - The reason code for the state change. + state-reason-message - A message that describes the state change. subnet-id - The ID of + the subnet for the instance. tag:<key> - The key/value combination of a tag + assigned to the resource. Use the tag key in the filter name and the tag value as the + filter value. For example, to find all resources that have a tag with the key Owner and the + value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. + tag-key - The key of a tag assigned to the resource. Use this filter to find all resources + that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy + of an instance (dedicated | default | host). tpm-support - Indicates if the instance is + configured for NitroTPM support (v2.0). usage-operation - The usage operation value for + the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | + RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | + RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | + RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | + RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time + - The time that the usage operation was last updated, for example, + 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance + (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. - `"InstanceId"`: The instance IDs. Default: Describes all your instances. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required @@ -14127,13 +14484,15 @@ end describe_internet_gateways() describe_internet_gateways(params::Dict{String,<:Any}) -Describes one or more of your internet gateways. +Describes your internet gateways. The default is to describe all your internet gateways. +Alternatively, you can specify specific internet gateway IDs or filter the results to +include only the internet gateways that match specific criteria. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. attachment.state - The current state of the - attachment between the gateway and the VPC (available). Present only if a VPC is attached. - attachment.vpc-id - The ID of an attached VPC. internet-gateway-id - The ID of the +- `"Filter"`: The filters. attachment.state - The current state of the attachment + between the gateway and the VPC (available). Present only if a VPC is attached. + attachment.vpc-id - The ID of an attached VPC. internet-gateway-id - The ID of the Internet gateway. owner-id - The ID of the Amazon Web Services account that owns the internet gateway. tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For @@ -14149,7 +14508,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"internetGatewayId"`: One or more internet gateway IDs. Default: Describes all your +- `"internetGatewayId"`: The IDs of the internet gateways. Default: Describes all your internet gateways. """ function describe_internet_gateways(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -14168,6 +14527,34 @@ function describe_internet_gateways( ) end +""" + describe_ipam_byoasn() + describe_ipam_byoasn(params::Dict{String,<:Any}) + +Describes your Autonomous System Numbers (ASNs), their provisioning statuses, and the BYOIP +CIDRs with which they are associated. For more information, see Tutorial: Bring your ASN to +IPAM in the Amazon VPC IPAM guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve + the remaining results, make another call with the returned nextToken value. +- `"NextToken"`: The token for the next page of results. +""" +function describe_ipam_byoasn(; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2("DescribeIpamByoasn"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function describe_ipam_byoasn( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeIpamByoasn", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ describe_ipam_pools() describe_ipam_pools(params::Dict{String,<:Any}) @@ -14427,12 +14814,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys configuration. 
network-card-index - The index of the network card. ram-disk-id - The RAM disk ID. - `"LaunchTemplateId"`: The ID of the launch template. To describe one or more versions of - a specified launch template, you must specify either the LaunchTemplateId or the - LaunchTemplateName, but not both. To describe all the latest or default launch template - versions in your account, you must omit this parameter. + a specified launch template, you must specify either the launch template ID or the launch + template name, but not both. To describe all the latest or default launch template versions + in your account, you must omit this parameter. - `"LaunchTemplateName"`: The name of the launch template. To describe one or more versions - of a specified launch template, you must specify either the LaunchTemplateName or the - LaunchTemplateId, but not both. To describe all the latest or default launch template + of a specified launch template, you must specify either the launch template name or the + launch template ID, but not both. To describe all the latest or default launch template versions in your account, you must omit this parameter. - `"LaunchTemplateVersion"`: One or more versions of the launch template. Valid values depend on whether you are describing a specified launch template (by ID or name) or all @@ -14452,7 +14839,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the AMI ID is displayed in the response for imageId. If false, and if a Systems Manager parameter is specified for ImageId, the parameter is displayed in the response for imageId. For more information, see Use a Systems Manager parameter instead of an AMI ID in the - Amazon Elastic Compute Cloud User Guide. Default: false + Amazon EC2 User Guide. Default: false """ function describe_launch_template_versions(; aws_config::AbstractAWSConfig=global_aws_config() @@ -14766,6 +15153,71 @@ function describe_local_gateways( ) end +""" + describe_locked_snapshots() + describe_locked_snapshots(params::Dict{String,<:Any}) + +Describes the lock status for a snapshot. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: The filters. lock-state - The state of the snapshot lock + (compliance-cooloff | governance | compliance | expired). +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. +- `"NextToken"`: The token returned from a previous paginated request. Pagination continues + from the end of the items returned by the previous request. +- `"SnapshotId"`: The IDs of the snapshots for which to view the lock status. 
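# Example

An editor's usage sketch, not generated code: it assumes the EC2 service module has been
loaded with `using AWS; @service EC2`, that the snapshot ID below is a placeholder, and
that vector-of-`Dict` filters are flattened by AWS.jl's query serialization.

using AWS
@service EC2

# Check the lock status of one snapshot, restricted to governance-mode locks.
lock_params = Dict{String,Any}(
    "SnapshotId" => ["snap-0123456789abcdef0"],                             # placeholder ID
    "Filter" => [Dict("Name" => "lock-state", "Value" => ["governance"])],  # documented lock-state filter
    "MaxResults" => 5,
)
EC2.describe_locked_snapshots(lock_params)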
+""" +function describe_locked_snapshots(; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2( + "DescribeLockedSnapshots"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_locked_snapshots( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeLockedSnapshots", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_mac_hosts() + describe_mac_hosts(params::Dict{String,<:Any}) + +Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated Hosts. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: The filters. availability-zone - The Availability Zone of the EC2 Mac + Dedicated Host. instance-type - The instance type size that the EC2 Mac Dedicated Host + is configured to support. +- `"HostId"`: The IDs of the EC2 Mac Dedicated Hosts. +- `"MaxResults"`: The maximum number of results to return for the request in a single page. + The remaining results can be seen by sending another request with the returned nextToken + value. This value can be between 5 and 500. If maxResults is given a larger value than 500, + you receive an error. +- `"NextToken"`: The token to use to retrieve the next page of results. +""" +function describe_mac_hosts(; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2("DescribeMacHosts"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function describe_mac_hosts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeMacHosts", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ describe_managed_prefix_lists() describe_managed_prefix_lists(params::Dict{String,<:Any}) @@ -14845,26 +15297,28 @@ end describe_nat_gateways() describe_nat_gateways(params::Dict{String,<:Any}) -Describes one or more of your NAT gateways. +Describes your NAT gateways. The default is to describe all your NAT gateways. +Alternatively, you can specify specific NAT gateway IDs or filter the results to include +only the NAT gateways that match specific criteria. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"Filter"`: One or more filters. nat-gateway-id - The ID of the NAT gateway. state - - The state of the NAT gateway (pending | failed | available | deleting | deleted). - subnet-id - The ID of the subnet in which the NAT gateway resides. tag:<key> - The - key/value combination of a tag assigned to the resource. Use the tag key in the filter name - and the tag value as the filter value. For example, to find all resources that have a tag - with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for - the filter value. tag-key - The key of a tag assigned to the resource. Use this filter - to find all resources assigned a tag with a specific key, regardless of the tag value. +- `"Filter"`: The filters. nat-gateway-id - The ID of the NAT gateway. state - The + state of the NAT gateway (pending | failed | available | deleting | deleted). subnet-id + - The ID of the subnet in which the NAT gateway resides. 
tag:<key> - The key/value + combination of a tag assigned to the resource. Use the tag key in the filter name and the + tag value as the filter value. For example, to find all resources that have a tag with the + key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the + filter value. tag-key - The key of a tag assigned to the resource. Use this filter to + find all resources assigned a tag with a specific key, regardless of the tag value. vpc-id - The ID of the VPC in which the NAT gateway resides. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. -- `"NatGatewayId"`: One or more NAT gateway IDs. +- `"NatGatewayId"`: The IDs of the NAT gateways. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. """ @@ -14888,13 +15342,15 @@ end describe_network_acls() describe_network_acls(params::Dict{String,<:Any}) -Describes one or more of your network ACLs. For more information, see Network ACLs in the -Amazon Virtual Private Cloud User Guide. +Describes your network ACLs. The default is to describe all your network ACLs. +Alternatively, you can specify specific network ACL IDs or filter the results to include +only the network ACLs that match specific criteria. For more information, see Network ACLs +in the Amazon VPC User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. association.association-id - The ID of an association - ID for the ACL. association.network-acl-id - The ID of the network ACL involved in the +- `"Filter"`: The filters. association.association-id - The ID of an association ID for + the ACL. association.network-acl-id - The ID of the network ACL involved in the association. association.subnet-id - The ID of the subnet involved in the association. default - Indicates whether the ACL is the default network ACL for the VPC. entry.cidr - The IPv4 CIDR range specified in the entry. entry.icmp.code - The ICMP code specified @@ -14917,7 +15373,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. -- `"NetworkAclId"`: One or more network ACL IDs. Default: Describes all your network ACLs. +- `"NetworkAclId"`: The IDs of the network ACLs. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. - `"dryRun"`: Checks whether you have the required permissions for the action, without @@ -15199,7 +15655,11 @@ end describe_network_interfaces() describe_network_interfaces(params::Dict{String,<:Any}) -Describes one or more of your network interfaces. +Describes one or more of your network interfaces. If you have a large number of network +interfaces, the operation fails unless you use pagination or one of the following filters: +group-id, mac-address, private-dns-name, private-ip-address, private-dns-name, subnet-id, +or vpc-id. We strongly recommend using only paginated requests. Unpaginated requests are +susceptible to throttling and timeouts. 
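# Example

A hedged pagination sketch (editor's aside, not generated code): it assumes `using AWS;
@service EC2`, a placeholder subnet ID, and that the response carries a `nextToken` field
when more pages exist; verify the exact request and response key names against your AWS.jl
version.

using AWS
@service EC2

params = Dict{String,Any}(
    "filter" => [Dict("Name" => "subnet-id", "Value" => ["subnet-0abc1234def567890"])],  # placeholder subnet
    "MaxResults" => 100,
)
pages = []
while true
    page = EC2.describe_network_interfaces(params)
    push!(pages, page)                          # keep each raw page; parse as needed
    token = get(page, "nextToken", nothing)     # assumed response field
    token === nothing && break
    params["NextToken"] = token                 # assumed request key for the next page
end

Fetching one filtered page at a time keeps each call small enough to avoid the throttling
and timeout behavior described above.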
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15214,55 +15674,54 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"filter"`: One or more filters. addresses.private-ip-address - The private IPv4 - addresses associated with the network interface. addresses.primary - Whether the private - IPv4 address is the primary IP address associated with the network interface. - addresses.association.public-ip - The association ID returned when the network interface - was associated with the Elastic IP address (IPv4). addresses.association.owner-id - The - owner ID of the addresses associated with the network interface. +- `"filter"`: One or more filters. association.allocation-id - The allocation ID + returned when you allocated the Elastic IP address (IPv4) for your network interface. association.association-id - The association ID returned when the network interface was - associated with an IPv4 address. association.allocation-id - The allocation ID returned - when you allocated the Elastic IP address (IPv4) for your network interface. - association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the - network interface. association.public-ip - The address of the Elastic IP address (IPv4) - bound to the network interface. association.public-dns-name - The public DNS name for - the network interface (IPv4). attachment.attachment-id - The ID of the interface - attachment. attachment.attach-time - The time that the network interface was attached to - an instance. attachment.delete-on-termination - Indicates whether the attachment is - deleted when an instance is terminated. attachment.device-index - The device index to - which the network interface is attached. attachment.instance-id - The ID of the instance - to which the network interface is attached. attachment.instance-owner-id - The owner ID - of the instance to which the network interface is attached. attachment.status - The - status of the attachment (attaching | attached | detaching | detached). - availability-zone - The Availability Zone of the network interface. description - The - description of the network interface. group-id - The ID of a security group associated - with the network interface. group-name - The name of a security group associated with - the network interface. ipv6-addresses.ipv6-address - An IPv6 address associated with the - network interface. interface-type - The type of network interface (api_gateway_managed | - aws_codestar_connections_managed | branch | efa | gateway_load_balancer | - gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed - | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | - transit_gateway | trunk | vpc_endpoint). mac-address - The MAC address of the network - interface. network-interface-id - The ID of the network interface. owner-id - The - Amazon Web Services account ID of the network interface owner. private-ip-address - The - private IPv4 address or addresses of the network interface. private-dns-name - The - private DNS name of the network interface (IPv4). 
requester-id - The alias or Amazon Web - Services account ID of the principal or service that created the network interface. - requester-managed - Indicates whether the network interface is being managed by an Amazon - Web Service (for example, Amazon Web Services Management Console, Auto Scaling, and so on). - source-dest-check - Indicates whether the network interface performs source/destination - checking. A value of true means checking is enabled, and false means checking is disabled. - The value must be false for the network interface to perform network address translation - (NAT) in your VPC. status - The status of the network interface. If the network - interface is not attached to an instance, the status is available; if a network interface - is attached to an instance the status is in-use. subnet-id - The ID of the subnet for - the network interface. tag:<key> - The key/value combination of a tag assigned to - the resource. Use the tag key in the filter name and the tag value as the filter value. For - example, to find all resources that have a tag with the key Owner and the value TeamA, - specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key - of a tag assigned to the resource. Use this filter to find all resources assigned a tag - with a specific key, regardless of the tag value. vpc-id - The ID of the VPC for the - network interface. + associated with an IPv4 address. addresses.association.owner-id - The owner ID of the + addresses associated with the network interface. addresses.association.public-ip - The + association ID returned when the network interface was associated with the Elastic IP + address (IPv4). addresses.primary - Whether the private IPv4 address is the primary IP + address associated with the network interface. addresses.private-ip-address - The + private IPv4 addresses associated with the network interface. association.ip-owner-id - + The owner of the Elastic IP address (IPv4) associated with the network interface. + association.public-ip - The address of the Elastic IP address (IPv4) bound to the network + interface. association.public-dns-name - The public DNS name for the network interface + (IPv4). attachment.attach-time - The time that the network interface was attached to an + instance. attachment.attachment-id - The ID of the interface attachment. + attachment.delete-on-termination - Indicates whether the attachment is deleted when an + instance is terminated. attachment.device-index - The device index to which the network + interface is attached. attachment.instance-id - The ID of the instance to which the + network interface is attached. attachment.instance-owner-id - The owner ID of the + instance to which the network interface is attached. attachment.status - The status of + the attachment (attaching | attached | detaching | detached). availability-zone - The + Availability Zone of the network interface. description - The description of the network + interface. group-id - The ID of a security group associated with the network interface. + ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface. 
+ interface-type - The type of network interface (api_gateway_managed | + aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efs | + gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | + interface | iot_rules_managed | lambda | load_balancer | nat_gateway | + network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint). + mac-address - The MAC address of the network interface. network-interface-id - The ID of + the network interface. owner-id - The Amazon Web Services account ID of the network + interface owner. private-dns-name - The private DNS name of the network interface + (IPv4). private-ip-address - The private IPv4 address or addresses of the network + interface. requester-id - The alias or Amazon Web Services account ID of the principal + or service that created the network interface. requester-managed - Indicates whether the + network interface is being managed by an Amazon Web Service (for example, Amazon Web + Services Management Console, Auto Scaling, and so on). source-dest-check - Indicates + whether the network interface performs source/destination checking. A value of true means + checking is enabled, and false means checking is disabled. The value must be false for the + network interface to perform network address translation (NAT) in your VPC. status - + The status of the network interface. If the network interface is not attached to an + instance, the status is available; if a network interface is attached to an instance the + status is in-use. subnet-id - The ID of the subnet for the network interface. + tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag + key in the filter name and the tag value as the filter value. For example, to find all + resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the + filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the + resource. Use this filter to find all resources assigned a tag with a specific key, + regardless of the tag value. vpc-id - The ID of the VPC for the network interface. """ function describe_network_interfaces(; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -15447,9 +15906,11 @@ end describe_regions(params::Dict{String,<:Any}) Describes the Regions that are enabled for your account, or all Regions. For a list of the -Regions supported by Amazon EC2, see Amazon Elastic Compute Cloud endpoints and quotas. -For information about enabling and disabling Regions for your account, see Managing Amazon -Web Services Regions in the Amazon Web Services General Reference. +Regions supported by Amazon EC2, see Amazon EC2 service endpoints. For information about +enabling and disabling Regions for your account, see Specify which Amazon Web Services +Regions your account can use in the Amazon Web Services Account Management Reference Guide. + The order of the elements in the response, including those within nested structures, might +vary. Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15481,7 +15942,7 @@ end describe_replace_root_volume_tasks(params::Dict{String,<:Any}) Describes a root volume replacement task. For more information, see Replace a root volume -in the Amazon Elastic Compute Cloud User Guide. +in the Amazon EC2 User Guide. 
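# Example

An editor's sketch only: the parameter and filter names below are assumed from the EC2
DescribeReplaceRootVolumeTasks request shape and are not shown in this patch; the instance
ID is a placeholder. Verify the keys against the generated docstring before relying on them.

using AWS
@service EC2

# List root volume replacement tasks for one instance (assumed instance-id filter).
EC2.describe_replace_root_volume_tasks(Dict{String,Any}(
    "Filter" => [Dict("Name" => "instance-id", "Value" => ["i-0123456789abcdef0"])],
    "MaxResults" => 10,
))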
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15522,7 +15983,9 @@ end describe_reserved_instances(params::Dict{String,<:Any}) Describes one or more of the Reserved Instances that you purchased. For more information -about Reserved Instances, see Reserved Instances in the Amazon EC2 User Guide. +about Reserved Instances, see Reserved Instances in the Amazon EC2 User Guide. The order +of the elements in the response, including those within nested structures, might vary. +Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15587,8 +16050,10 @@ buyer, you specify the configuration of the Reserved Instance to purchase, and t Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price -of all of the listings that you purchase. For more information, see Reserved Instance -Marketplace in the Amazon EC2 User Guide. +of all of the listings that you purchase. For more information, see Sell in the Reserved +Instance Marketplace in the Amazon EC2 User Guide. The order of the elements in the +response, including those within nested structures, might vary. Applications should not +assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15626,7 +16091,9 @@ end Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned. -For more information, see Modifying Reserved Instances in the Amazon EC2 User Guide. +For more information, see Modify Reserved Instances in the Amazon EC2 User Guide. The +order of the elements in the response, including those within nested structures, might +vary. Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15677,8 +16144,10 @@ time period, you do not receive insufficient capacity errors, and you pay a lowe rate than the rate charged for On-Demand instances for the actual time used. If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own -Reserved Instances. For more information, see Reserved Instance Marketplace in the Amazon -EC2 User Guide. +Reserved Instances. For more information, see Sell in the Reserved Instance Marketplace in +the Amazon EC2 User Guide. The order of the elements in the response, including those +within nested structures, might vary. Applications should not assume the elements appear in +a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15699,7 +16168,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84). 
- `"IncludeMarketplace"`: Include Reserved Instance Marketplace offerings in the response. - `"InstanceType"`: The instance type that the reservation will cover (for example, - m1.small). For more information, see Instance types in the Amazon EC2 User Guide. + m1.small). For more information, see Amazon EC2 instance types in the Amazon EC2 User Guide. - `"MaxDuration"`: The maximum duration (in seconds) to filter when searching for offerings. Default: 94608000 (3 years) - `"MaxInstanceCount"`: The maximum number of instances to filter when searching for @@ -15750,21 +16219,24 @@ end describe_route_tables() describe_route_tables(params::Dict{String,<:Any}) -Describes one or more of your route tables. Each subnet in your VPC must be associated with -a route table. If a subnet is not explicitly associated with any route table, it is -implicitly associated with the main route table. This command does not return the subnet ID -for implicit associations. For more information, see Route tables in the Amazon Virtual -Private Cloud User Guide. +Describes your route tables. The default is to describe all your route tables. +Alternatively, you can specify specific route table IDs or filter the results to include +only the route tables that match specific criteria. Each subnet in your VPC must be +associated with a route table. If a subnet is not explicitly associated with any route +table, it is implicitly associated with the main route table. This command does not return +the subnet ID for implicit associations. For more information, see Route tables in the +Amazon VPC User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. association.route-table-association-id - The ID of an - association ID for the route table. association.route-table-id - The ID of the route - table involved in the association. association.subnet-id - The ID of the subnet involved - in the association. association.main - Indicates whether the route table is the main - route table for the VPC (true | false). Route tables that do not have an association ID are - not returned in the response. owner-id - The ID of the Amazon Web Services account that - owns the route table. route-table-id - The ID of the route table. +- `"Filter"`: The filters. association.gateway-id - The ID of the gateway involved in + the association. association.route-table-association-id - The ID of an association ID + for the route table. association.route-table-id - The ID of the route table involved in + the association. association.subnet-id - The ID of the subnet involved in the + association. association.main - Indicates whether the route table is the main route + table for the VPC (true | false). Route tables that do not have an association ID are not + returned in the response. owner-id - The ID of the Amazon Web Services account that owns + the route table. route-table-id - The ID of the route table. route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table. route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table. route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Service @@ -15792,7 +16264,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. 
-- `"RouteTableId"`: One or more route table IDs. Default: Describes all your route tables. +- `"RouteTableId"`: The IDs of the route tables. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -15923,8 +16395,8 @@ end describe_security_group_references(item) describe_security_group_references(item, params::Dict{String,<:Any}) -[VPC only] Describes the VPCs on the other side of a VPC peering connection that are -referencing the security groups you've specified in this request. +Describes the VPCs on the other side of a VPC peering connection that are referencing the +security groups you've specified in this request. # Arguments - `item`: The IDs of the security groups in your account. @@ -16002,13 +16474,7 @@ end describe_security_groups() describe_security_groups(params::Dict{String,<:Any}) -Describes the specified security groups or all of your security groups. A security group is -for use with instances either in the EC2-Classic platform or in a specific VPC. For more -information, see Amazon EC2 security groups in the Amazon Elastic Compute Cloud User Guide -and Security groups for your VPC in the Amazon Virtual Private Cloud User Guide. We are -retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more -information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User -Guide. +Describes the specified security groups or all of your security groups. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -16050,10 +16516,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys VPC specified when the security group was created. - `"GroupId"`: The IDs of the security groups. Required for security groups in a nondefault VPC. Default: Describes all of your security groups. -- `"GroupName"`: [EC2-Classic and default VPC only] The names of the security groups. You - can specify either the security group name or the security group ID. For security groups in - a nondefault VPC, use the group-name filter to describe security groups by name. Default: - Describes all of your security groups. +- `"GroupName"`: [Default VPC] The names of the security groups. You can specify either the + security group name or the security group ID. Default: Describes all of your security + groups. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. This value can be between 5 and 1000. If this parameter is not specified, then all items are returned. For @@ -16086,7 +16551,7 @@ end Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time. For more information about EBS snapshots, see Amazon EBS snapshots in -the Amazon Elastic Compute Cloud User Guide. +the Amazon EBS User Guide. # Arguments - `attribute`: The snapshot attribute you would like to view. @@ -16195,7 +16660,9 @@ you own or have explicit permissions, or all for public snapshots. If you are de long list of snapshots, we recommend that you paginate the output to make the list more manageable. For more information, see Pagination. To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores. 
For more information about EBS -snapshots, see Amazon EBS snapshots in the Amazon Elastic Compute Cloud User Guide. +snapshots, see Amazon EBS snapshots in the Amazon EBS User Guide. We strongly recommend +using only paginated requests. Unpaginated requests are susceptible to throttling and +timeouts. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -16215,10 +16682,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. volume-id - The ID of the volume the snapshot is for. volume-size - The size of the volume, in GiB. -- `"MaxResults"`: The maximum number of snapshots to return for this request. This value - can be between 5 and 1,000; if this value is larger than 1,000, only 1,000 results are - returned. If this parameter is not used, then the request returns all snapshots. You cannot - specify this parameter and the snapshot IDs parameter in the same request. For more +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -16248,7 +16713,7 @@ end describe_spot_datafeed_subscription(params::Dict{String,<:Any}) Describes the data feed for Spot Instances. For more information, see Spot Instance data -feed in the Amazon EC2 User Guide for Linux Instances. +feed in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -16447,10 +16912,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys name for the volume in the block device mapping (for example, /dev/sdh or xvdh). launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume. launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB. - launch.block-device-mapping.volume-type - The type of EBS volume: gp2 for General Purpose - SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1for Cold - HDD, or standard for Magnetic. launch.group-id - The ID of the security group for the - instance. launch.group-name - The name of the security group for the instance. + launch.block-device-mapping.volume-type - The type of EBS volume: gp2 or gp3 for General + Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for + Cold HDD, or standard for Magnetic. launch.group-id - The ID of the security group for + the instance. launch.group-name - The name of the security group for the instance. launch.image-id - The ID of the AMI. launch.instance-type - The type of instance (for example, m3.medium). launch.kernel-id - The kernel ID. launch.key-name - The name of the key pair the instance launched with. launch.monitoring-enabled - Whether detailed @@ -16471,15 +16936,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys request. state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. 
For more information, see Spot request status in the Amazon EC2 - User Guide for Linux Instances. status-code - The short code describing the most recent - evaluation of your Spot Instance request. status-message - The message explaining the - status of the Spot Instance request. tag:<key> - The key/value combination of a - tag assigned to the resource. Use the tag key in the filter name and the tag value as the - filter value. For example, to find all resources that have a tag with the key Owner and the - value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. - tag-key - The key of a tag assigned to the resource. Use this filter to find all resources - assigned a tag with a specific key, regardless of the tag value. type - The type of Spot - Instance request (one-time | persistent). valid-from - The start date of the request. + User Guide. status-code - The short code describing the most recent evaluation of your + Spot Instance request. status-message - The message explaining the status of the Spot + Instance request. tag:<key> - The key/value combination of a tag assigned to the + resource. Use the tag key in the filter name and the tag value as the filter value. For + example, to find all resources that have a tag with the key Owner and the value TeamA, + specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key + of a tag assigned to the resource. Use this filter to find all resources assigned a tag + with a specific key, regardless of the tag value. type - The type of Spot Instance + request (one-time | persistent). valid-from - The start date of the request. valid-until - The end date of the request. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more @@ -16516,10 +16981,9 @@ end describe_spot_price_history(params::Dict{String,<:Any}) Describes the Spot price history. For more information, see Spot Instance pricing history -in the Amazon EC2 User Guide for Linux Instances. When you specify a start and end time, -the operation returns the prices of the instance types within that time range. It also -returns the last price change before the start time, which is the effective price as of the -start time. +in the Amazon EC2 User Guide. When you specify a start and end time, the operation returns +the prices of the instance types within that time range. It also returns the last price +change before the start time, which is the effective price as of the start time. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -16530,8 +16994,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Linux (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)). spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported). timestamp - The time stamp of the Spot price history, in - UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater - than or less than comparison is not supported. + UTC format (for example, ddd MMM dd HH:mm:ss UTC YYYY). You can use wildcards (* and ?). + Greater than or less than comparison is not supported. - `"InstanceType"`: Filters the results by the specified instance types. - `"ProductDescription"`: Filters the results by the specified basic product descriptions. 
- `"availabilityZone"`: Filters the results by the specified Availability Zone. @@ -16568,9 +17032,9 @@ end describe_stale_security_groups(vpc_id) describe_stale_security_groups(vpc_id, params::Dict{String,<:Any}) -[VPC only] Describes the stale security group rules for security groups in a specified VPC. -Rules are stale when they reference a deleted security group in the same VPC or in a peer -VPC, or if they reference a security group in a peer VPC for which the VPC peering +Describes the stale security group rules for security groups in a specified VPC. Rules are +stale when they reference a deleted security group in the same VPC or peered VPC. Rules can +also be stale if they reference a security group in a peer VPC for which the VPC peering connection has been deleted. # Arguments @@ -16629,12 +17093,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"Filter"`: The filters. task-state - Returns tasks in a certain state (InProgress | Completed | Failed) bucket - Returns task information for tasks that targeted a specific - bucket. For the filter value, specify the bucket name. + bucket. For the filter value, specify the bucket name. When you specify the ImageIds + parameter, any filters that you specify are ignored. To use the filters, you must remove + the ImageIds parameter. - `"ImageId"`: The AMI IDs for which to show progress. Up to 20 AMI IDs can be included in a request. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more - information, see Pagination. You cannot specify this parameter and the ImageIDs parameter + information, see Pagination. You cannot specify this parameter and the ImageIds parameter in the same call. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -16659,25 +17125,26 @@ end describe_subnets() describe_subnets(params::Dict{String,<:Any}) -Describes one or more of your subnets. For more information, see Your VPC and subnets in -the Amazon Virtual Private Cloud User Guide. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. availability-zone - The Availability Zone for the - subnet. You can also use availabilityZone as the filter name. availability-zone-id - The - ID of the Availability Zone for the subnet. You can also use availabilityZoneId as the - filter name. available-ip-address-count - The number of IPv4 addresses in the subnet - that are available. cidr-block - The IPv4 CIDR block of the subnet. The CIDR block you - specify must exactly match the subnet's CIDR block for information to be returned for the - subnet. You can also use cidr or cidrBlock as the filter names. customer-owned-ipv4-pool - - The customer-owned IPv4 address pool associated with the subnet. default-for-az - - Indicates whether this is the default subnet for the Availability Zone (true | false). You - can also use defaultForAz as the filter name. enable-dns64 - Indicates whether DNS - queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic - IPv6 addresses for IPv4-only destinations. enable-lni-at-device-index - Indicates the - device position for local network interfaces in this subnet. 
For example, 1 indicates local - network interfaces in this subnet are the secondary network interface (eth1). +Describes your subnets. The default is to describe all your subnets. Alternatively, you can +specify specific subnet IDs or filter the results to include only the subnets that match +specific criteria. For more information, see Subnets in the Amazon VPC User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: The filters. availability-zone - The Availability Zone for the subnet. You + can also use availabilityZone as the filter name. availability-zone-id - The ID of the + Availability Zone for the subnet. You can also use availabilityZoneId as the filter name. + available-ip-address-count - The number of IPv4 addresses in the subnet that are + available. cidr-block - The IPv4 CIDR block of the subnet. The CIDR block you specify + must exactly match the subnet's CIDR block for information to be returned for the subnet. + You can also use cidr or cidrBlock as the filter names. customer-owned-ipv4-pool - The + customer-owned IPv4 address pool associated with the subnet. default-for-az - Indicates + whether this is the default subnet for the Availability Zone (true | false). You can also + use defaultForAz as the filter name. enable-dns64 - Indicates whether DNS queries made + to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses + for IPv4-only destinations. enable-lni-at-device-index - Indicates the device position + for local network interfaces in this subnet. For example, 1 indicates local network + interfaces in this subnet are the secondary network interface (eth1). ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the subnet. ipv6-cidr-block-association.association-id - An association ID for an IPv6 CIDR block associated with the subnet. ipv6-cidr-block-association.state - The state of an @@ -16709,7 +17176,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. -- `"SubnetId"`: One or more subnet IDs. Default: Describes all your subnets. +- `"SubnetId"`: The IDs of the subnets. Default: Describes all your subnets. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -16730,19 +17197,19 @@ end describe_tags(params::Dict{String,<:Any}) Describes the specified tags for your EC2 resources. For more information about tags, see -Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide. +Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide. We strongly +recommend using only paginated requests. Unpaginated requests are susceptible to throttling +and timeouts. The order of the elements in the response, including those within nested +structures, might vary. Applications should not assume the elements appear in a particular +order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Filter"`: The filters. key - The tag key. resource-id - The ID of the resource. 
- resource-type - The resource type (customer-gateway | dedicated-host | dhcp-options | - elastic-ip | fleet | fpga-image | host-reservation | image | instance | internet-gateway | - key-pair | launch-template | natgateway | network-acl | network-interface | placement-group - | reserved-instances | route-table | security-group | snapshot | spot-instances-request | - subnet | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection | - vpn-connection | vpn-gateway). tag:<key> - The key/value combination of the tag. - For example, specify \"tag:Owner\" for the filter name and \"TeamA\" for the filter value - to find resources with the tag \"Owner=TeamA\". value - The tag value. + resource-type - The resource type. For a list of possible values, see TagSpecification. + tag:<key> - The key/value combination of the tag. For example, specify \"tag:Owner\" + for the filter name and \"TeamA\" for the filter value to find resources with the tag + \"Owner=TeamA\". value - The tag value. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -16763,6 +17230,52 @@ function describe_tags( ) end +""" + describe_traffic_mirror_filter_rules() + describe_traffic_mirror_filter_rules(params::Dict{String,<:Any}) + +Describe traffic mirror filters that determine the traffic that is mirrored. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: Traffic mirror filters. traffic-mirror-filter-rule-id: The ID of the + Traffic Mirror rule. traffic-mirror-filter-id: The ID of the filter that this rule is + associated with. rule-number: The number of the Traffic Mirror rule. rule-action: The + action taken on the filtered traffic. Possible actions are accept and reject. + traffic-direction: The traffic direction. Possible directions are ingress and egress. + protocol: The protocol, for example UDP, assigned to the Traffic Mirror rule. + source-cidr-block: The source CIDR block assigned to the Traffic Mirror rule. + destination-cidr-block: The destination CIDR block assigned to the Traffic Mirror rule. + description: The description of the Traffic Mirror rule. +- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve + the remaining results, make another call with the returned nextToken value. +- `"NextToken"`: The token for the next page of results. +- `"TrafficMirrorFilterId"`: Traffic filter ID. +- `"TrafficMirrorFilterRuleId"`: Traffic filter rule IDs. 
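# Example

An editor's usage sketch, not generated code: it assumes `using AWS; @service EC2` and a
placeholder Traffic Mirror filter ID; the traffic-direction filter name comes from the list
documented above.

using AWS
@service EC2

# List only the ingress rules of one Traffic Mirror filter.
EC2.describe_traffic_mirror_filter_rules(Dict{String,Any}(
    "TrafficMirrorFilterId" => "tmf-0123456789abcdef0",                        # placeholder ID
    "Filter" => [Dict("Name" => "traffic-direction", "Value" => ["ingress"])],
))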
+""" +function describe_traffic_mirror_filter_rules(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeTrafficMirrorFilterRules"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_traffic_mirror_filter_rules( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeTrafficMirrorFilterRules", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_traffic_mirror_filters() describe_traffic_mirror_filters(params::Dict{String,<:Any}) @@ -17297,7 +17810,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Equal Cost Multipath Protocol support is enabled (enable | disable). owner-id - The ID of the Amazon Web Services account that owns the transit gateway. state - The state of the transit gateway (available | deleted | deleting | modifying | pending). - transit-gateway-id - The ID of the transit gateway. + transit-gateway-id - The ID of the transit gateway. tag-key - The key/value combination + of a tag assigned to the resource. Use the tag key in the filter name and the tag value as + the filter value. For example, to find all resources that have a tag with the key Owner and + the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. - `"MaxResults"`: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value. - `"NextToken"`: The token for the next page of results. @@ -17323,9 +17839,7 @@ end describe_trunk_interface_associations() describe_trunk_interface_associations(params::Dict{String,<:Any}) - This API action is currently in limited preview only. If you are interested in using this -feature, contact your account manager. Describes one or more network interface trunk -associations. +Describes one or more network interface trunk associations. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -17553,7 +18067,7 @@ end Describes the specified attribute of the specified volume. You can specify only one attribute at a time. For more information about EBS volumes, see Amazon EBS volumes in the -Amazon Elastic Compute Cloud User Guide. +Amazon EBS User Guide. # Arguments - `attribute`: The attribute of the volume. This parameter is required. @@ -17611,19 +18125,20 @@ the volume. The possible values are ok, impaired , warning, or insufficient-data checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks might still be taking place on your volume at the time. We recommend that you retry the request. For more -information about volume status, see Monitor the status of your volumes in the Amazon -Elastic Compute Cloud User Guide. Events: Reflect the cause of a volume status and might -require you to take action. For example, if your volume returns an impaired status, then -the volume event might be potential-data-inconsistency. This means that your volume has -been affected by an issue with the underlying host, has all I/O operations disabled, and -might have inconsistent data. Actions: Reflect the actions you might have to take in -response to an event. For example, if the status of the volume is impaired and the volume -event shows potential-data-inconsistency, then the action shows enable-volume-io. 
This -means that you may want to enable the I/O operations for the volume by calling the -EnableVolumeIO action and then check the volume for data consistency. Volume status is -based on the volume status checks, and does not reflect the volume state. Therefore, volume -status does not indicate volumes in the error state (for example, when a volume is -incapable of accepting I/O.) +information about volume status, see Monitor the status of your volumes in the Amazon EBS +User Guide. Events: Reflect the cause of a volume status and might require you to take +action. For example, if your volume returns an impaired status, then the volume event might +be potential-data-inconsistency. This means that your volume has been affected by an issue +with the underlying host, has all I/O operations disabled, and might have inconsistent +data. Actions: Reflect the actions you might have to take in response to an event. For +example, if the status of the volume is impaired and the volume event shows +potential-data-inconsistency, then the action shows enable-volume-io. This means that you +may want to enable the I/O operations for the volume by calling the EnableVolumeIO action +and then check the volume for data consistency. Volume status is based on the volume status +checks, and does not reflect the volume state. Therefore, volume status does not indicate +volumes in the error state (for example, when a volume is incapable of accepting I/O.) The +order of the elements in the response, including those within nested structures, might +vary. Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -17641,11 +18156,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data). - `"MaxResults"`: The maximum number of items to return for this request. To get the next - page of items, make another request with the token returned in the output. This value can - be between 5 and 1,000; if the value is larger than 1,000, only 1,000 results are returned. - If this parameter is not used, then all items are returned. You cannot specify this - parameter and the volume IDs parameter in the same request. For more information, see - Pagination. + page of items, make another request with the token returned in the output. For more + information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. - `"VolumeId"`: The IDs of the volumes. Default: Describes all your volumes. @@ -17676,7 +18188,10 @@ end Describes the specified EBS volumes or all of your EBS volumes. If you are describing a long list of volumes, we recommend that you paginate the output to make the list more manageable. For more information, see Pagination. For more information about EBS volumes, -see Amazon EBS volumes in the Amazon Elastic Compute Cloud User Guide. +see Amazon EBS volumes in the Amazon EBS User Guide. We strongly recommend using only +paginated requests. Unpaginated requests are susceptible to throttling and timeouts. The +order of the elements in the response, including those within nested structures, might +vary. Applications should not assume the elements appear in a particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -17704,13 +18219,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"maxResults"`: The maximum number of volumes to return for this request. This value can - be between 5 and 500; if you specify a value larger than 500, only 500 items are returned. - If this parameter is not used, then all items are returned. You cannot specify this - parameter and the volume IDs parameter in the same request. For more information, see - Pagination. +- `"maxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. - `"nextToken"`: The token returned from a previous paginated request. Pagination continues - from the end of the items returned from the previous request. + from the end of the items returned by the previous request. """ function describe_volumes(; aws_config::AbstractAWSConfig=global_aws_config()) return ec2("DescribeVolumes"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -17730,10 +18243,8 @@ end Describes the most recent volume modification request for the specified EBS volumes. If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification -request. You can also use CloudWatch Events to check the status of a modification to an EBS -volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User -Guide. For more information, see Monitor the progress of volume modifications in the Amazon -Elastic Compute Cloud User Guide. +request. For more information, see Monitor the progress of volume modifications in the +Amazon EBS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -17752,7 +18263,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys enabled (true | false). volume-id - The ID of the volume. - `"MaxResults"`: The maximum number of results (up to a limit of 500) to be returned in a paginated request. For more information, see Pagination. -- `"NextToken"`: The token returned by a previous paginated request. Pagination continues +- `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. - `"VolumeId"`: The IDs of the volumes. """ @@ -17823,20 +18334,18 @@ end describe_vpc_classic_link() describe_vpc_classic_link(params::Dict{String,<:Any}) -Describes the ClassicLink status of one or more VPCs. We are retiring EC2-Classic. We -recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate -from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Describes the ClassicLink status of the specified VPCs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. is-classic-link-enabled - Whether the VPC is enabled - for ClassicLink (true | false). tag:<key> - The key/value combination of a tag +- `"Filter"`: The filters. 
is-classic-link-enabled - Whether the VPC is enabled for + ClassicLink (true | false). tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. -- `"VpcId"`: One or more VPCs for which you want to describe the ClassicLink status. +- `"VpcId"`: The VPCs for which you want to describe the ClassicLink status. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -17861,18 +18370,15 @@ end describe_vpc_classic_link_dns_support() describe_vpc_classic_link_dns_support(params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Describes the ClassicLink DNS support status of one or more VPCs. If enabled, -the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when -addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of -an instance in a VPC resolves to its private IP address when addressed from a linked -EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute -Cloud User Guide. + This action is deprecated. Describes the ClassicLink DNS support status of one or more +VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private +IP address when addressed from an instance in the VPC to which it's linked. Similarly, the +DNS hostname of an instance in a VPC resolves to its private IP address when addressed from +a linked EC2-Classic instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"VpcIds"`: One or more VPC IDs. +- `"VpcIds"`: The IDs of the VPCs. - `"maxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -18132,7 +18638,9 @@ end describe_vpc_endpoints() describe_vpc_endpoints(params::Dict{String,<:Any}) -Describes your VPC endpoints. +Describes your VPC endpoints. The default is to describe all your VPC endpoints. +Alternatively, you can specify specific VPC endpoint IDs or filter the results to include +only the VPC endpoints that match specific criteria. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -18177,12 +18685,14 @@ end describe_vpc_peering_connections() describe_vpc_peering_connections(params::Dict{String,<:Any}) -Describes one or more of your VPC peering connections. +Describes your VPC peering connections. The default is to describe all your VPC peering +connections. Alternatively, you can specify specific VPC peering connection IDs or filter +the results to include only the VPC peering connections that match specific criteria. 
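# Examples
A minimal usage sketch, not part of the generated definitions: the peering connection ID below is a placeholder, and passing a vector of IDs assumes AWS.jl's usual flattening of `params` into the EC2 query string.

    # Describe every VPC peering connection in the account
    describe_vpc_peering_connections()

    # Describe a specific peering connection by ID (placeholder ID)
    describe_vpc_peering_connections(
        Dict("VpcPeeringConnectionId" => ["pcx-0123456789abcdef0"])
    )
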
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. accepter-vpc-info.cidr-block - The IPv4 CIDR block of - the accepter VPC. accepter-vpc-info.owner-id - The ID of the Amazon Web Services account +- `"Filter"`: The filters. accepter-vpc-info.cidr-block - The IPv4 CIDR block of the + accepter VPC. accepter-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the accepter VPC. accepter-vpc-info.vpc-id - The ID of the accepter VPC. expiration-time - The expiration date and time for the VPC peering connection. requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's VPC. @@ -18203,7 +18713,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. -- `"VpcPeeringConnectionId"`: One or more VPC peering connection IDs. Default: Describes +- `"VpcPeeringConnectionId"`: The IDs of the VPC peering connections. Default: Describes all your VPC peering connections. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -18233,14 +18743,16 @@ end describe_vpcs() describe_vpcs(params::Dict{String,<:Any}) -Describes one or more of your VPCs. +Describes your VPCs. The default is to describe all your VPCs. Alternatively, you can +specify specific VPC IDs or filter the results to include only the VPCs that match specific +criteria. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. cidr - The primary IPv4 CIDR block of the VPC. The - CIDR block you specify must exactly match the VPC's CIDR block for information to be - returned for the VPC. Must contain the slash followed by one or two digits (for example, - /28). cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC. +- `"Filter"`: The filters. cidr - The primary IPv4 CIDR block of the VPC. The CIDR block + you specify must exactly match the VPC's CIDR block for information to be returned for the + VPC. Must contain the slash followed by one or two digits (for example, /28). + cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC. cidr-block-association.association-id - The association ID for an IPv4 CIDR block associated with the VPC. cidr-block-association.state - The state of an IPv4 CIDR block associated with the VPC. dhcp-options-id - The ID of a set of DHCP options. @@ -18262,7 +18774,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. -- `"VpcId"`: One or more VPC IDs. Default: Describes all your VPCs. +- `"VpcId"`: The IDs of the VPCs. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
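As a quick illustration of the paginated, filterable describe calls above — a sketch only, not part of the patch: the tag key/value pair mirrors the `tag:Owner`/`TeamA` example from the filter documentation, the page size is arbitrary, and the nested `Filter` structure assumes AWS.jl's usual query flattening of dictionaries and arrays.

    # Page through VPCs ten at a time, filtering on a tag
    resp = describe_vpcs(
        Dict(
            "Filter" => [Dict("Name" => "tag:Owner", "Value" => ["TeamA"])],
            "maxResults" => 10,
        ),
    )
    # A `nextToken` entry in the parsed response indicates that another page is available
    # and can be passed back via Dict("nextToken" => ...).
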
@@ -18374,11 +18886,9 @@ end detach_classic_link_vpc(instance_id, vpc_id) detach_classic_link_vpc(instance_id, vpc_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the -instance has been unlinked, the VPC security groups are no longer associated with it. An -instance is automatically unlinked from a VPC when it's stopped. + This action is deprecated. Unlinks (detaches) a linked EC2-Classic instance from a VPC. +After the instance has been unlinked, the VPC security groups are no longer associated with +it. An instance is automatically unlinked from a VPC when it's stopped. # Arguments - `instance_id`: The ID of the instance to unlink from the VPC. @@ -18532,7 +19042,7 @@ specified Amazon Web Services Verified Access instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -18588,9 +19098,11 @@ detachment can be delayed indefinitely until you unmount the volume, force detac reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first. When a volume with an Amazon Web Services Marketplace product code is -detached from an instance, the product code is no longer associated with the instance. For -more information, see Detach an Amazon EBS volume in the Amazon Elastic Compute Cloud User -Guide. +detached from an instance, the product code is no longer associated with the instance. You +can't detach or force detach volumes that are attached to Amazon ECS or Fargate tasks. +Attempting to do this results in the UnsupportedOperationException exception with the +Unable to detach volume attached to ECS tasks error message. For more information, see +Detach an Amazon EBS volume in the Amazon EBS User Guide. # Arguments - `volume_id`: The ID of the volume. @@ -18689,7 +19201,7 @@ end disable_address_transfer(allocation_id, params::Dict{String,<:Any}) Disables Elastic IP address transfer. For more information, see Transfer Elastic IP -addresses in the Amazon Virtual Private Cloud User Guide. +addresses in the Amazon VPC User Guide. # Arguments - `allocation_id`: The allocation ID of an Elastic IP address. @@ -18771,7 +19283,7 @@ Disables EBS encryption by default for your account in the current Region. After disable encryption by default, you can still create encrypted volumes by enabling encryption when you create each volume. Disabling encryption by default does not change the encryption status of your existing volumes. For more information, see Amazon EBS encryption -in the Amazon Elastic Compute Cloud User Guide. +in the Amazon EBS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -18803,23 +19315,23 @@ end disable_fast_launch(image_id) disable_fast_launch(image_id, params::Dict{String,<:Any}) -Discontinue faster launching for a Windows AMI, and clean up existing pre-provisioned -snapshots. When you disable faster launching, the AMI uses the standard launch process for -each instance. All pre-provisioned snapshots must be removed before you can enable faster -launching again. To change these settings, you must own the AMI. +Discontinue Windows fast launch for a Windows AMI, and clean up existing pre-provisioned +snapshots. After you disable Windows fast launch, the AMI uses the standard launch process +for each new instance. Amazon EC2 must remove all pre-provisioned snapshots before you can +enable Windows fast launch again. You can only change these settings for Windows AMIs that +you own or that have been shared with you. # Arguments -- `image_id`: The ID of the image for which you’re turning off faster launching, and - removing pre-provisioned snapshots. +- `image_id`: Specify the ID of the image for which to disable Windows fast launch. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"Force"`: Forces the image settings to turn off faster launching for your Windows AMI. - This parameter overrides any errors that are encountered while cleaning up resources in - your account. +- `"Force"`: Forces the image settings to turn off Windows fast launch for your Windows + AMI. This parameter overrides any errors that are encountered while cleaning up resources + in your account. """ function disable_fast_launch(ImageId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -18894,11 +19406,16 @@ function disable_fast_snapshot_restores( end """ - disable_image_deprecation(image_id) - disable_image_deprecation(image_id, params::Dict{String,<:Any}) + disable_image(image_id) + disable_image(image_id, params::Dict{String,<:Any}) -Cancels the deprecation of the specified AMI. For more information, see Deprecate an AMI in -the Amazon EC2 User Guide. +Sets the AMI state to disabled and removes all launch permissions from the AMI. A disabled +AMI can't be used for instance launches. A disabled AMI can't be shared. If an AMI was +public or previously shared, it is made private. If an AMI was shared with an Amazon Web +Services account, organization, or Organizational Unit, they lose access to the disabled +AMI. A disabled AMI does not appear in DescribeImages API calls by default. Only the AMI +owner can disable an AMI. You can re-enable a disabled AMI using EnableImage. For more +information, see Disable an AMI in the Amazon EC2 User Guide. # Arguments - `image_id`: The ID of the AMI. @@ -18909,21 +19426,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
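# Examples
A minimal sketch (the AMI ID is a placeholder; the dry-run form assumes the usual `params` dictionary pass-through of the generated second method):

    # Make the AMI unavailable for launches and sharing
    disable_image("ami-0123456789abcdef0")

    # Check permissions only, without actually disabling the AMI
    disable_image("ami-0123456789abcdef0", Dict("DryRun" => true))
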
""" -function disable_image_deprecation( - ImageId; aws_config::AbstractAWSConfig=global_aws_config() -) +function disable_image(ImageId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( - "DisableImageDeprecation", + "DisableImage", Dict{String,Any}("ImageId" => ImageId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function disable_image_deprecation( +function disable_image( ImageId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( - "DisableImageDeprecation", + "DisableImage", Dict{String,Any}(mergewith(_merge, Dict{String,Any}("ImageId" => ImageId), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -18931,58 +19446,52 @@ function disable_image_deprecation( end """ - disable_ipam_organization_admin_account(delegated_admin_account_id) - disable_ipam_organization_admin_account(delegated_admin_account_id, params::Dict{String,<:Any}) - -Disable the IPAM account. For more information, see Enable integration with Organizations -in the Amazon VPC IPAM User Guide. + disable_image_block_public_access() + disable_image_block_public_access(params::Dict{String,<:Any}) -# Arguments -- `delegated_admin_account_id`: The Organizations member account ID that you want to - disable as IPAM account. +Disables block public access for AMIs at the account level in the specified Amazon Web +Services Region. This removes the block public access restriction from your account. With +the restriction removed, you can publicly share your AMIs in the specified Amazon Web +Services Region. The API can take up to 10 minutes to configure this setting. During this +time, if you run GetImageBlockPublicAccessState, the response will be block-new-sharing. +When the API has completed the configuration, the response will be unblocked. For more +information, see Block public access to your AMIs in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DryRun"`: A check for whether you have the required permissions for the action without - actually making the request and provides an error response. If you have the required +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
""" -function disable_ipam_organization_admin_account( - DelegatedAdminAccountId; aws_config::AbstractAWSConfig=global_aws_config() +function disable_image_block_public_access(; + aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( - "DisableIpamOrganizationAdminAccount", - Dict{String,Any}("DelegatedAdminAccountId" => DelegatedAdminAccountId); + "DisableImageBlockPublicAccess"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function disable_ipam_organization_admin_account( - DelegatedAdminAccountId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), +function disable_image_block_public_access( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( - "DisableIpamOrganizationAdminAccount", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("DelegatedAdminAccountId" => DelegatedAdminAccountId), - params, - ), - ); + "DisableImageBlockPublicAccess", + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - disable_serial_console_access() - disable_serial_console_access(params::Dict{String,<:Any}) + disable_image_deprecation(image_id) + disable_image_deprecation(image_id, params::Dict{String,<:Any}) -Disables access to the EC2 serial console of all instances for your account. By default, -access to the EC2 serial console is disabled for your account. For more information, see -Manage account access to the EC2 serial console in the Amazon EC2 User Guide. +Cancels the deprecation of the specified AMI. For more information, see Deprecate an AMI in +the Amazon EC2 User Guide. + +# Arguments +- `image_id`: The ID of the AMI. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -18990,9 +19499,130 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. """ -function disable_serial_console_access(; aws_config::AbstractAWSConfig=global_aws_config()) +function disable_image_deprecation( + ImageId; aws_config::AbstractAWSConfig=global_aws_config() +) return ec2( - "DisableSerialConsoleAccess"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DisableImageDeprecation", + Dict{String,Any}("ImageId" => ImageId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disable_image_deprecation( + ImageId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DisableImageDeprecation", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("ImageId" => ImageId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disable_image_deregistration_protection(image_id) + disable_image_deregistration_protection(image_id, params::Dict{String,<:Any}) + +Disables deregistration protection for an AMI. When deregistration protection is disabled, +the AMI can be deregistered. If you chose to include a 24-hour cooldown period when you +enabled deregistration protection for the AMI, then, when you disable deregistration +protection, you won’t immediately be able to deregister the AMI. For more information, +see Protect an AMI from deregistration in the Amazon EC2 User Guide. + +# Arguments +- `image_id`: The ID of the AMI. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function disable_image_deregistration_protection( + ImageId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DisableImageDeregistrationProtection", + Dict{String,Any}("ImageId" => ImageId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disable_image_deregistration_protection( + ImageId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DisableImageDeregistrationProtection", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("ImageId" => ImageId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disable_ipam_organization_admin_account(delegated_admin_account_id) + disable_ipam_organization_admin_account(delegated_admin_account_id, params::Dict{String,<:Any}) + +Disable the IPAM account. For more information, see Enable integration with Organizations +in the Amazon VPC IPAM User Guide. + +# Arguments +- `delegated_admin_account_id`: The Organizations member account ID that you want to + disable as IPAM account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function disable_ipam_organization_admin_account( + DelegatedAdminAccountId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DisableIpamOrganizationAdminAccount", + Dict{String,Any}("DelegatedAdminAccountId" => DelegatedAdminAccountId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disable_ipam_organization_admin_account( + DelegatedAdminAccountId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DisableIpamOrganizationAdminAccount", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DelegatedAdminAccountId" => DelegatedAdminAccountId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disable_serial_console_access() + disable_serial_console_access(params::Dict{String,<:Any}) + +Disables access to the EC2 serial console of all instances for your account. By default, +access to the EC2 serial console is disabled for your account. For more information, see +Manage account access to the EC2 serial console in the Amazon EC2 User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
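# Examples
A minimal sketch (no required arguments; the dry-run form assumes the usual `params` dictionary pass-through):

    # Turn off EC2 serial console access for all instances in the account
    disable_serial_console_access()

    # Verify permissions without changing the setting
    disable_serial_console_access(Dict("DryRun" => true))
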
+""" +function disable_serial_console_access(; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2( + "DisableSerialConsoleAccess"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function disable_serial_console_access( @@ -19006,6 +19636,44 @@ function disable_serial_console_access( ) end +""" + disable_snapshot_block_public_access() + disable_snapshot_block_public_access(params::Dict{String,<:Any}) + +Disables the block public access for snapshots setting at the account level for the +specified Amazon Web Services Region. After you disable block public access for snapshots +in a Region, users can publicly share snapshots in that Region. If block public access is +enabled in block-all-sharing mode, and you disable block public access, all snapshots that +were previously publicly shared are no longer treated as private and they become publicly +accessible again. For more information, see Block public access for snapshots in the +Amazon EBS User Guide . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function disable_snapshot_block_public_access(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DisableSnapshotBlockPublicAccess"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disable_snapshot_block_public_access( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DisableSnapshotBlockPublicAccess", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disable_transit_gateway_route_table_propagation(transit_gateway_route_table_id) disable_transit_gateway_route_table_propagation(transit_gateway_route_table_id, params::Dict{String,<:Any}) @@ -19106,10 +19774,8 @@ end disable_vpc_classic_link(vpc_id) disable_vpc_classic_link(vpc_id, params::Dict{String,<:Any}) -Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has -EC2-Classic instances linked to it. We are retiring EC2-Classic. We recommend that you -migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a -VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Disables ClassicLink for a VPC. You cannot disable ClassicLink +for a VPC that has EC2-Classic instances linked to it. # Arguments - `vpc_id`: The ID of the VPC. @@ -19143,12 +19809,10 @@ end disable_vpc_classic_link_dns_support() disable_vpc_classic_link_dns_support(params::Dict{String,<:Any}) -Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP -addresses when addressed between a linked EC2-Classic instance and instances in the VPC to -which it's linked. For more information, see ClassicLink in the Amazon Elastic Compute -Cloud User Guide. You must specify a VPC ID in the request. We are retiring EC2-Classic. -We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate -from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Disables ClassicLink DNS support for a VPC. 
If disabled, DNS +hostnames resolve to public IP addresses when addressed between a linked EC2-Classic +instance and instances in the VPC to which it's linked. You must specify a VPC ID in the +request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -19403,6 +20067,50 @@ function disassociate_instance_event_window( ) end +""" + disassociate_ipam_byoasn(asn, cidr) + disassociate_ipam_byoasn(asn, cidr, params::Dict{String,<:Any}) + +Remove the association between your Autonomous System Number (ASN) and your BYOIP CIDR. You +may want to use this action to disassociate an ASN from a CIDR or if you want to swap ASNs. +For more information, see Tutorial: Bring your ASN to IPAM in the Amazon VPC IPAM guide. + +# Arguments +- `asn`: A public 2-byte or 4-byte ASN. +- `cidr`: A BYOIP CIDR. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function disassociate_ipam_byoasn( + Asn, Cidr; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DisassociateIpamByoasn", + Dict{String,Any}("Asn" => Asn, "Cidr" => Cidr); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_ipam_byoasn( + Asn, + Cidr, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DisassociateIpamByoasn", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("Asn" => Asn, "Cidr" => Cidr), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_ipam_resource_discovery(ipam_resource_discovery_association_id) disassociate_ipam_resource_discovery(ipam_resource_discovery_association_id, params::Dict{String,<:Any}) @@ -19460,18 +20168,18 @@ end Disassociates secondary Elastic IP addresses (EIPs) from a public NAT gateway. You cannot disassociate your primary EIP. For more information, see Edit secondary IP address -associations in the Amazon Virtual Private Cloud User Guide. While disassociating is in -progress, you cannot associate/disassociate additional EIPs while the connections are being -drained. You are, however, allowed to delete the NAT gateway. An EIP will only be released -at the end of MaxDrainDurationSeconds. The EIPs stay associated and support the existing -connections but do not support any new connections (new connections are distributed across -the remaining associated EIPs). As the existing connections drain out, the EIPs (and the -corresponding private IPs mapped to them) get released. +associations in the Amazon VPC User Guide. While disassociating is in progress, you cannot +associate/disassociate additional EIPs while the connections are being drained. You are, +however, allowed to delete the NAT gateway. An EIP is released only at the end of +MaxDrainDurationSeconds. It stays associated and supports the existing connections but does +not support any new connections (new connections are distributed across the remaining +associated EIPs). As the existing connections drain out, the EIPs (and the corresponding +private IP addresses mapped to them) are released. # Arguments - `association_id`: The association IDs of EIPs that have been associated with the NAT gateway. 
-- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -19521,7 +20229,7 @@ end Disassociates a subnet or gateway from a route table. After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route tables in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. # Arguments - `association_id`: The association ID representing the current association between the @@ -19768,9 +20476,7 @@ end disassociate_trunk_interface(association_id) disassociate_trunk_interface(association_id, params::Dict{String,<:Any}) - This API action is currently in limited preview only. If you are interested in using this -feature, contact your account manager. Removes an association between a branch network -interface with a trunk network interface. +Removes an association between a branch network interface with a trunk network interface. # Arguments - `association_id`: The ID of the association @@ -19778,7 +20484,7 @@ interface with a trunk network interface. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to Ensure Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -19860,7 +20566,7 @@ end enable_address_transfer(allocation_id, transfer_account_id, params::Dict{String,<:Any}) Enables Elastic IP address transfer. For more information, see Transfer Elastic IP -addresses in the Amazon Virtual Private Cloud User Guide. +addresses in the Amazon VPC User Guide. # Arguments - `allocation_id`: The allocation ID of an Elastic IP address. @@ -19915,14 +20621,16 @@ Enables Infrastructure Performance subscriptions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Destination"`: The target Region or Availability Zone that the metric subscription is - enabled for. For example, eu-west-1. +- `"Destination"`: The target Region (like us-east-2) or Availability Zone ID (like + use2-az2) that the metric subscription is enabled for. If you use Availability Zone IDs, + the Source and Destination Availability Zones must be in the same Region. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"Metric"`: The metric used for the enabled subscription. -- `"Source"`: The source Region or Availability Zone that the metric subscription is - enabled for. For example, us-east-1. +- `"Source"`: The source Region (like us-east-1) or Availability Zone ID (like use1-az1) + that the metric subscription is enabled for. If you use Availability Zone IDs, the Source + and Destination Availability Zones must be in the same Region. 
- `"Statistic"`: The statistic used for the enabled subscription. """ function enable_aws_network_performance_metric_subscription(; @@ -19952,12 +20660,12 @@ end Enables EBS encryption by default for your account in the current Region. After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default KMS key or the KMS key that you specified when you created each volume. For -more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide. -You can specify the default KMS key for encryption by default using -ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId. Enabling encryption by default has no -effect on the encryption status of your existing volumes. After you enable encryption by -default, you can no longer launch instances using instance types that do not support -encryption. For more information, see Supported instance types. +more information, see Amazon EBS encryption in the Amazon EBS User Guide. You can specify +the default KMS key for encryption by default using ModifyEbsDefaultKmsKeyId or +ResetEbsDefaultKmsKeyId. Enabling encryption by default has no effect on the encryption +status of your existing volumes. After you enable encryption by default, you can no longer +launch instances using instance types that do not support encryption. For more information, +see Supported instance types. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -19989,15 +20697,16 @@ end enable_fast_launch(image_id) enable_fast_launch(image_id, params::Dict{String,<:Any}) -When you enable faster launching for a Windows AMI, images are pre-provisioned, using +When you enable Windows fast launch for a Windows AMI, images are pre-provisioned, using snapshots to launch instances up to 65% faster. To create the optimized Windows image, Amazon EC2 launches an instance and runs through Sysprep steps, rebooting as required. Then it creates a set of reserved snapshots that are used for subsequent launches. The reserved snapshots are automatically replenished as they are used, depending on your settings for -launch frequency. To change these settings, you must own the AMI. +launch frequency. You can only change these settings for Windows AMIs that you own or that +have been shared with you. # Arguments -- `image_id`: The ID of the image for which you’re enabling faster launching. +- `image_id`: Specify the ID of the image for which to enable Windows fast launch. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -20008,12 +20717,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys pre-provisioned snapshots. Launch template parameters can include either the name or ID of the launch template, but not both. - `"MaxParallelLaunches"`: The maximum number of instances that Amazon EC2 can launch at - the same time to create pre-provisioned snapshots for Windows faster launching. Value must - be 6 or greater. -- `"ResourceType"`: The type of resource to use for pre-provisioning the Windows AMI for - faster launching. Supported values include: snapshot, which is the default value. + the same time to create pre-provisioned snapshots for Windows fast launch. Value must be 6 + or greater. +- `"ResourceType"`: The type of resource to use for pre-provisioning the AMI for Windows + fast launch. Supported values include: snapshot, which is the default value. 
- `"SnapshotConfiguration"`: Configuration settings for creating and managing the snapshots - that are used for pre-provisioning the Windows AMI for faster launching. The associated + that are used for pre-provisioning the AMI for Windows fast launch. The associated ResourceType must be snapshot. """ function enable_fast_launch(ImageId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -20044,7 +20753,7 @@ Zones. You get the full benefit of fast snapshot restores after they enter the e state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. To disable fast snapshot restores, use DisableFastSnapshotRestores. For more information, see Amazon EBS fast snapshot restore in -the Amazon Elastic Compute Cloud User Guide. +the Amazon EBS User Guide. # Arguments - `availability_zone`: One or more Availability Zones. For example, us-east-2a. @@ -20093,6 +20802,99 @@ function enable_fast_snapshot_restores( ) end +""" + enable_image(image_id) + enable_image(image_id, params::Dict{String,<:Any}) + +Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for +instance launches, appears in describe operations, and can be shared. Amazon Web Services +accounts, organizations, and Organizational Units that lost access to the AMI when it was +disabled do not regain access automatically. Once the AMI is available, it can be shared +with them again. Only the AMI owner can re-enable a disabled AMI. For more information, see +Disable an AMI in the Amazon EC2 User Guide. + +# Arguments +- `image_id`: The ID of the AMI. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function enable_image(ImageId; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2( + "EnableImage", + Dict{String,Any}("ImageId" => ImageId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function enable_image( + ImageId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "EnableImage", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("ImageId" => ImageId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + enable_image_block_public_access(image_block_public_access_state) + enable_image_block_public_access(image_block_public_access_state, params::Dict{String,<:Any}) + +Enables block public access for AMIs at the account level in the specified Amazon Web +Services Region. This prevents the public sharing of your AMIs. However, if you already +have public AMIs, they will remain publicly available. The API can take up to 10 minutes to +configure this setting. During this time, if you run GetImageBlockPublicAccessState, the +response will be unblocked. When the API has completed the configuration, the response will +be block-new-sharing. For more information, see Block public access to your AMIs in the +Amazon EC2 User Guide. + +# Arguments +- `image_block_public_access_state`: Specify block-new-sharing to enable block public + access for AMIs at the account level in the specified Region. This will block any attempt + to publicly share your AMIs in the specified Region. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function enable_image_block_public_access( + ImageBlockPublicAccessState; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "EnableImageBlockPublicAccess", + Dict{String,Any}("ImageBlockPublicAccessState" => ImageBlockPublicAccessState); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function enable_image_block_public_access( + ImageBlockPublicAccessState, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "EnableImageBlockPublicAccess", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ImageBlockPublicAccessState" => ImageBlockPublicAccessState + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ enable_image_deprecation(deprecate_at, image_id) enable_image_deprecation(deprecate_at, image_id, params::Dict{String,<:Any}) @@ -20144,6 +20946,47 @@ function enable_image_deprecation( ) end +""" + enable_image_deregistration_protection(image_id) + enable_image_deregistration_protection(image_id, params::Dict{String,<:Any}) + +Enables deregistration protection for an AMI. When deregistration protection is enabled, +the AMI can't be deregistered. To allow the AMI to be deregistered, you must first disable +deregistration protection using DisableImageDeregistrationProtection. For more information, +see Protect an AMI from deregistration in the Amazon EC2 User Guide. + +# Arguments +- `image_id`: The ID of the AMI. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"WithCooldown"`: If true, enforces deregistration protection for 24 hours after + deregistration protection is disabled. +""" +function enable_image_deregistration_protection( + ImageId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "EnableImageDeregistrationProtection", + Dict{String,Any}("ImageId" => ImageId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function enable_image_deregistration_protection( + ImageId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "EnableImageDeregistrationProtection", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("ImageId" => ImageId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ enable_ipam_organization_admin_account(delegated_admin_account_id) enable_ipam_organization_admin_account(delegated_admin_account_id, params::Dict{String,<:Any}) @@ -20257,6 +21100,62 @@ function enable_serial_console_access( ) end +""" + enable_snapshot_block_public_access(state) + enable_snapshot_block_public_access(state, params::Dict{String,<:Any}) + +Enables or modifies the block public access for snapshots setting at the account level for +the specified Amazon Web Services Region. 
After you enable block public access for +snapshots in a Region, users can no longer request public sharing for snapshots in that +Region. Snapshots that are already publicly shared are either treated as private or they +remain publicly shared, depending on the State that you specify. If block public access is +enabled in block-all-sharing mode, and you change the mode to block-new-sharing, all +snapshots that were previously publicly shared are no longer treated as private and they +become publicly accessible again. For more information, see Block public access for +snapshots in the Amazon EBS User Guide. + +# Arguments +- `state`: The mode in which to enable block public access for snapshots for the Region. + Specify one of the following values: block-all-sharing - Prevents all public sharing of + snapshots in the Region. Users in the account will no longer be able to request new public + sharing. Additionally, snapshots that are already publicly shared are treated as private + and they are no longer publicly available. If you enable block public access for snapshots + in block-all-sharing mode, it does not change the permissions for snapshots that are + already publicly shared. Instead, it prevents these snapshots from be publicly visible and + publicly accessible. Therefore, the attributes for these snapshots still indicate that they + are publicly shared, even though they are not publicly available. block-new-sharing - + Prevents only new public sharing of snapshots in the Region. Users in the account will no + longer be able to request new public sharing. However, snapshots that are already publicly + shared, remain publicly available. unblocked is not a valid value for + EnableSnapshotBlockPublicAccess. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function enable_snapshot_block_public_access( + State; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "EnableSnapshotBlockPublicAccess", + Dict{String,Any}("State" => State); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function enable_snapshot_block_public_access( + State, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "EnableSnapshotBlockPublicAccess", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("State" => State), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ enable_transit_gateway_route_table_propagation(transit_gateway_route_table_id) enable_transit_gateway_route_table_propagation(transit_gateway_route_table_id, params::Dict{String,<:Any}) @@ -20400,14 +21299,11 @@ end enable_vpc_classic_link(vpc_id) enable_vpc_classic_link(vpc_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your -ClassicLink-enabled VPC to allow communication over private IP addresses. 
You cannot enable -your VPC for ClassicLink if any of your VPC route tables have existing routes for address -ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the -10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the -Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Enables a VPC for ClassicLink. You can then link EC2-Classic +instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. +You cannot enable your VPC for ClassicLink if any of your VPC route tables have existing +routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes +for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. # Arguments - `vpc_id`: The ID of the VPC. @@ -20441,14 +21337,11 @@ end enable_vpc_classic_link_dns_support() enable_vpc_classic_link_dns_support(params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, -the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when -addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of -an instance in a VPC resolves to its private IP address when addressed from a linked -EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute -Cloud User Guide. You must specify a VPC ID in the request. + This action is deprecated. Enables a VPC to support DNS hostname resolution for +ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its +private IP address when addressed from an instance in the VPC to which it's linked. +Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when +addressed from a linked EC2-Classic instance. You must specify a VPC ID in the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -20642,7 +21535,7 @@ end Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range. The routes are saved to the specified bucket in a JSON file. For more information, see Export -Route Tables to Amazon S3 in Transit Gateways. +route tables to Amazon S3 in the Amazon Web Services Transit Gateways Guide. # Arguments - `s3_bucket`: The name of the S3 bucket. @@ -20842,10 +21735,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"MaxResults"`: The maximum number of results to return for the request in a single page. - The remaining results can be seen by sending another request with the returned nextToken - value. This value can be between 5 and 500. If maxResults is given a larger value than 500, - you receive an error. Valid range: Minimum value of 1. Maximum value of 1000. +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. 
For more + information, see Pagination. - `"NextToken"`: The token to use to retrieve the next page of results. """ function get_capacity_reservation_usage( @@ -20973,7 +21865,8 @@ end get_console_screenshot(instance_id, params::Dict{String,<:Any}) Retrieve a JPG-format screenshot of a running instance to help with troubleshooting. The -returned content is Base64-encoded. +returned content is Base64-encoded. For more information, see Instance console output in +the Amazon EC2 User Guide. # Arguments - `instance_id`: The ID of the instance. @@ -21060,7 +21953,7 @@ end Describes the default KMS key for EBS encryption by default for your account in this Region. You can change the default KMS key for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId. For more information, see Amazon EBS -encryption in the Amazon Elastic Compute Cloud User Guide. +encryption in the Amazon EBS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -21089,8 +21982,7 @@ end get_ebs_encryption_by_default(params::Dict{String,<:Any}) Describes whether EBS encryption by default is enabled for your account in the current -Region. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud -User Guide. +Region. For more information, see Amazon EBS encryption in the Amazon EBS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -21125,6 +22017,8 @@ the template to do the following: Create a table in Athena that maps fields to log format Create a Lambda function that updates the table with new partitions on a daily, weekly, or monthly basis Create a table partitioned between two timestamps in the past Create a set of named queries in Athena that you can use to get started quickly +GetFlowLogsIntegrationTemplate does not support integration between Amazon Web Services +Transit Gateway Flow Logs and Amazon Athena. # Arguments - `config_delivery_s3_destination_arn`: To store the CloudFormation template in Amazon S3, @@ -21196,10 +22090,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"MaxResults"`: The maximum number of results to return for the request in a single page. - The remaining results can be seen by sending another request with the returned nextToken - value. This value can be between 5 and 500. If maxResults is given a larger value than 500, - you receive an error. +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. - `"NextToken"`: The token to use to retrieve the next page of results. """ function get_groups_for_capacity_reservation( @@ -21276,6 +22169,129 @@ function get_host_reservation_purchase_preview( ) end +""" + get_image_block_public_access_state() + get_image_block_public_access_state(params::Dict{String,<:Any}) + +Gets the current state of block public access for AMIs at the account level in the +specified Amazon Web Services Region. For more information, see Block public access to your +AMIs in the Amazon EC2 User Guide. 
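# Examples
A minimal sketch — the call takes no required arguments, and interpreting the returned state as `block-new-sharing` or `unblocked` follows the description above:

    # Inspect whether public AMI sharing is currently blocked in this Region
    get_image_block_public_access_state()
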
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function get_image_block_public_access_state(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "GetImageBlockPublicAccessState"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_image_block_public_access_state( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "GetImageBlockPublicAccessState", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_instance_metadata_defaults() + get_instance_metadata_defaults(params::Dict{String,<:Any}) + +Gets the default instance metadata service (IMDS) settings that are set at the account +level in the specified Amazon Web Services
 Region. For more information, see Order +of precedence for instance metadata options in the Amazon EC2 User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function get_instance_metadata_defaults(; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2( + "GetInstanceMetadataDefaults"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_instance_metadata_defaults( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "GetInstanceMetadataDefaults", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_instance_tpm_ek_pub(instance_id, key_format, key_type) + get_instance_tpm_ek_pub(instance_id, key_format, key_type, params::Dict{String,<:Any}) + +Gets the public endorsement key associated with the Nitro Trusted Platform Module +(NitroTPM) for the specified instance. + +# Arguments +- `instance_id`: The ID of the instance for which to get the public endorsement key. +- `key_format`: The required public endorsement key format. Specify der for a DER-encoded + public key that is compatible with OpenSSL. Specify tpmt for a TPM 2.0 format that is + compatible with tpm2-tools. The returned key is base64 encoded. +- `key_type`: The required public endorsement key type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Specify this parameter to verify whether the request will succeed, without + actually making the request. If the request will succeed, the response is DryRunOperation. + Otherwise, the response is UnauthorizedOperation. +""" +function get_instance_tpm_ek_pub( + InstanceId, KeyFormat, KeyType; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "GetInstanceTpmEkPub", + Dict{String,Any}( + "InstanceId" => InstanceId, "KeyFormat" => KeyFormat, "KeyType" => KeyType + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_instance_tpm_ek_pub( + InstanceId, + KeyFormat, + KeyType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "GetInstanceTpmEkPub", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceId" => InstanceId, + "KeyFormat" => KeyFormat, + "KeyType" => KeyType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_instance_types_from_instance_requirements(architecture_type, instance_requirements, virtualization_type) get_instance_types_from_instance_requirements(architecture_type, instance_requirements, virtualization_type, params::Dict{String,<:Any}) @@ -21515,6 +22531,64 @@ function get_ipam_discovered_accounts( ) end +""" + get_ipam_discovered_public_addresses(address_region, ipam_resource_discovery_id) + get_ipam_discovered_public_addresses(address_region, ipam_resource_discovery_id, params::Dict{String,<:Any}) + +Gets the public IP addresses that have been discovered by IPAM. + +# Arguments +- `address_region`: The Amazon Web Services Region for the IP address. +- `ipam_resource_discovery_id`: An IPAM resource discovery ID. 
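A short usage sketch, assuming the AWS.jl `@service` macro; the Region and resource
discovery ID below are illustrative placeholders only:

    using AWS
    @service EC2

    # List public IP addresses discovered by IPAM in a given Region for a
    # hypothetical resource discovery.
    addresses = EC2.get_ipam_discovered_public_addresses(
        "us-east-1", "ipam-res-disco-0123456789abcdef0"
    )
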
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: Filters. +- `"MaxResults"`: The maximum number of IPAM discovered public addresses to return in one + page of results. +- `"NextToken"`: The token for the next page of results. +""" +function get_ipam_discovered_public_addresses( + AddressRegion, + IpamResourceDiscoveryId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "GetIpamDiscoveredPublicAddresses", + Dict{String,Any}( + "AddressRegion" => AddressRegion, + "IpamResourceDiscoveryId" => IpamResourceDiscoveryId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ipam_discovered_public_addresses( + AddressRegion, + IpamResourceDiscoveryId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "GetIpamDiscoveredPublicAddresses", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AddressRegion" => AddressRegion, + "IpamResourceDiscoveryId" => IpamResourceDiscoveryId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_ipam_discovered_resource_cidrs(ipam_resource_discovery_id, resource_region) get_ipam_discovered_resource_cidrs(ipam_resource_discovery_id, resource_region, params::Dict{String,<:Any}) @@ -21727,8 +22801,8 @@ Retrieves the configuration data of the specified instance. You can use this dat a launch template. This action calls on other describe actions to get instance information. Depending on your instance configuration, you may need to allow the following actions in your IAM policy: DescribeSpotInstanceRequests, -DescribeInstanceCreditSpecifications, DescribeVolumes, DescribeInstanceAttribute, and -DescribeElasticGpus. Or, you can allow describe* depending on your instance requirements. +DescribeInstanceCreditSpecifications, DescribeVolumes, and DescribeInstanceAttribute. Or, +you can allow describe* depending on your instance requirements. # Arguments - `instance_id`: The ID of the instance. @@ -22019,28 +23093,74 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"TargetConfiguration"`: The configuration of the target Convertible Reserved Instance to exchange for your current Convertible Reserved Instances. 
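A hedged usage sketch, assuming the AWS.jl `@service` macro; the Reserved Instance ID is a
placeholder, and a TargetConfiguration list can be supplied through the optional params
dictionary in the same way:

    using AWS
    @service EC2

    # Preview the exchange quote for a hypothetical Convertible Reserved Instance.
    exchange_quote = EC2.get_reserved_instances_exchange_quote(["ri-0123456789abcdef0"])
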
""" -function get_reserved_instances_exchange_quote( - ReservedInstanceId; aws_config::AbstractAWSConfig=global_aws_config() +function get_reserved_instances_exchange_quote( + ReservedInstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "GetReservedInstancesExchangeQuote", + Dict{String,Any}("ReservedInstanceId" => ReservedInstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_reserved_instances_exchange_quote( + ReservedInstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "GetReservedInstancesExchangeQuote", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ReservedInstanceId" => ReservedInstanceId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_security_groups_for_vpc(vpc_id) + get_security_groups_for_vpc(vpc_id, params::Dict{String,<:Any}) + +Gets security groups that can be associated by the Amazon Web Services account making the +request with network interfaces in the specified VPC. + +# Arguments +- `vpc_id`: The VPC ID where the security group can be used. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: The filters. If using multiple filters, the results include security groups + which match all filters. group-id: The security group ID. description: The security + group's description. group-name: The security group name. owner-id: The security + group owner ID. primary-vpc-id: The VPC ID in which the security group was created. +- `"MaxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. For more + information, see Pagination. +- `"NextToken"`: The token returned from a previous paginated request. Pagination continues + from the end of the items returned by the previous request. +""" +function get_security_groups_for_vpc( + VpcId; aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( - "GetReservedInstancesExchangeQuote", - Dict{String,Any}("ReservedInstanceId" => ReservedInstanceId); + "GetSecurityGroupsForVpc", + Dict{String,Any}("VpcId" => VpcId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_reserved_instances_exchange_quote( - ReservedInstanceId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), +function get_security_groups_for_vpc( + VpcId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( - "GetReservedInstancesExchangeQuote", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("ReservedInstanceId" => ReservedInstanceId), params - ), - ); + "GetSecurityGroupsForVpc", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("VpcId" => VpcId), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -22081,6 +23201,40 @@ function get_serial_console_access_status( ) end +""" + get_snapshot_block_public_access_state() + get_snapshot_block_public_access_state(params::Dict{String,<:Any}) + +Gets the current state of block public access for snapshots setting for the account and +Region. 
For more information, see Block public access for snapshots in the Amazon EBS User +Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function get_snapshot_block_public_access_state(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "GetSnapshotBlockPublicAccessState"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_snapshot_block_public_access_state( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "GetSnapshotBlockPublicAccessState", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_spot_placement_scores(target_capacity) get_spot_placement_scores(target_capacity, params::Dict{String,<:Any}) @@ -22119,8 +23273,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Availability Zones. Otherwise, the response returns a list of scored Regions. A list of scored Availability Zones is useful if you want to launch all of your Spot capacity into a single Availability Zone. -- `"TargetCapacityUnitType"`: The unit for the target capacity. Default: units (translates - to number of instances) +- `"TargetCapacityUnitType"`: The unit for the target capacity. """ function get_spot_placement_scores( TargetCapacity; aws_config::AbstractAWSConfig=global_aws_config() @@ -22892,7 +24045,9 @@ the VM Import/Export User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Architecture"`: The architecture of the virtual machine. Valid values: i386 | x86_64 -- `"BootMode"`: The boot mode of the virtual machine. +- `"BootMode"`: The boot mode of the virtual machine. The uefi-preferred boot mode isn't + supported for importing images. For more information, see Boot modes in the VM + Import/Export User Guide. - `"ClientData"`: The client-specific data. - `"ClientToken"`: The token to enable idempotency for VM import requests. - `"Description"`: A description string for the import image task. @@ -22909,12 +24064,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys AMI. This parameter is only required if you want to use a non-default KMS key; if this parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set. The KMS key identifier may be provided in - any of the following formats: Key ID Key alias. The alias ARN contains the arn:aws:kms - namespace, followed by the Region of the key, the Amazon Web Services account ID of the key - owner, the alias namespace, and then the key alias. For example, - arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. ARN using key ID. The ID ARN - contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web - Services account ID of the key owner, the key namespace, and then the key ID. For example, + any of the following formats: Key ID Key alias ARN using key ID. The ID ARN contains + the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services + account ID of the key owner, the key namespace, and then the key ID. 
For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias namespace, and then the @@ -22932,7 +24084,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Linux operating system. To use BYOL, you must have existing licenses with rights to use these licenses in a third party cloud, such as Amazon Web Services. For more information, see Prerequisites in the VM Import/Export User Guide. -- `"Platform"`: The operating system of the virtual machine. Valid values: Windows | Linux +- `"Platform"`: The operating system of the virtual machine. If you import a VM that is + compatible with Unified Extensible Firmware Interface (UEFI) using an EBS snapshot, you + must specify a value for the platform. Valid values: Windows | Linux - `"RoleName"`: The name of the role to use when not using the default role, 'vmimport'. - `"TagSpecification"`: The tags to apply to the import image task during creation. - `"UsageOperation"`: The usage operation value. For more information, see Licensing @@ -22953,12 +24107,14 @@ end import_instance(platform) import_instance(platform, params::Dict{String,<:Any}) -Creates an import instance task using metadata from the specified disk image. This API -action supports only single-volume VMs. To import multi-volume VMs, use ImportImage -instead. This API action is not supported by the Command Line Interface (CLI). For -information about using the Amazon EC2 CLI, which is deprecated, see Importing a VM to -Amazon EC2 in the Amazon EC2 CLI Reference PDF file. For information about the import -manifest referenced by this API action, see VM Import Manifest. + We recommend that you use the ImportImage API. For more information, see Importing a VM +as an image using VM Import/Export in the VM Import/Export User Guide. Creates an import +instance task using metadata from the specified disk image. This API action is not +supported by the Command Line Interface (CLI). For information about using the Amazon EC2 +CLI, which is deprecated, see Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference +PDF file. This API action supports only single-volume VMs. To import multi-volume VMs, use +ImportImage instead. For information about the import manifest referenced by this API +action, see VM Import Manifest. # Arguments - `platform`: The instance operating system. @@ -23075,12 +24231,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys snapshot. This parameter is only required if you want to use a non-default KMS key; if this parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set. The KMS key identifier may be provided in - any of the following formats: Key ID Key alias. The alias ARN contains the arn:aws:kms - namespace, followed by the Region of the key, the Amazon Web Services account ID of the key - owner, the alias namespace, and then the key alias. For example, - arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. ARN using key ID. The ID ARN - contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web - Services account ID of the key owner, the key namespace, and then the key ID. For example, + any of the following formats: Key ID Key alias ARN using key ID. 
The ID ARN contains + the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services + account ID of the key owner, the key namespace, and then the key ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias namespace, and then the @@ -23235,6 +24388,87 @@ function list_snapshots_in_recycle_bin( ) end +""" + lock_snapshot(lock_mode, snapshot_id) + lock_snapshot(lock_mode, snapshot_id, params::Dict{String,<:Any}) + +Locks an Amazon EBS snapshot in either governance or compliance mode to protect it against +accidental or malicious deletions for a specific duration. A locked snapshot can't be +deleted. You can also use this action to modify the lock settings for a snapshot that is +already locked. The allowed modifications depend on the lock mode and lock state: If the +snapshot is locked in governance mode, you can modify the lock mode and the lock duration +or lock expiration date. If the snapshot is locked in compliance mode and it is in the +cooling-off period, you can modify the lock mode and the lock duration or lock expiration +date. If the snapshot is locked in compliance mode and the cooling-off period has lapsed, +you can only increase the lock duration or extend the lock expiration date. + +# Arguments +- `lock_mode`: The mode in which to lock the snapshot. Specify one of the following: + governance - Locks the snapshot in governance mode. Snapshots locked in governance mode + can't be deleted until one of the following conditions are met: The lock duration + expires. The snapshot is unlocked by a user with the appropriate permissions. Users + with the appropriate IAM permissions can unlock the snapshot, increase or decrease the lock + duration, and change the lock mode to compliance at any time. If you lock a snapshot in + governance mode, omit CoolOffPeriod. compliance - Locks the snapshot in compliance + mode. Snapshots locked in compliance mode can't be unlocked by any user. They can be + deleted only after the lock duration expires. Users can't decrease the lock duration or + change the lock mode to governance. However, users with appropriate IAM permissions can + increase the lock duration at any time. If you lock a snapshot in compliance mode, you can + optionally specify CoolOffPeriod. +- `snapshot_id`: The ID of the snapshot to lock. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CoolOffPeriod"`: The cooling-off period during which you can unlock the snapshot or + modify the lock settings after locking the snapshot in compliance mode, in hours. After the + cooling-off period expires, you can't unlock or delete the snapshot, decrease the lock + duration, or change the lock mode. You can increase the lock duration after the cooling-off + period expires. The cooling-off period is optional when locking a snapshot in compliance + mode. If you are locking the snapshot in governance mode, omit this parameter. To lock the + snapshot in compliance mode immediately without a cooling-off period, omit this parameter. + If you are extending the lock duration for a snapshot that is locked in compliance mode + after the cooling-off period has expired, omit this parameter. If you specify a + cooling-period in a such a request, the request fails. Allowed values: Min 1, max 72. 
+- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"ExpirationDate"`: The date and time at which the snapshot lock is to automatically + expire, in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ). You must specify either this + parameter or LockDuration, but not both. +- `"LockDuration"`: The period of time for which to lock the snapshot, in days. The + snapshot lock will automatically expire after this period lapses. You must specify either + this parameter or ExpirationDate, but not both. Allowed values: Min: 1, max 36500 +""" +function lock_snapshot( + LockMode, SnapshotId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "LockSnapshot", + Dict{String,Any}("LockMode" => LockMode, "SnapshotId" => SnapshotId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function lock_snapshot( + LockMode, + SnapshotId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "LockSnapshot", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("LockMode" => LockMode, "SnapshotId" => SnapshotId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_address_attribute(allocation_id) modify_address_attribute(allocation_id, params::Dict{String,<:Any}) @@ -23281,15 +24515,14 @@ end modify_availability_zone_group(group_name, opt_in_status) modify_availability_zone_group(group_name, opt_in_status, params::Dict{String,<:Any}) -Changes the opt-in status of the Local Zone and Wavelength Zone group for your account. Use - DescribeAvailabilityZones to view the value for GroupName. +Changes the opt-in status of the specified zone group for your account. # Arguments - `group_name`: The name of the Availability Zone group, Local Zone group, or Wavelength Zone group. -- `opt_in_status`: Indicates whether you are opted in to the Local Zone group or Wavelength - Zone group. The only valid value is opted-in. You must contact Amazon Web Services Support - to opt out of a Local Zone or Wavelength Zone group. +- `opt_in_status`: Indicates whether to opt in to the zone group. The only valid value is + opted-in. You must contact Amazon Web Services Support to opt out of a Local Zone or + Wavelength Zone group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -23597,15 +24830,14 @@ the default KMS key to the Amazon Web Services managed KMS key for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric KMS keys. If you delete or disable the customer managed KMS key that you specified for use with encryption by default, your instances will fail to launch. For more information, see Amazon EBS encryption in the -Amazon Elastic Compute Cloud User Guide. +Amazon EBS User Guide. # Arguments -- `kms_key_id`: The identifier of the Key Management Service (KMS) KMS key to use for - Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is - used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS - key using any of the following: Key ID. For example, - 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key - ARN. For example, +- `kms_key_id`: The identifier of the KMS key to use for Amazon EBS encryption. 
If this + parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, + the encrypted state must be true. You can specify the KMS key using any of the following: + Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, + alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN @@ -23772,9 +25004,9 @@ instance type only. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"HostMaintenance"`: Indicates whether to enable or disable host maintenance for the - Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide. + Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide. - `"HostRecovery"`: Indicates whether to enable or disable host recovery for the Dedicated - Host. For more information, see Host recovery in the Amazon EC2 User Guide. + Host. For more information, see Host recovery in the Amazon EC2 User Guide. - `"InstanceFamily"`: Specifies the instance family to be supported by the Dedicated Host. Specify this parameter to modify a Dedicated Host to support multiple instance types within its current instance family. If you want to modify a Dedicated Host to support a specific @@ -24026,7 +25258,7 @@ stopped. For more information, see Modify a stopped instance in the Amazon EC2 U # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DisableApiStop"`: Indicates whether an instance is enabled for stop protection. For - more information, see Stop Protection. + more information, see Enable stop protection for your instance. - `"GroupId"`: Replaces the security groups of the instance with the specified security groups. You must specify the ID of at least one security group, even if it's just the default security group for the VPC. @@ -24043,9 +25275,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"blockDeviceMapping"`: Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is - terminated. To add instance store volumes to an Amazon EBS-backed instance, you must add - them when you launch the instance. For more information, see Update the block device - mapping when launching an instance in the Amazon EC2 User Guide. + terminated. You can't modify the DeleteOnTermination attribute for volumes that are + attached to Fargate tasks. To add instance store volumes to an Amazon EBS-backed instance, + you must add them when you launch the instance. For more information, see Update the block + device mapping when launching an instance in the Amazon EC2 User Guide. - `"disableApiTermination"`: If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances. 
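A brief usage sketch for the attribute changes described above, assuming the AWS.jl
`@service` macro; the instance ID is a placeholder and the nested Value shape follows the
EC2 attribute-boolean convention (an assumption here, not taken from this file):

    using AWS
    @service EC2

    # Enable stop protection on a hypothetical instance.
    EC2.modify_instance_attribute(
        "i-1234567890abcdef0",
        Dict("DisableApiStop" => Dict("Value" => true)),
    )
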
@@ -24374,6 +25607,54 @@ function modify_instance_maintenance_options( ) end +""" + modify_instance_metadata_defaults() + modify_instance_metadata_defaults(params::Dict{String,<:Any}) + +Modifies the default instance metadata service (IMDS) settings at the account level in the +specified Amazon Web Services
 Region. To remove a parameter's account-level +default setting, specify no-preference. If an account-level setting is cleared with +no-preference, then the instance launch considers the other instance metadata settings. For +more information, see Order of precedence for instance metadata options in the Amazon EC2 +User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"HttpEndpoint"`: Enables or disables the IMDS endpoint on an instance. When disabled, + the instance metadata can't be accessed. +- `"HttpPutResponseHopLimit"`: The maximum number of hops that the metadata token can + travel. To indicate no preference, specify -1. Possible values: Integers from 1 to 64, and + -1 to indicate no preference +- `"HttpTokens"`: Indicates whether IMDSv2 is required. optional – IMDSv2 is optional, + which means that you can use either IMDSv2 or IMDSv1. required – IMDSv2 is required, + which means that IMDSv1 is disabled, and you must use IMDSv2. +- `"InstanceMetadataTags"`: Enables or disables access to an instance's tags from the + instance metadata. For more information, see Work with instance tags using the instance + metadata in the Amazon EC2 User Guide. +""" +function modify_instance_metadata_defaults(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "ModifyInstanceMetadataDefaults"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_instance_metadata_defaults( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "ModifyInstanceMetadataDefaults", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_instance_metadata_options(instance_id) modify_instance_metadata_options(instance_id, params::Dict{String,<:Any}) @@ -24398,25 +25679,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys this parameter is not specified, the existing state is maintained. If you specify a value of disabled, you cannot access your instance metadata. - `"HttpProtocolIpv6"`: Enables or disables the IPv6 endpoint for the instance metadata - service. This setting applies only if you have enabled the HTTP metadata endpoint. + service. Applies only if you enabled the HTTP metadata endpoint. - `"HttpPutResponseHopLimit"`: The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. If no parameter is specified, the existing state is maintained. Possible values: Integers from 1 to 64 -- `"HttpTokens"`: IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional - (in other words, set the use of IMDSv2 to optional) or required (in other words, set the - use of IMDSv2 to required). optional - When IMDSv2 is optional, you can choose to - retrieve instance metadata with or without a session token in your request. If you retrieve - the IAM role credentials without a token, the IMDSv1 role credentials are returned. If you - retrieve the IAM role credentials using a valid session token, the IMDSv2 role credentials - are returned. 
required - When IMDSv2 is required, you must send a session token with any - instance metadata retrieval requests. In this state, retrieving the IAM role credentials - always returns IMDSv2 credentials; IMDSv1 credentials are not available. Default: - optional +- `"HttpTokens"`: Indicates whether IMDSv2 is required. optional - IMDSv2 is optional. + You can choose whether to send a session token in your instance metadata retrieval + requests. If you retrieve IAM role credentials without a session token, you receive the + IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, + you receive the IMDSv2 role credentials. required - IMDSv2 is required. You must send a + session token in your instance metadata retrieval requests. With this option, retrieving + the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not + available. Default: If the value of ImdsSupport for the Amazon Machine Image (AMI) for + your instance is v2.0 and the account level default is set to no-preference, the default is + required. If the value of ImdsSupport for the Amazon Machine Image (AMI) for your + instance is v2.0, but the account level default is set to V1 or V2, the default is + optional. The default value can also be affected by other combinations of parameters. For + more information, see Order of precedence for instance metadata options in the Amazon EC2 + User Guide. - `"InstanceMetadataTags"`: Set to enabled to allow access to instance tags from the instance metadata. Set to disabled to turn off access to instance tags from the instance metadata. For more information, see Work with instance tags using the instance metadata. - Default: disabled """ function modify_instance_metadata_options( InstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -24450,12 +25734,12 @@ end Modifies the placement attributes for a specified instance. You can do the following: Modify the affinity between an instance and a Dedicated Host. When affinity is set to host and the instance is not associated with a specific Dedicated Host, the next time the -instance is launched, it is automatically associated with the host on which it lands. If -the instance is restarted or rebooted, this relationship persists. Change the Dedicated -Host with which an instance is associated. Change the instance tenancy of an instance. -Move an instance to or from a placement group. At least one attribute for affinity, host -ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy -can be modified in the same request. To modify the host ID, tenancy, placement group, or +instance is started, it is automatically associated with the host on which it lands. If the +instance is restarted or rebooted, this relationship persists. Change the Dedicated Host +with which an instance is associated. Change the instance tenancy of an instance. Move +an instance to or from a placement group. At least one attribute for affinity, host ID, +tenancy, or placement group name must be specified in the request. Affinity and tenancy can +be modified in the same request. To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped state. # Arguments @@ -24470,14 +25754,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys placement groups, the instance must have a tenancy of default or dedicated. To remove an instance from a placement group, specify an empty string (\"\"). 
- `"HostResourceGroupArn"`: The ARN of the host resource group in which to place the - instance. + instance. The instance must have a tenancy of host to specify this parameter. - `"PartitionNumber"`: The number of the partition in which to place the instance. Valid only if the placement group strategy is set to partition. -- `"affinity"`: The affinity setting for the instance. +- `"affinity"`: The affinity setting for the instance. For more information, see Host + affinity in the Amazon EC2 User Guide. - `"hostId"`: The ID of the Dedicated Host with which to associate the instance. -- `"tenancy"`: The tenancy for the instance. For T3 instances, you can't change the - tenancy from dedicated to host, or from host to dedicated. Attempting to make one of these - unsupported tenancy changes results in the InvalidTenancy error code. +- `"tenancy"`: The tenancy for the instance. For T3 instances, you must launch the + instance on a Dedicated Host to use a tenancy of host. You can't change the tenancy from + host to dedicated or default. Attempting to make one of these unsupported tenancy changes + results in an InvalidRequest error code. """ function modify_instance_placement( instanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -24525,6 +25811,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"RemoveOperatingRegion"`: The operating Regions to remove. +- `"Tier"`: IPAM is offered in a Free Tier and an Advanced Tier. For more information about + the features available in each tier and the costs associated with the tiers, see Amazon VPC + pricing > IPAM tab. """ function modify_ipam(IpamId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -24791,10 +26080,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the - LaunchTemplateId or the LaunchTemplateName, but not both. +- `"LaunchTemplateId"`: The ID of the launch template. You must specify either the launch + template ID or the launch template name, but not both. - `"LaunchTemplateName"`: The name of the launch template. You must specify either the - LaunchTemplateName or the LaunchTemplateId, but not both. + launch template ID or the launch template name, but not both. - `"SetDefaultVersion"`: The version number of the launch template to set as the default version. """ @@ -24930,8 +26219,24 @@ instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AssociatePublicIpAddress"`: Indicates whether to assign a public IPv4 address to a + network interface. This option can be enabled for any network interface but will only apply + to the primary network interface (eth0). +- `"ConnectionTrackingSpecification"`: A connection tracking specification. - `"EnaSrdSpecification"`: Updates the ENA Express configuration for the network interface that’s attached to the instance. 
+- `"EnablePrimaryIpv6"`: If you’re modifying a network interface in a dual-stack or + IPv6-only subnet, you have the option to assign a primary IPv6 IP address. A primary IPv6 + address is an IPv6 GUA address associated with an ENI that you have enabled to use a + primary IPv6 address. Use this option if the instance that this ENI will be attached to + relies on its IPv6 address not changing. Amazon Web Services will automatically assign an + IPv6 address associated with the ENI attached to your instance to be the primary IPv6 + address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. + When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made + the primary IPv6 address until the instance is terminated or the network interface is + detached. If you have multiple IPv6 addresses associated with an ENI attached to your + instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with + the ENI becomes the primary IPv6 address. - `"SecurityGroupId"`: Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security @@ -25030,7 +26335,7 @@ end Modifies the configuration of your Reserved Instances, such as the Availability Zone, instance count, or instance type. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type. For more information, -see Modifying Reserved Instances in the Amazon EC2 User Guide. +see Modify Reserved Instances in the Amazon EC2 User Guide. # Arguments - `reserved_instances_configuration_set_item_type`: The configuration settings for the @@ -25141,8 +26446,7 @@ remove account IDs for a snapshot, you must use multiple operations. You can mak modifications to a snapshot in a single operation. Encrypted snapshots and snapshots with Amazon Web Services Marketplace product codes cannot be made public. Snapshots encrypted with your default KMS key cannot be shared with other accounts. For more information about -modifying snapshot permissions, see Share a snapshot in the Amazon Elastic Compute Cloud -User Guide. +modifying snapshot permissions, see Share a snapshot in the Amazon EBS User Guide. # Arguments - `snapshot_id`: The ID of the snapshot. @@ -25191,8 +26495,7 @@ end Archives an Amazon EBS snapshot. When you archive a snapshot, it is converted to a full snapshot that includes all of the blocks of data that were written to the volume at the time the snapshot was created, and moved from the standard tier to the archive tier. For -more information, see Archive Amazon EBS snapshots in the Amazon Elastic Compute Cloud User -Guide. +more information, see Archive Amazon EBS snapshots in the Amazon EBS User Guide. # Arguments - `snapshot_id`: The ID of the snapshot. @@ -25336,7 +26639,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys address. When this value is true, you must specify the customer-owned IP pool using CustomerOwnedIpv4Pool. - `"MapPublicIpOnLaunch"`: Specify true to indicate that network interfaces attached to - instances created in the specified subnet should be assigned a public IPv4 address. + instances created in the specified subnet should be assigned a public IPv4 address. 
Amazon + Web Services charges for all public IPv4 addresses, including public IPv4 addresses + associated with running instances and Elastic IP addresses. For more information, see the + Public IPv4 Address tab on the Amazon VPC pricing page. - `"PrivateDnsHostnameTypeOnLaunch"`: The type of hostname to assign to instances in the subnet at launch. For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name) or the instance ID @@ -25500,7 +26806,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. Do not specify this parameter when you want to mirror the entire - packet. + packet. For sessions with Network Load Balancer (NLB) traffic mirror targets, the default + PacketLength will be set to 8500. Valid values are 1-8500. Setting a PacketLength greater + than 8500 will result in an error response. - `"RemoveField"`: The properties that you want to remove from the Traffic Mirror session. When you remove a property from a Traffic Mirror session, the property is set to the default. @@ -25705,7 +27013,7 @@ Modifies the configuration of the specified Amazon Web Services Verified Access # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"Description"`: A description for the Verified Access endpoint. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -25751,33 +27059,31 @@ function modify_verified_access_endpoint( end """ - modify_verified_access_endpoint_policy(policy_enabled, verified_access_endpoint_id) - modify_verified_access_endpoint_policy(policy_enabled, verified_access_endpoint_id, params::Dict{String,<:Any}) + modify_verified_access_endpoint_policy(verified_access_endpoint_id) + modify_verified_access_endpoint_policy(verified_access_endpoint_id, params::Dict{String,<:Any}) Modifies the specified Amazon Web Services Verified Access endpoint policy. # Arguments -- `policy_enabled`: The status of the Verified Access policy. - `verified_access_endpoint_id`: The ID of the Verified Access endpoint. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"PolicyDocument"`: The Verified Access policy document. +- `"PolicyEnabled"`: The status of the Verified Access policy. +- `"SseSpecification"`: The options for server side encryption. 
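A minimal sketch, assuming the AWS.jl `@service` macro; the endpoint ID and the Cedar
policy text are placeholders:

    using AWS
    @service EC2

    # Enable the endpoint policy with a placeholder policy document.
    EC2.modify_verified_access_endpoint_policy(
        "vae-0123456789abcdef0",
        Dict("PolicyEnabled" => true, "PolicyDocument" => "permit(principal, action, resource);"),
    )
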
""" function modify_verified_access_endpoint_policy( - PolicyEnabled, - VerifiedAccessEndpointId; - aws_config::AbstractAWSConfig=global_aws_config(), + VerifiedAccessEndpointId; aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( "ModifyVerifiedAccessEndpointPolicy", Dict{String,Any}( - "PolicyEnabled" => PolicyEnabled, "VerifiedAccessEndpointId" => VerifiedAccessEndpointId, "ClientToken" => string(uuid4()), ); @@ -25786,7 +27092,6 @@ function modify_verified_access_endpoint_policy( ) end function modify_verified_access_endpoint_policy( - PolicyEnabled, VerifiedAccessEndpointId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -25797,7 +27102,6 @@ function modify_verified_access_endpoint_policy( mergewith( _merge, Dict{String,Any}( - "PolicyEnabled" => PolicyEnabled, "VerifiedAccessEndpointId" => VerifiedAccessEndpointId, "ClientToken" => string(uuid4()), ), @@ -25821,7 +27125,7 @@ Modifies the specified Amazon Web Services Verified Access group configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"Description"`: A description for the Verified Access group. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -25864,31 +27168,31 @@ function modify_verified_access_group( end """ - modify_verified_access_group_policy(policy_enabled, verified_access_group_id) - modify_verified_access_group_policy(policy_enabled, verified_access_group_id, params::Dict{String,<:Any}) + modify_verified_access_group_policy(verified_access_group_id) + modify_verified_access_group_policy(verified_access_group_id, params::Dict{String,<:Any}) Modifies the specified Amazon Web Services Verified Access group policy. # Arguments -- `policy_enabled`: The status of the Verified Access policy. - `verified_access_group_id`: The ID of the Verified Access group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"PolicyDocument"`: The Verified Access policy document. +- `"PolicyEnabled"`: The status of the Verified Access policy. +- `"SseSpecification"`: The options for server side encryption. 
""" function modify_verified_access_group_policy( - PolicyEnabled, VerifiedAccessGroupId; aws_config::AbstractAWSConfig=global_aws_config() + VerifiedAccessGroupId; aws_config::AbstractAWSConfig=global_aws_config() ) return ec2( "ModifyVerifiedAccessGroupPolicy", Dict{String,Any}( - "PolicyEnabled" => PolicyEnabled, "VerifiedAccessGroupId" => VerifiedAccessGroupId, "ClientToken" => string(uuid4()), ); @@ -25897,7 +27201,6 @@ function modify_verified_access_group_policy( ) end function modify_verified_access_group_policy( - PolicyEnabled, VerifiedAccessGroupId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -25908,7 +27211,6 @@ function modify_verified_access_group_policy( mergewith( _merge, Dict{String,Any}( - "PolicyEnabled" => PolicyEnabled, "VerifiedAccessGroupId" => VerifiedAccessGroupId, "ClientToken" => string(uuid4()), ), @@ -25932,7 +27234,7 @@ Modifies the configuration of the specified Amazon Web Services Verified Access # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"Description"`: A description for the Verified Access instance. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -25987,7 +27289,7 @@ instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -26043,13 +27345,16 @@ provider. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive token that you provide to ensure idempotency of - your modification request. For more information, see Ensuring Idempotency. + your modification request. For more information, see Ensuring idempotency. - `"Description"`: A description for the Verified Access trust provider. +- `"DeviceOptions"`: The options for a device-based trust provider. This parameter is + required when the provider type is device. - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"OidcOptions"`: The options for an OpenID Connect-compatible user-identity trust provider. +- `"SseSpecification"`: The options for server side encryption. """ function modify_verified_access_trust_provider( VerifiedAccessTrustProviderId; aws_config::AbstractAWSConfig=global_aws_config() @@ -26094,14 +27399,10 @@ You can modify several parameters of an existing EBS volume, including volume si type, and IOPS capacity. 
If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying EBS volumes, see Amazon -EBS Elastic Volumes (Linux instances) or Amazon EBS Elastic Volumes (Windows instances). -When you complete a resize operation on your volume, you need to extend the volume's -file-system size to take advantage of the new storage capacity. For more information, see -Extend a Linux file system or Extend a Windows file system. You can use CloudWatch Events -to check the status of a modification to an EBS volume. For information about CloudWatch -Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a -modification using DescribeVolumesModifications. For information about tracking status -changes using either method, see Monitor the progress of volume modifications. With +EBS Elastic Volumes in the Amazon EBS User Guide. When you complete a resize operation on +your volume, you need to extend the volume's file-system size to take advantage of the new +storage capacity. For more information, see Extend the file system. For more information, +see Monitor the progress of volume modifications in the Amazon EBS User Guide. With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance. After modifying a volume, you must wait at least six hours and ensure that the volume is in the in-use or available @@ -26117,25 +27418,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"Iops"`: The target IOPS rate of the volume. This parameter is valid only for gp3, io1, - and io2 volumes. The following are the supported values for each volume type: gp3: - 3,000-16,000 IOPS io1: 100-64,000 IOPS io2: 100-64,000 IOPS Default: The existing - value is retained if you keep the same volume type. If you change the volume type to io1, - io2, or gp3, the default is 3,000. + and io2 volumes. The following are the supported values for each volume type: gp3: 3,000 + - 16,000 IOPS io1: 100 - 64,000 IOPS io2: 100 - 256,000 IOPS For io2 volumes, you + can achieve up to 256,000 IOPS on instances built on the Nitro System. On other instances, + you can achieve performance up to 32,000 IOPS. Default: The existing value is retained if + you keep the same volume type. If you change the volume type to io1, io2, or gp3, the + default is 3,000. - `"MultiAttachEnabled"`: Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Nitro-based instances in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more - information, see Amazon EBS Multi-Attach in the Amazon Elastic Compute Cloud User Guide. + information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide. - `"Size"`: The target size of the volume, in GiB. The target volume size must be greater than or equal to the existing size of the volume. The following are the supported volumes - sizes for each volume type: gp2 and gp3: 1-16,384 io1 and io2: 4-16,384 st1 and - sc1: 125-16,384 standard: 1-1,024 Default: The existing size is retained. 
+ sizes for each volume type: gp2 and gp3: 1 - 16,384 GiB io1: 4 - 16,384 GiB io2: 4 + - 65,536 GiB st1 and sc1: 125 - 16,384 GiB standard: 1 - 1024 GiB Default: The + existing size is retained. - `"Throughput"`: The target throughput of the volume, in MiB/s. This parameter is valid only for gp3 volumes. The maximum value is 1,000. Default: The existing value is retained if the source and target volume type is gp3. Otherwise, the default value is 125. Valid Range: Minimum value of 125. Maximum value of 1000. - `"VolumeType"`: The target EBS volume type of the volume. For more information, see - Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide. Default: The - existing type is retained. + Amazon EBS volume types in the Amazon EBS User Guide. Default: The existing type is + retained. """ function modify_volume(VolumeId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -26266,7 +27570,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AddRouteTableId"`: (Gateway endpoint) The IDs of the route tables to associate with the endpoint. - `"AddSecurityGroupId"`: (Interface endpoint) The IDs of the security groups to associate - with the network interface. + with the endpoint network interfaces. - `"AddSubnetId"`: (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in which to serve the endpoint. For a Gateway Load Balancer endpoint, you can specify only one subnet. @@ -26282,11 +27586,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"RemoveRouteTableId"`: (Gateway endpoint) The IDs of the route tables to disassociate from the endpoint. - `"RemoveSecurityGroupId"`: (Interface endpoint) The IDs of the security groups to - disassociate from the network interface. + disassociate from the endpoint network interfaces. - `"RemoveSubnetId"`: (Interface endpoint) The IDs of the subnets from which to remove the endpoint. - `"ResetPolicy"`: (Gateway endpoint) Specify true to reset the policy document to the default policy. The default policy allows full access to the service. +- `"SubnetConfiguration"`: The subnet configurations for the endpoint. """ function modify_vpc_endpoint( VpcEndpointId; aws_config::AbstractAWSConfig=global_aws_config() @@ -26527,25 +27832,17 @@ end modify_vpc_peering_connection_options(vpc_peering_connection_id) modify_vpc_peering_connection_options(vpc_peering_connection_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Modifies the VPC peering connection options on one side of a VPC peering -connection. You can do the following: Enable/disable communication over the peering -connection between an EC2-Classic instance that's linked to your VPC (using ClassicLink) -and instances in the peer VPC. Enable/disable communication over the peering connection -between instances in your VPC and an EC2-Classic instance that's linked to the peer VPC. -Enable/disable the ability to resolve public DNS hostnames to private IP addresses when -queried from instances in the peer VPC. If the peered VPCs are in the same Amazon Web -Services account, you can enable DNS resolution for queries from the local VPC. This -ensures that queries from the local VPC resolve to private IP addresses in the peer VPC. 
-This option is not available if the peered VPCs are in different Amazon Web Services -accounts or different Regions. For peered VPCs in different Amazon Web Services accounts, -each Amazon Web Services account owner must initiate a separate request to modify the -peering connection options. For inter-region peering connections, you must use the Region -for the requester VPC to modify the requester VPC peering options and the Region for the -accepter VPC to modify the accepter VPC peering options. To verify which VPCs are the -accepter and the requester for a VPC peering connection, use the -DescribeVpcPeeringConnections command. +Modifies the VPC peering connection options on one side of a VPC peering connection. If the +peered VPCs are in the same Amazon Web Services account, you can enable DNS resolution for +queries from the local VPC. This ensures that queries from the local VPC resolve to private +IP addresses in the peer VPC. This option is not available if the peered VPCs are in +different Amazon Web Services accounts or different Regions. For peered VPCs in different +Amazon Web Services accounts, each Amazon Web Services account owner must initiate a +separate request to modify the peering connection options. For inter-region peering +connections, you must use the Region for the requester VPC to modify the requester VPC +peering options and the Region for the accepter VPC to modify the accepter VPC peering +options. To verify which VPCs are the accepter and the requester for a VPC peering +connection, use the DescribeVpcPeeringConnections command. # Arguments - `vpc_peering_connection_id`: The ID of the VPC peering connection. @@ -26598,7 +27895,7 @@ tenancy attribute of a VPC to default only. You cannot change the instance tenan attribute to dedicated. After you modify the tenancy of the VPC, any new instances that you launch into the VPC have a tenancy of default, unless you specify otherwise during launch. The tenancy of any existing instances in the VPC is not affected. For more information, see -Dedicated Instances in the Amazon Elastic Compute Cloud User Guide. +Dedicated Instances in the Amazon EC2 User Guide. # Arguments - `instance_tenancy`: The instance tenancy attribute for the VPC. @@ -26833,7 +28130,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"SkipTunnelReplacement"`: Choose whether or not to trigger immediate tunnel replacement. - Valid values: True | False + This is only applicable when turning on or off EnableTunnelLifecycleControl. Valid values: + True | False """ function modify_vpn_tunnel_options( TunnelOptions, @@ -27027,8 +28325,8 @@ the address range is provisioned, it is ready to be advertised using AdvertiseBy Amazon Web Services verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more -information, see Bring your own IP addresses (BYOIP) in the Amazon Elastic Compute Cloud -User Guide. Provisioning an address range is an asynchronous operation, so the call returns +information, see Bring your own IP addresses (BYOIP) in the Amazon EC2 User Guide. 
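
For the ModifyVpcPeeringConnectionOptions behaviour described above, a sketch of enabling DNS resolution from the requester side. The peering connection ID is a placeholder, and the nested option structure (RequesterPeeringConnectionOptions / AllowDnsResolutionFromRemoteVpc) follows the EC2 API shape rather than this excerpt; it is assumed here to serialize from a plain Dict.

using AWS
@service EC2

# Allow instances in the peer VPC to resolve this VPC's hostnames to private IPs.
EC2.modify_vpc_peering_connection_options(
    "pcx-0123456789abcdef0",
    Dict(
        "RequesterPeeringConnectionOptions" =>
            Dict("AllowDnsResolutionFromRemoteVpc" => true),
    ),
)
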
+Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use @@ -27037,9 +28335,10 @@ address pool. # Arguments - `cidr`: The public IPv4 or IPv6 address range, in CIDR notation. The most specific IPv4 - prefix that you can specify is /24. The most specific IPv6 prefix you can specify is /56. - The address range cannot overlap with another address range that you've brought to this or - another Region. + prefix that you can specify is /24. The most specific IPv6 address range that you can bring + is /48 for CIDRs that are publicly advertisable and /56 for CIDRs that are not publicly + advertisable. The address range cannot overlap with another address range that you've + brought to this or another Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -27050,6 +28349,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"MultiRegion"`: Reserved. +- `"NetworkBorderGroup"`: If you have Local Zones enabled, you can choose a network border + group for Local Zones when you provision and advertise a BYOIPv4 CIDR. Choose the network + border group carefully as the EIP and the Amazon Web Services resource it is associated + with must reside in the same network border group. You can provision BYOIP address ranges + to and advertise them in the following Local Zone network border groups: us-east-1-dfw-2 + us-west-2-lax-1 us-west-2-phx-2 You cannot provision or advertise BYOIPv6 address + ranges in Local Zones at this time. - `"PoolTagSpecification"`: The tags to apply to the address pool. - `"PubliclyAdvertisable"`: (IPv6 only) Indicate whether the address range will be publicly advertised to the internet. Default: true @@ -27073,6 +28379,65 @@ function provision_byoip_cidr( ) end +""" + provision_ipam_byoasn(asn, asn_authorization_context, ipam_id) + provision_ipam_byoasn(asn, asn_authorization_context, ipam_id, params::Dict{String,<:Any}) + +Provisions your Autonomous System Number (ASN) for use in your Amazon Web Services account. +This action requires authorization context for Amazon to bring the ASN to an Amazon Web +Services account. For more information, see Tutorial: Bring your ASN to IPAM in the Amazon +VPC IPAM guide. + +# Arguments +- `asn`: A public 2-byte or 4-byte ASN. +- `asn_authorization_context`: An ASN authorization context. +- `ipam_id`: An IPAM ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
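
A minimal sketch of ProvisionByoipCidr with the new NetworkBorderGroup option described above. The CIDR is a documentation range, the Local Zone group is one of the values listed above, and the RPKI/authorization material a real BYOIP onboarding requires is omitted for brevity.

using AWS
@service EC2

# Provision a placeholder IPv4 range into a Local Zone network border group.
EC2.provision_byoip_cidr(
    "203.0.113.0/24",
    Dict("NetworkBorderGroup" => "us-west-2-lax-1"),
)
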
+""" +function provision_ipam_byoasn( + Asn, AsnAuthorizationContext, IpamId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "ProvisionIpamByoasn", + Dict{String,Any}( + "Asn" => Asn, + "AsnAuthorizationContext" => AsnAuthorizationContext, + "IpamId" => IpamId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function provision_ipam_byoasn( + Asn, + AsnAuthorizationContext, + IpamId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "ProvisionIpamByoasn", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Asn" => Asn, + "AsnAuthorizationContext" => AsnAuthorizationContext, + "IpamId" => IpamId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ provision_ipam_pool_cidr(ipam_pool_id) provision_ipam_pool_cidr(ipam_pool_id, params::Dict{String,<:Any}) @@ -27093,7 +28458,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys bring a specified IP address range to Amazon using BYOIP. This option applies to public pools only. - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see Ensuring Idempotency. + idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -27189,6 +28554,63 @@ function provision_public_ipv4_pool_cidr( ) end +""" + purchase_capacity_block(capacity_block_offering_id, instance_platform) + purchase_capacity_block(capacity_block_offering_id, instance_platform, params::Dict{String,<:Any}) + +Purchase the Capacity Block for use with your account. With Capacity Blocks you ensure GPU +capacity is available for machine learning (ML) workloads. You must specify the ID of the +Capacity Block offering you are purchasing. + +# Arguments +- `capacity_block_offering_id`: The ID of the Capacity Block offering. +- `instance_platform`: The type of operating system for which to reserve capacity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"TagSpecification"`: The tags to apply to the Capacity Block during launch. 
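
The newly added ProvisionIpamByoasn wrapper can be exercised as below. The ASN, IPAM ID, and authorization-context values are placeholders; the Message/Signature keys follow the EC2 API shape and should be treated as an assumption rather than something this patch documents.

using AWS
@service EC2

# Bring a hypothetical public ASN into an IPAM.
EC2.provision_ipam_byoasn(
    "12345",                                      # placeholder public ASN
    Dict(
        "Message" => "authorization message",     # assumed field names
        "Signature" => "base64-encoded-signature",
    ),
    "ipam-0123456789abcdef0",
)
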
+""" +function purchase_capacity_block( + CapacityBlockOfferingId, + InstancePlatform; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "PurchaseCapacityBlock", + Dict{String,Any}( + "CapacityBlockOfferingId" => CapacityBlockOfferingId, + "InstancePlatform" => InstancePlatform, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function purchase_capacity_block( + CapacityBlockOfferingId, + InstancePlatform, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "PurchaseCapacityBlock", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CapacityBlockOfferingId" => CapacityBlockOfferingId, + "InstancePlatform" => InstancePlatform, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ purchase_host_reservation(offering_id, item) purchase_host_reservation(offering_id, item, params::Dict{String,<:Any}) @@ -27255,8 +28677,8 @@ DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances. To queue a purchase for a future date and time, specify a purchase time. If you do not specify a purchase time, the default is the -current time. For more information, see Reserved Instances and Reserved Instance -Marketplace in the Amazon EC2 User Guide. +current time. For more information, see Reserved Instances and Sell in the Reserved +Instance Marketplace in the Amazon EC2 User Guide. # Arguments - `instance_count`: The number of Reserved Instances to purchase. @@ -27413,27 +28835,27 @@ end register_image(name) register_image(name, params::Dict{String,<:Any}) -Registers an AMI. When you're creating an AMI, this is the final step you must complete -before you can launch an instance from the AMI. For more information about creating AMIs, -see Create your own AMI in the Amazon Elastic Compute Cloud User Guide. For Amazon -EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you -don't have to register the AMI yourself. We recommend that you always use CreateImage -unless you have a specific reason to use RegisterImage. If needed, you can deregister an -AMI at any time. Any modifications you make to an AMI backed by an instance store volume -invalidates its registration. If you make changes to an image, deregister the previous -image and register the new image. Register a snapshot of a root device volume You can use -RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device -volume. You specify the snapshot using a block device mapping. You can't set the encryption -state of the volume using the block device mapping. If the snapshot is encrypted, or -encryption by default is enabled, the root volume of an instance launched from the AMI is -encrypted. For more information, see Create a Linux AMI from a snapshot and Use encryption -with Amazon EBS-backed AMIs in the Amazon Elastic Compute Cloud User Guide. Amazon Web -Services Marketplace product codes If any snapshots have Amazon Web Services Marketplace -product codes, they are copied to the new AMI. Windows and some Linux distributions, such -as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the Amazon -EC2 billing product code associated with an AMI to verify the subscription status for -package updates. 
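
A short sketch for the new PurchaseCapacityBlock wrapper documented above; the offering ID would normally come from DescribeCapacityBlockOfferings and is a placeholder here.

using AWS
@service EC2

# Purchase a Capacity Block for Linux ML workloads using a previously discovered offering.
EC2.purchase_capacity_block("cbo-0123456789abcdef0", "Linux/UNIX")
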
To create a new AMI for operating systems that require a billing product -code, instead of registering the AMI, do the following to preserve the billing product code +Registers an AMI. When you're creating an instance-store backed AMI, registering the AMI is +the final step in the creation process. For more information about creating AMIs, see +Create your own AMI in the Amazon Elastic Compute Cloud User Guide. For Amazon EBS-backed +instances, CreateImage creates and registers the AMI in a single request, so you don't have +to register the AMI yourself. We recommend that you always use CreateImage unless you have +a specific reason to use RegisterImage. If needed, you can deregister an AMI at any time. +Any modifications you make to an AMI backed by an instance store volume invalidates its +registration. If you make changes to an image, deregister the previous image and register +the new image. Register a snapshot of a root device volume You can use RegisterImage to +create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify +the snapshot using a block device mapping. You can't set the encryption state of the volume +using the block device mapping. If the snapshot is encrypted, or encryption by default is +enabled, the root volume of an instance launched from the AMI is encrypted. For more +information, see Create a Linux AMI from a snapshot and Use encryption with Amazon +EBS-backed AMIs in the Amazon Elastic Compute Cloud User Guide. Amazon Web Services +Marketplace product codes If any snapshots have Amazon Web Services Marketplace product +codes, they are copied to the new AMI. Windows and some Linux distributions, such as Red +Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the Amazon EC2 +billing product code associated with an AMI to verify the subscription status for package +updates. To create a new AMI for operating systems that require a billing product code, +instead of registering the AMI, do the following to preserve the billing product code association: Launch an instance from an existing AMI with that billing product code. Customize the instance. Create an AMI from the instance using CreateImage. If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an @@ -27461,7 +28883,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the volume. If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost only. For more information, - Amazon EBS local snapshots on Outposts in the Amazon EC2 User Guide. + Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide. - `"BootMode"`: The boot mode of the AMI. A value of uefi-preferred indicates that the AMI supports both UEFI and Legacy BIOS. The operating system contained in the AMI must be configured to support the specified boot mode. For more information, see Boot modes in the @@ -27476,6 +28898,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys addition, HttpPutResponseHopLimit is set to 2. For more information, see Configure the AMI in the Amazon EC2 User Guide. If you set the value to v2.0, make sure that your AMI software can support IMDSv2. +- `"TagSpecification"`: The tags to apply to the AMI. To tag the AMI, the value for + ResourceType must be image. 
If you specify another value for ResourceType, the request + fails. To tag an AMI after it has been registered, see CreateTags. - `"TpmSupport"`: Set to v2.0 to enable Trusted Platform Module (TPM) support. For more information, see NitroTPM in the Amazon EC2 User Guide. - `"UefiData"`: Base64 representation of the non-volatile UEFI variable store. To retrieve @@ -27572,9 +28997,9 @@ end Registers members (network interfaces) with the transit gateway multicast group. A member is a network interface associated with a supported EC2 instance that receives multicast -traffic. For information about supported instances, see Multicast Consideration in Amazon -VPC Transit Gateways. After you add the members, use SearchTransitGatewayMulticastGroups to -verify that the members were added to the transit gateway multicast group. +traffic. For more information, see Multicast on transit gateways in the Amazon Web Services +Transit Gateways Guide. After you add the members, use SearchTransitGatewayMulticastGroups +to verify that the members were added to the transit gateway multicast group. # Arguments - `transit_gateway_multicast_domain_id`: The ID of the transit gateway multicast domain. @@ -27632,8 +29057,8 @@ end Registers sources (network interfaces) with the specified transit gateway multicast group. A multicast source is a network interface attached to a supported instance that sends -multicast traffic. For information about supported instances, see Multicast Considerations -in Amazon VPC Transit Gateways. After you add the source, use +multicast traffic. For more information about supported instances, see Multicast on transit +gateways in the Amazon Web Services Transit Gateways Guide. After you add the source, use SearchTransitGatewayMulticastGroups to verify that the source was added to the multicast group. @@ -27936,9 +29361,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AllocationId"`: The allocation ID. This parameter is required. - `"NetworkBorderGroup"`: The set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. If you provide an incorrect network - border group, you receive an InvalidAddress.NotFound error. You cannot use a network border - group with EC2 Classic. If you attempt this operation on EC2 classic, you receive an - InvalidParameterCombination error. + border group, you receive an InvalidAddress.NotFound error. - `"PublicIp"`: Deprecated. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -28110,8 +29533,7 @@ end Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see -Network ACLs in the Amazon Virtual Private Cloud User Guide. This is an idempotent -operation. +Network ACLs in the Amazon VPC User Guide. This is an idempotent operation. # Arguments - `association_id`: The ID of the current association between the original network ACL and @@ -28161,7 +29583,7 @@ end replace_network_acl_entry(egress, network_acl_id, protocol, rule_action, rule_number, params::Dict{String,<:Any}) Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. # Arguments - `egress`: Indicates whether to replace the egress rule. 
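
To make the RegisterImage snapshot workflow described above concrete, a hedged sketch follows. Only the AMI name is a required argument of the generated wrapper; the Architecture, VirtualizationType, RootDeviceName, and BlockDeviceMapping keys come from the RegisterImage API rather than this excerpt, and the nested mapping is assumed to flatten from Dicts and vectors.

using AWS
@service EC2

# Register an EBS-backed AMI from a root-volume snapshot (IDs and device names are placeholders).
EC2.register_image(
    "example-ami-from-snapshot",
    Dict(
        "Architecture" => "x86_64",
        "VirtualizationType" => "hvm",
        "RootDeviceName" => "/dev/xvda",
        "BlockDeviceMapping" => [Dict(
            "DeviceName" => "/dev/xvda",
            "Ebs" => Dict("SnapshotId" => "snap-0123456789abcdef0"),
        )],
    ),
)
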
Default: If no value is @@ -28247,7 +29669,7 @@ end Replaces an existing route within a route table in a VPC. You must specify either a destination CIDR block or a prefix list ID. You must also specify exactly one of the resources from the parameter list, or reset the local route to its default target. For more -information, see Route tables in the Amazon Virtual Private Cloud User Guide. +information, see Route tables in the Amazon VPC User Guide. # Arguments - `route_table_id`: The ID of the route table. @@ -28307,9 +29729,9 @@ end Changes the route table associated with a given subnet, internet gateway, or virtual private gateway in a VPC. After the operation completes, the subnet or gateway uses the routes in the new route table. For more information about route tables, see Route tables in -the Amazon Virtual Private Cloud User Guide. You can also use this operation to change -which table is the main route table in the VPC. Specify the main route table's association -ID and the route table ID of the new main route table. +the Amazon VPC User Guide. You can also use this operation to change which table is the +main route table in the VPC. Specify the main route table's association ID and the route +table ID of the new main route table. # Arguments - `association_id`: The association ID. @@ -28599,11 +30021,10 @@ end request_spot_instances() request_spot_instances(params::Dict{String,<:Any}) -Creates a Spot Instance request. For more information, see Spot Instance requests in the -Amazon EC2 User Guide for Linux Instances. We strongly discourage using the -RequestSpotInstances API because it is a legacy API with no planned investment. For options -for requesting Spot Instances, see Which is the best Spot request method to use? in the -Amazon EC2 User Guide for Linux Instances. +Creates a Spot Instance request. For more information, see Work with Spot Instance in the +Amazon EC2 User Guide. We strongly discourage using the RequestSpotInstances API because +it is a legacy API with no planned investment. For options for requesting Spot Instances, +see Which is the best Spot request method to use? in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -28630,8 +30051,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launched in any available Availability Zone. - `"blockDurationMinutes"`: Deprecated. - `"clientToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. For more information, see How to Ensure Idempotency in the - Amazon EC2 User Guide for Linux Instances. + idempotency of the request. For more information, see Ensuring idempotency in Amazon EC2 + API requests in the Amazon EC2 User Guide. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -28728,7 +30149,7 @@ Resets the default KMS key for EBS encryption for your account in this Region to Web Services managed KMS key for EBS. After resetting the default KMS key to the Amazon Web Services managed KMS key, you can continue to encrypt by a customer managed KMS key by specifying it when you create the volume. For more information, see Amazon EBS encryption -in the Amazon Elastic Compute Cloud User Guide. +in the Amazon EBS User Guide. 
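
A quick sketch of the ReplaceRoute wrapper documented above: repointing a route table's default route at a NAT gateway. The IDs are placeholders; DestinationCidrBlock and NatGatewayId are standard ReplaceRoute parameters, though they are not spelled out in this hunk.

using AWS
@service EC2

# Send all outbound traffic from the associated subnets through a hypothetical NAT gateway.
EC2.replace_route(
    "rtb-0123456789abcdef0",
    Dict("DestinationCidrBlock" => "0.0.0.0/0", "NatGatewayId" => "nat-0123456789abcdef0"),
)
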
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -28850,7 +30271,7 @@ the instance must be in a stopped state. To reset the sourceDestCheck, the insta either running or stopped. The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, -see NAT Instances in the Amazon VPC User Guide. +see NAT instances in the Amazon VPC User Guide. # Arguments - `attribute`: The attribute to reset. You can only reset the following attributes: kernel @@ -28941,7 +30362,7 @@ end reset_snapshot_attribute(attribute, snapshot_id, params::Dict{String,<:Any}) Resets permission settings for the specified snapshot. For more information about modifying -snapshot permissions, see Share a snapshot in the Amazon Elastic Compute Cloud User Guide. +snapshot permissions, see Share a snapshot in the Amazon EBS User Guide. # Arguments - `attribute`: The attribute to reset. Currently, only the attribute for permission to @@ -29129,7 +30550,7 @@ end restore_snapshot_from_recycle_bin(snapshot_id, params::Dict{String,<:Any}) Restores a snapshot from the Recycle Bin. For more information, see Restore snapshots from -the Recycle Bin in the Amazon Elastic Compute Cloud User Guide. +the Recycle Bin in the Amazon EBS User Guide. # Arguments - `snapshot_id`: The ID of the snapshot to restore. @@ -29172,8 +30593,7 @@ end Restores an archived Amazon EBS snapshot for use temporarily or permanently, or modifies the restore period or restore type for a snapshot that was previously temporarily restored. For more information see Restore an archived snapshot and modify the restore period or -restore type for a temporarily restored snapshot in the Amazon Elastic Compute Cloud User -Guide. +restore type for a temporarily restored snapshot in the Amazon EBS User Guide. # Arguments - `snapshot_id`: The ID of the snapshot to restore. @@ -29278,19 +30698,19 @@ end revoke_security_group_egress(group_id) revoke_security_group_egress(group_id, params::Dict{String,<:Any}) -[VPC only] Removes the specified outbound (egress) rules from a security group for EC2-VPC. -This action does not apply to security groups for use in EC2-Classic. You can specify rules -using either rule IDs or security group rule properties. If you use rule properties, the -values that you specify (for example, ports) must match the existing rule's values exactly. -Each rule has a protocol, from and to ports, and destination (CIDR range, security group, -or prefix list). For the TCP and UDP protocols, you must also specify the destination port -or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If -the security group rule has a description, you do not need to specify the description to -revoke the rule. [Default VPC] If the values you specify do not match the existing rule's -values, no error is returned, and the output describes the security group rules that were -not revoked. Amazon Web Services recommends that you describe the security group to verify -that the rules were removed. Rule changes are propagated to instances within the security -group as quickly as possible. However, a small delay might occur. +Removes the specified outbound (egress) rules from the specified security group. You can +specify rules using either rule IDs or security group rule properties. 
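
Returning briefly to the archived-snapshot restore documented earlier in this hunk, a minimal sketch using a temporary restore window. The snapshot ID is a placeholder, and the TemporaryRestoreDays/PermanentRestore parameter names follow the RestoreSnapshotTier API rather than this excerpt.

using AWS
@service EC2

# Temporarily restore an archived snapshot for seven days.
EC2.restore_snapshot_tier(
    "snap-0123456789abcdef0",
    Dict("TemporaryRestoreDays" => 7),
)
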
If you use rule +properties, the values that you specify (for example, ports) must match the existing rule's +values exactly. Each rule has a protocol, from and to ports, and destination (CIDR range, +security group, or prefix list). For the TCP and UDP protocols, you must also specify the +destination port or range of ports. For the ICMP protocol, you must also specify the ICMP +type and code. If the security group rule has a description, you do not need to specify the +description to revoke the rule. For a default VPC, if the values you specify do not match +the existing rule's values, no error is returned, and the output describes the security +group rules that were not revoked. Amazon Web Services recommends that you describe the +security group to verify that the rules were removed. Rule changes are propagated to +instances within the security group as quickly as possible. However, a small delay might +occur. # Arguments - `group_id`: The ID of the security group. @@ -29345,44 +30765,37 @@ Each rule has a protocol, from and to ports, and source (CIDR range, security gr prefix list). For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not need to specify the description to revoke -the rule. [EC2-Classic, default VPC] If the values you specify do not match the existing -rule's values, no error is returned, and the output describes the security group rules that -were not revoked. Amazon Web Services recommends that you describe the security group to -verify that the rules were removed. Rule changes are propagated to instances within the -security group as quickly as possible. However, a small delay might occur. We are retiring -EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, -see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. +the rule. For a default VPC, if the values you specify do not match the existing rule's +values, no error is returned, and the output describes the security group rules that were +not revoked. For a non-default VPC, if the values you specify do not match the existing +rule's values, an InvalidPermission.NotFound client error is returned, and no rules are +revoked. Amazon Web Services recommends that you describe the security group to verify that +the rules were removed. Rule changes are propagated to instances within the security group +as quickly as possible. However, a small delay might occur. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CidrIp"`: The CIDR IP address range. You can't specify this parameter when specifying a source security group. - `"FromPort"`: If the protocol is TCP or UDP, this is the start of the port range. If the - protocol is ICMP, this is the type number. A value of -1 indicates all ICMP types. -- `"GroupId"`: The ID of the security group. You must specify either the security group ID - or the security group name in the request. For security groups in a nondefault VPC, you - must specify the security group ID. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You must - specify either the security group ID or the security group name in the request. For - security groups in a nondefault VPC, you must specify the security group ID. + protocol is ICMP, this is the ICMP type or -1 (all ICMP types). 
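
Because the revoke operations described above accept either rule IDs or full rule properties, the rule-ID form is usually the least error-prone. A sketch with placeholder IDs follows, passing the ID list as a vector on the assumption that AWS.jl flattens it for the query API.

using AWS
@service EC2

# Remove one outbound rule from a security group by its rule ID.
EC2.revoke_security_group_egress(
    "sg-0123456789abcdef0",
    Dict("SecurityGroupRuleId" => ["sgr-0123456789abcdef0"]),
)
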
+- `"GroupId"`: The ID of the security group. +- `"GroupName"`: [Default VPC] The name of the security group. You must specify either the + security group ID or the security group name in the request. For security groups in a + nondefault VPC, you must specify the security group ID. - `"IpPermissions"`: The sets of IP permissions. You can't specify a source security group and a CIDR IP address range in the same set of permissions. - `"IpProtocol"`: The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all. - `"SecurityGroupRuleId"`: The IDs of the security group rules. -- `"SourceSecurityGroupName"`: [EC2-Classic, default VPC] The name of the source security - group. You can't specify this parameter in combination with the following parameters: the - CIDR IP address range, the start of the port range, the IP protocol, and the end of the - port range. For EC2-VPC, the source security group must be in the same VPC. To revoke a - specific rule for an IP protocol and port range, use a set of IP permissions instead. -- `"SourceSecurityGroupOwnerId"`: [EC2-Classic] The Amazon Web Services account ID of the - source security group, if the source security group is in a different account. You can't - specify this parameter in combination with the following parameters: the CIDR IP address - range, the IP protocol, the start of the port range, and the end of the port range. To - revoke a specific rule for an IP protocol and port range, use a set of IP permissions - instead. +- `"SourceSecurityGroupName"`: [Default VPC] The name of the source security group. You + can't specify this parameter in combination with the following parameters: the CIDR IP + address range, the start of the port range, the IP protocol, and the end of the port range. + The source security group must be in the same VPC. To revoke a specific rule for an IP + protocol and port range, use a set of IP permissions instead. +- `"SourceSecurityGroupOwnerId"`: Not supported. - `"ToPort"`: If the protocol is TCP or UDP, this is the end of the port range. If the - protocol is ICMP, this is the code. A value of -1 indicates all ICMP codes. + protocol is ICMP, this is the ICMP code or -1 (all ICMP codes). - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -29414,34 +30827,35 @@ If you don't have a default VPC, you must specify a subnet ID in the request. instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet. Not all instance types support IPv6 addresses. For more information, see Instance types. If you don't -specify a security group ID, we use the default security group. For more information, see -Security groups. If any of the AMIs have a product code attached for which the user has -not subscribed, the request fails. You can create a launch template, which is a resource -that contains the parameters to launch an instance. When you launch an instance using -RunInstances, you can specify the launch template instead of specifying the launch -parameters. To ensure faster instance launches, break up large requests into smaller +specify a security group ID, we use the default security group for the VPC. For more +information, see Security groups. 
If any of the AMIs have a product code attached for +which the user has not subscribed, the request fails. You can create a launch template, +which is a resource that contains the parameters to launch an instance. When you launch an +instance using RunInstances, you can specify the launch template instead of specifying the +launch parameters. To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead -of one launch request for 500 instances. An instance is ready for you to use when it's in -the running state. You can check the state of your instance using DescribeInstances. You -can tag instances and EBS volumes during launch, after launch, or both. For more -information, see CreateTags and Tagging your Amazon EC2 resources. Linux instances have -access to the public key of the key pair at boot. You can use this key to provide secure -access to the instance. Amazon EC2 public images use this feature to provide secure access -without passwords. For more information, see Key pairs. For troubleshooting, see What to do -if an instance immediately terminates, and Troubleshooting connecting to your instance. - -# Arguments -- `max_count`: The maximum number of instances to launch. If you specify more instances - than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest - possible number of instances above MinCount. Constraints: Between 1 and the maximum number - you're allowed for the specified instance type. For more information about the default - limits, and how to request an increase, see How many instances can I run in Amazon EC2 in - the Amazon EC2 FAQ. -- `min_count`: The minimum number of instances to launch. If you specify a minimum that is - more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 - launches no instances. Constraints: Between 1 and the maximum number you're allowed for the - specified instance type. For more information about the default limits, and how to request - an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ. +of one launch request for 500 instances. RunInstances is subject to both request rate +limiting and resource rate limiting. For more information, see Request throttling. An +instance is ready for you to use when it's in the running state. You can check the state of +your instance using DescribeInstances. You can tag instances and EBS volumes during launch, +after launch, or both. For more information, see CreateTags and Tagging your Amazon EC2 +resources. Linux instances have access to the public key of the key pair at boot. You can +use this key to provide secure access to the instance. Amazon EC2 public images use this +feature to provide secure access without passwords. For more information, see Key pairs. +For troubleshooting, see What to do if an instance immediately terminates, and +Troubleshooting connecting to your instance. + +# Arguments +- `max_count`: The maximum number of instances to launch. If you specify a value that is + more capacity than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 + launches the largest possible number of instances above the specified minimum count. + Constraints: Between 1 and the quota for the specified instance type for your account for + this Region. For more information, see Amazon EC2 instance type quotas. +- `min_count`: The minimum number of instances to launch. 
If you specify a value that is + more capacity than Amazon EC2 can provide in the target Availability Zone, Amazon EC2 does + not launch any instances. Constraints: Between 1 and the quota for the specified instance + type for your account for this Region. For more information, see Amazon EC2 instance type + quotas. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -29461,34 +30875,38 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys (T3/T3a/T4g instances) For T3 instances with host tenancy, only standard is supported. - `"DisableApiStop"`: Indicates whether an instance is enabled for stop protection. For more information, see Stop protection. -- `"ElasticGpuSpecification"`: An elastic GPU to associate with the instance. An Elastic - GPU is a GPU resource that you can attach to your Windows instance to accelerate the - graphics performance of your applications. For more information, see Amazon EC2 Elastic - GPUs in the Amazon EC2 User Guide. +- `"ElasticGpuSpecification"`: An elastic GPU to associate with the instance. Amazon + Elastic Graphics reached end of life on January 8, 2024. - `"ElasticInferenceAccelerator"`: An elastic inference accelerator to associate with the - instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 - instances to accelerate your Deep Learning (DL) inference workloads. You cannot specify - accelerators from different generations in the same request. Starting April 15, 2023, - Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and - will help current customers migrate their workloads to options that offer better price and - performance. After April 15, 2023, new customers will not be able to launch instances with - Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers - who have used Amazon EI at least once during the past 30-day period are considered current - customers and will be able to continue using the service. + instance. Amazon Elastic Inference (EI) is no longer available to new customers. For more + information, see Amazon Elastic Inference FAQs. +- `"EnablePrimaryIpv6"`: If you’re launching an instance into a dual-stack or IPv6-only + subnet, you can enable assigning a primary IPv6 address. A primary IPv6 address is an IPv6 + GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use + this option if an instance relies on its IPv6 address not changing. When you launch the + instance, Amazon Web Services will automatically assign an IPv6 address associated with the + ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA + address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to + be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the + instance is terminated or the network interface is detached. If you have multiple IPv6 + addresses associated with an ENI attached to your instance and you enable a primary IPv6 + address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 + address. - `"EnclaveOptions"`: Indicates whether the instance is enabled for Amazon Web Services - Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in + Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. 
You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. -- `"HibernationOptions"`: Indicates whether an instance is enabled for hibernation. For - more information, see Hibernate your instance in the Amazon EC2 User Guide. You can't +- `"HibernationOptions"`: Indicates whether an instance is enabled for hibernation. This + parameter is valid only if the instance meets the hibernation prerequisites. For more + information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide. You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance. - `"ImageId"`: The ID of the AMI. An AMI ID is required to launch an instance and must be specified here or in a launch template. - `"InstanceMarketOptions"`: The market (purchasing) option for the instances. For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop. -- `"InstanceType"`: The instance type. For more information, see Instance types in the - Amazon EC2 User Guide. Default: m1.small +- `"InstanceType"`: The instance type. For more information, see Amazon EC2 instance types + in the Amazon EC2 User Guide. - `"Ipv6Address"`: The IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a @@ -29504,9 +30922,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"KeyName"`: The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair. If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in. -- `"LaunchTemplate"`: The launch template to use to launch the instances. Any parameters - that you specify in RunInstances override the same parameters in the launch template. You - can specify either the name or ID of a launch template, but not both. +- `"LaunchTemplate"`: The launch template. Any additional parameters that you specify for + the new instance overwrite the corresponding parameters included in the launch template. - `"LicenseSpecification"`: The license configurations. - `"MaintenanceOptions"`: The maintenance and recovery options for the instance. - `"MetadataOptions"`: The metadata options for the instance. For more information, see @@ -29514,29 +30931,31 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Monitoring"`: Specifies whether detailed monitoring is enabled for the instance. - `"Placement"`: The placement for the instance. - `"PrivateDnsNameOptions"`: The options for the instance hostname. The default values are - inherited from the subnet. + inherited from the subnet. Applies only if creating a network interface, not attaching an + existing one. - `"RamdiskId"`: The ID of the RAM disk to select. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, go to the Amazon Web Services Resource Center and search for the kernel ID. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon EC2 User Guide. - `"SecurityGroup"`: [Default VPC] The names of the security groups. 
If you specify a - network interface, you must specify any security groups as part of the network interface. - Default: Amazon EC2 uses the default security group. + network interface, you must specify any security groups as part of the network interface + instead of using this parameter. Default: Amazon EC2 uses the default security group. - `"SecurityGroupId"`: The IDs of the security groups. You can create a security group using CreateSecurityGroup. If you specify a network interface, you must specify any - security groups as part of the network interface. + security groups as part of the network interface instead of using this parameter. - `"SubnetId"`: The ID of the subnet to launch the instance into. If you specify a network - interface, you must specify any subnets as part of the network interface. + interface, you must specify any subnets as part of the network interface instead of using + this parameter. - `"TagSpecification"`: The tags to apply to the resources that are created during instance - launch. You can specify tags for the following resources only: Instances Volumes - Elastic graphics Spot Instance requests Network interfaces To tag a resource after it - has been created, see CreateTags. + launch. You can specify tags for the following resources only: Instances Volumes Spot + Instance requests Network interfaces To tag a resource after it has been created, see + CreateTags. - `"UserData"`: The user data script to make available to the instance. For more - information, see Run commands on your Linux instance at launch and Run commands on your - Windows instance at launch. If you are using a command line tool, base64-encoding is - performed for you, and you can load the text from a file. Otherwise, you must provide - base64-encoded text. User data is limited to 16 KB. + information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User + Guide. If you are using a command line tool, base64-encoding is performed for you, and you + can load the text from a file. Otherwise, you must provide base64-encoded text. User data + is limited to 16 KB. - `"additionalInfo"`: Reserved. - `"clientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used @@ -29559,9 +30978,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"instanceInitiatedShutdownBehavior"`: Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown). Default: stop -- `"networkInterface"`: The network interfaces to associate with the instance. If you - specify a network interface, you must specify any security groups and subnets as part of - the network interface. +- `"networkInterface"`: The network interfaces to associate with the instance. - `"privateIpAddress"`: The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet. Only one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address @@ -29613,8 +31030,7 @@ Launches the specified Scheduled Instances. Before you can launch a Scheduled In must purchase it and obtain an identifier using PurchaseScheduledInstances. You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. 
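
Pulling the RunInstances parameters above together, a hedged single-instance launch sketch; the AMI, key pair, and subnet IDs are placeholders, and MaxCount/MinCount are the two required positional arguments of the generated wrapper.

using AWS
@service EC2

# Launch exactly one t3.micro instance into a specific subnet.
EC2.run_instances(
    1,  # MaxCount
    1,  # MinCount
    Dict(
        "ImageId" => "ami-0123456789abcdef0",
        "InstanceType" => "t3.micro",
        "KeyName" => "my-key-pair",
        "SubnetId" => "subnet-0123456789abcdef0",
    ),
)
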
If you terminate a Scheduled Instance before -the current scheduled time period ends, you can launch it again after a few minutes. For -more information, see Scheduled Instances in the Amazon EC2 User Guide. +the current scheduled time period ends, you can launch it again after a few minutes. # Arguments - `launch_specification`: The launch specification. You must match the instance type, @@ -29812,7 +31228,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"MaxResults"`: The maximum number of routes to return. +- `"MaxResults"`: The maximum number of routes to return. If a value is not provided, the + default is 1000. """ function search_transit_gateway_routes( Filter, TransitGatewayRouteTableId; aws_config::AbstractAWSConfig=global_aws_config() @@ -29862,8 +31279,8 @@ tasks, such as generating a memory dump file, loading a secondary kernel, or obt call trace. Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks. For more information about configuring your operating system to generate a crash dump when a kernel -panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) (Linux -instances) or Send a diagnostic interrupt (for advanced users) (Windows instances). +panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) in the +Amazon EC2 User Guide. # Arguments - `instance_id`: The ID of the instance. @@ -29913,10 +31330,10 @@ instance usage, and thereafter charges per second for instance usage. Before sto instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM. Performing this operation on an instance that uses an instance store as its root device returns an error. If you attempt to start a T3 instance -with host tenancy and the unlimted CPU credit option, the request fails. The unlimited CPU +with host tenancy and the unlimited CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated. -For more information, see Stop and start your instance in the Amazon EC2 User Guide. +For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide. # Arguments - `instance_id`: The IDs of the instances. @@ -30112,31 +31529,32 @@ end stop_instances(instance_id) stop_instances(instance_id, params::Dict{String,<:Any}) -Stops an Amazon EBS-backed instance. For more information, see Stop and start your instance -in the Amazon EC2 User Guide. You can use the Stop action to hibernate an instance if the -instance is enabled for hibernation and it meets the hibernation prerequisites. For more -information, see Hibernate your instance in the Amazon EC2 User Guide. We don't charge -usage for a stopped instance, or data transfer fees; however, your root partition Amazon -EBS volume remains and continues to persist your data, and you are charged for Amazon EBS -volume usage. 
Every time you start your instance, Amazon EC2 charges a one-minute minimum -for instance usage, and thereafter charges per second for instance usage. You can't stop or -hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot -Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they -are interrupted. For more information, see Hibernating interrupted Spot Instances in the -Amazon EC2 User Guide. When you stop or hibernate an instance, we shut it down. You can -restart your instance at any time. Before stopping or hibernating an instance, make sure it -is in a state from which it can be restarted. Stopping an instance does not preserve data -stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance -cannot hibernate successfully, a normal shutdown occurs. Stopping and hibernating an -instance is different to rebooting or terminating it. For example, when you stop or -hibernate an instance, the root device and any other devices attached to the instance -persist. When you terminate an instance, the root device and any other devices attached -during the instance launch are automatically deleted. For more information about the -differences between rebooting, stopping, hibernating, and terminating instances, see -Instance lifecycle in the Amazon EC2 User Guide. When you stop an instance, we attempt to -shut it down forcibly after a short while. If your instance appears stuck in the stopping -state after a period of time, there may be an issue with the underlying host computer. For -more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide. +Stops an Amazon EBS-backed instance. For more information, see Stop and start Amazon EC2 +instances in the Amazon EC2 User Guide. You can use the Stop action to hibernate an +instance if the instance is enabled for hibernation and it meets the hibernation +prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon +EC2 User Guide. We don't charge usage for a stopped instance, or data transfer fees; +however, your root partition Amazon EBS volume remains and continues to persist your data, +and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon +EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for +instance usage. You can't stop or hibernate instance store-backed instances. You can't use +the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should +hibernate Spot Instances when they are interrupted. For more information, see Hibernating +interrupted Spot Instances in the Amazon EC2 User Guide. When you stop or hibernate an +instance, we shut it down. You can restart your instance at any time. Before stopping or +hibernating an instance, make sure it is in a state from which it can be restarted. +Stopping an instance does not preserve data stored in RAM, but hibernating an instance does +preserve data stored in RAM. If an instance cannot hibernate successfully, a normal +shutdown occurs. Stopping and hibernating an instance is different to rebooting or +terminating it. For example, when you stop or hibernate an instance, the root device and +any other devices attached to the instance persist. When you terminate an instance, the +root device and any other devices attached during the instance launch are automatically +deleted. 
For more information about the differences between rebooting, stopping, +hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User +Guide. When you stop an instance, we attempt to shut it down forcibly after a short while. +If your instance appears stuck in the stopping state after a period of time, there may be +an issue with the underlying host computer. For more information, see Troubleshoot stopping +your instance in the Amazon EC2 User Guide. # Arguments - `instance_id`: The IDs of the instances. @@ -30383,16 +31801,16 @@ end Unassigns secondary private IPv4 addresses from a private NAT gateway. You cannot unassign your primary private IP. For more information, see Edit secondary IP address associations -in the Amazon Virtual Private Cloud User Guide. While unassigning is in progress, you -cannot assign/unassign additional IP addresses while the connections are being drained. You -are, however, allowed to delete the NAT gateway. A private IP address will only be released -at the end of MaxDrainDurationSeconds. The private IP addresses stay associated and support -the existing connections but do not support any new connections (new connections are -distributed across the remaining assigned private IP address). After the existing -connections drain out, the private IP addresses get released. +in the Amazon VPC User Guide. While unassigning is in progress, you cannot assign/unassign +additional IP addresses while the connections are being drained. You are, however, allowed +to delete the NAT gateway. A private IP address will only be released at the end of +MaxDrainDurationSeconds. The private IP addresses stay associated and support the existing +connections, but do not support any new connections (new connections are distributed across +the remaining assigned private IP address). After the existing connections drain out, the +private IP addresses are released. # Arguments -- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. - `private_ip_address`: The private IPv4 addresses you want to unassign. # Optional Parameters @@ -30438,6 +31856,46 @@ function unassign_private_nat_gateway_address( ) end +""" + unlock_snapshot(snapshot_id) + unlock_snapshot(snapshot_id, params::Dict{String,<:Any}) + +Unlocks a snapshot that is locked in governance mode or that is locked in compliance mode +but still in the cooling-off period. You can't unlock a snapshot that is locked in +compliance mode after the cooling-off period has expired. + +# Arguments +- `snapshot_id`: The ID of the snapshot to unlock. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
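
As a sketch of the hibernation path described above (placeholder instance ID; the instance must meet the hibernation prerequisites):

using AWS
@service EC2

# Hibernate rather than plain-stop the instance; a normal shutdown occurs if
# hibernation cannot succeed, as noted in the docstring above.
EC2.stop_instances(
    "i-0123456789abcdef0",
    Dict("Hibernate" => true),
)
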
+""" +function unlock_snapshot(SnapshotId; aws_config::AbstractAWSConfig=global_aws_config()) + return ec2( + "UnlockSnapshot", + Dict{String,Any}("SnapshotId" => SnapshotId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function unlock_snapshot( + SnapshotId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "UnlockSnapshot", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SnapshotId" => SnapshotId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ unmonitor_instances(instance_id) unmonitor_instances(instance_id, params::Dict{String,<:Any}) @@ -30481,10 +31939,10 @@ end update_security_group_rule_descriptions_egress() update_security_group_rule_descriptions_egress(params::Dict{String,<:Any}) -[VPC only] Updates the description of an egress (outbound) security group rule. You can -replace an existing description, or add a description to a rule that did not have one -previously. You can remove a description for a security group rule by omitting the -description parameter in the request. +Updates the description of an egress (outbound) security group rule. You can replace an +existing description, or add a description to a rule that did not have one previously. You +can remove a description for a security group rule by omitting the description parameter in +the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -30495,7 +31953,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys or the security group name in the request. For security groups in a nondefault VPC, you must specify the security group ID. - `"GroupName"`: [Default VPC] The name of the security group. You must specify either the - security group ID or the security group name in the request. + security group ID or the security group name. - `"IpPermissions"`: The IP permissions for the security group rule. You must specify either the IP permissions or the description. - `"SecurityGroupRuleDescription"`: The description for the egress security group rules. @@ -30538,13 +31996,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"GroupId"`: The ID of the security group. You must specify either the security group ID or the security group name in the request. For security groups in a nondefault VPC, you must specify the security group ID. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You must - specify either the security group ID or the security group name in the request. For - security groups in a nondefault VPC, you must specify the security group ID. +- `"GroupName"`: [Default VPC] The name of the security group. You must specify either the + security group ID or the security group name. For security groups in a nondefault VPC, you + must specify the security group ID. - `"IpPermissions"`: The IP permissions for the security group rule. You must specify either IP permissions or a description. -- `"SecurityGroupRuleDescription"`: [VPC only] The description for the ingress security - group rules. You must specify either a description or IP permissions. +- `"SecurityGroupRuleDescription"`: The description for the ingress security group rules. + You must specify either a description or IP permissions. 
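To illustrate the ingress variant just described, a hedged sketch with placeholder group and rule IDs; it assumes the EC2 Query members can be passed pre-flattened through the params dictionary and that the EC2 module is loaded via `using AWS; @service Ec2` (adjust to however you normally pass structured EC2 parameters):

using AWS
@service Ec2

# Replace the description on a single ingress rule; both IDs are placeholders.
Ec2.update_security_group_rule_descriptions_ingress(Dict(
    "GroupId" => "sg-1234567890abcdef0",
    "SecurityGroupRuleDescription.1.SecurityGroupRuleId" => "sgr-0abc1234def567890",
    "SecurityGroupRuleDescription.1.Description" => "HTTPS from the office network",
))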
""" function update_security_group_rule_descriptions_ingress(; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/ecr.jl b/src/services/ecr.jl index dda16771d1..1b94d9de1c 100644 --- a/src/services/ecr.jl +++ b/src/services/ecr.jl @@ -274,19 +274,28 @@ end create_pull_through_cache_rule(ecr_repository_prefix, upstream_registry_url, params::Dict{String,<:Any}) Creates a pull through cache rule. A pull through cache rule provides a way to cache images -from an external public registry in your Amazon ECR private registry. +from an upstream registry source in your Amazon ECR private registry. For more information, +see Using pull through cache rules in the Amazon Elastic Container Registry User Guide. # Arguments - `ecr_repository_prefix`: The repository name prefix to use when caching images from the source registry. - `upstream_registry_url`: The registry URL of the upstream public registry to use as the - source for the pull through cache rule. + source for the pull through cache rule. The following is the syntax to use for each + supported upstream registry. Amazon ECR Public (ecr-public) - public.ecr.aws Docker + Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - + registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io + Microsoft Azure Container Registry (azure-container-registry) - <custom>.azurecr.io + GitLab Container Registry (gitlab-container-registry) - registry.gitlab.com # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"credentialArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services Secrets + Manager secret that identifies the credentials to authenticate to the upstream registry. - `"registryId"`: The Amazon Web Services account ID associated with the registry to create the pull through cache rule for. If you do not specify a registry, the default registry is assumed. +- `"upstreamRegistry"`: The name of the upstream registry. """ function create_pull_through_cache_rule( ecrRepositoryPrefix, @@ -336,7 +345,9 @@ Elastic Container Registry User Guide. # Arguments - `repository_name`: The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to - group the repository into a category (such as project-a/nginx-web-app). + group the repository into a category (such as project-a/nginx-web-app). The repository name + must start with a letter and can only contain lowercase letters, numbers, hyphens, + underscores, and forward slashes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -492,15 +503,17 @@ end delete_repository(repository_name) delete_repository(repository_name, params::Dict{String,<:Any}) -Deletes a repository. If the repository contains images, you must either delete all images -in the repository or use the force option to delete the repository. +Deletes a repository. If the repository isn't empty, you must either delete the contents of +the repository or use the force option to delete the repository and have Amazon ECR delete +all of its contents on your behalf. # Arguments - `repository_name`: The name of the repository to delete. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"force"`: If a repository contains images, forces the deletion. 
+- `"force"`: If true, deleting the repository force deletes the contents of the repository. + If false, the repository must be empty before attempting to delete it. - `"registryId"`: The Amazon Web Services account ID associated with the registry that contains the repository to delete. If you do not specify a registry, the default registry is assumed. @@ -1823,6 +1836,59 @@ function untag_resource( ) end +""" + update_pull_through_cache_rule(credential_arn, ecr_repository_prefix) + update_pull_through_cache_rule(credential_arn, ecr_repository_prefix, params::Dict{String,<:Any}) + +Updates an existing pull through cache rule. + +# Arguments +- `credential_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services Secrets + Manager secret that identifies the credentials to authenticate to the upstream registry. +- `ecr_repository_prefix`: The repository name prefix to use when caching images from the + source registry. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"registryId"`: The Amazon Web Services account ID associated with the registry + associated with the pull through cache rule. If you do not specify a registry, the default + registry is assumed. +""" +function update_pull_through_cache_rule( + credentialArn, ecrRepositoryPrefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "UpdatePullThroughCacheRule", + Dict{String,Any}( + "credentialArn" => credentialArn, "ecrRepositoryPrefix" => ecrRepositoryPrefix + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_pull_through_cache_rule( + credentialArn, + ecrRepositoryPrefix, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ecr( + "UpdatePullThroughCacheRule", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "credentialArn" => credentialArn, + "ecrRepositoryPrefix" => ecrRepositoryPrefix, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ upload_layer_part(layer_part_blob, part_first_byte, part_last_byte, repository_name, upload_id) upload_layer_part(layer_part_blob, part_first_byte, part_last_byte, repository_name, upload_id, params::Dict{String,<:Any}) @@ -1899,3 +1965,50 @@ function upload_layer_part( feature_set=SERVICE_FEATURE_SET, ) end + +""" + validate_pull_through_cache_rule(ecr_repository_prefix) + validate_pull_through_cache_rule(ecr_repository_prefix, params::Dict{String,<:Any}) + +Validates an existing pull through cache rule for an upstream registry that requires +authentication. This will retrieve the contents of the Amazon Web Services Secrets Manager +secret, verify the syntax, and then validate that authentication to the upstream registry +is successful. + +# Arguments +- `ecr_repository_prefix`: The repository name prefix associated with the pull through + cache rule. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"registryId"`: The registry ID associated with the pull through cache rule. If you do + not specify a registry, the default registry is assumed. 
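The pull through cache operations in this file (create, update, validate) compose into a workflow like the following hedged sketch; the repository prefix, upstream URL, and Secrets Manager ARN are placeholders, and the generated ECR module is assumed to be loaded via `using AWS; @service Ecr`:

using AWS
@service Ecr

# Cache Docker Hub images under the "docker-hub" prefix, authenticating with a
# Secrets Manager secret (placeholder ARN), then confirm the credentials work.
secret_arn = "arn:aws:secretsmanager:us-east-1:111122223333:secret:ecr-pullthroughcache/docker-hub-EXAMPLE"
Ecr.create_pull_through_cache_rule(
    "docker-hub", "registry-1.docker.io", Dict("credentialArn" => secret_arn)
)
Ecr.validate_pull_through_cache_rule("docker-hub")

# Later, point the existing rule at a rotated secret.
Ecr.update_pull_through_cache_rule(secret_arn, "docker-hub")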
+""" +function validate_pull_through_cache_rule( + ecrRepositoryPrefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "ValidatePullThroughCacheRule", + Dict{String,Any}("ecrRepositoryPrefix" => ecrRepositoryPrefix); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function validate_pull_through_cache_rule( + ecrRepositoryPrefix, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ecr( + "ValidatePullThroughCacheRule", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ecrRepositoryPrefix" => ecrRepositoryPrefix), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/ecs.jl b/src/services/ecs.jl index 63128dacb9..e34865f7c7 100644 --- a/src/services/ecs.jl +++ b/src/services/ecs.jl @@ -153,22 +153,22 @@ end Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the -UpdateService action. Starting April 15, 2023, Amazon Web Services will not onboard new -customers to Amazon Elastic Inference (EI), and will help current customers migrate their -workloads to options that offer better price and performance. After April 15, 2023, new -customers will not be able to launch instances with Amazon EI accelerators in Amazon -SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least -once during the past 30-day period are considered current customers and will be able to -continue using the service. In addition to maintaining the desired count of tasks in your -service, you can optionally run your service behind one or more load balancers. The load -balancers distribute traffic across the tasks that are associated with the service. For -more information, see Service load balancing in the Amazon Elastic Container Service -Developer Guide. Tasks for services that don't use a load balancer are considered healthy -if they're in the RUNNING state. Tasks for services that use a load balancer are considered -healthy if they're in the RUNNING state and are reported as healthy by the load balancer. -There are two service scheduler strategies available: REPLICA - The replica scheduling -strategy places and maintains your desired number of tasks across your cluster. By default, -the service scheduler spreads tasks across Availability Zones. You can use task placement +UpdateService action. On March 21, 2024, a change was made to resolve the task definition +revision before authorization. When a task definition revision is not specified, +authorization will occur using the latest revision of a task definition. In addition to +maintaining the desired count of tasks in your service, you can optionally run your service +behind one or more load balancers. The load balancers distribute traffic across the tasks +that are associated with the service. For more information, see Service load balancing in +the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to +Amazon ECS tasks by configuring the volume when creating or updating a service. +volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more +infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. 
+Tasks for services that don't use a load balancer are considered healthy if they're in the +RUNNING state. Tasks for services that use a load balancer are considered healthy if +they're in the RUNNING state and are reported as healthy by the load balancer. There are +two service scheduler strategies available: REPLICA - The replica scheduling strategy +places and maintains your desired number of tasks across your cluster. By default, the +service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container @@ -216,7 +216,13 @@ service name. You control your services using the CreateTaskSet operation. For m information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon -ECS task placement in the Amazon Elastic Container Service Developer Guide. +ECS task placement in the Amazon Elastic Container Service Developer Guide Starting April +15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference +(EI), and will help current customers migrate their workloads to options that offer better +price and performance. After April 15, 2023, new customers will not be able to launch +instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. +However, customers who have used Amazon EI at least once during the past 30-day period are +considered current customers and will be able to continue using the service. # Arguments - `service_name`: The name of your service. Up to 255 letters (uppercase and lowercase), @@ -232,7 +238,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for the cluster is used. A capacity provider strategy may contain a maximum of 6 capacity providers. - `"clientToken"`: An identifier that you provide to ensure the idempotency of the request. - It must be unique and is case sensitive. Up to 32 ASCII characters are allowed. + It must be unique and is case sensitive. Up to 36 ASCII characters in the range of 33-126 + (inclusive) are allowed. - `"cluster"`: The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed. - `"deploymentConfiguration"`: Optional deployment parameters that control how many tasks @@ -244,7 +251,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys isn't specified. If schedulingStrategy is DAEMON then this isn't required. - `"enableECSManagedTags"`: Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in - the Amazon Elastic Container Service Developer Guide. + the Amazon Elastic Container Service Developer Guide. When you use Amazon ECS managed tags, + you need to set the propagateTags request parameter. - `"enableExecuteCommand"`: Determines whether the execute command functionality is turned on for the service. If true, this enables execute command functionality on all containers in the service tasks. 
@@ -264,8 +272,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure. Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more - information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate. The - EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster. The + information, see Fargate capacity providers in the Amazon ECS Developer Guide. The EC2 + launch type runs your tasks on Amazon EC2 instances registered to your cluster. The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster. A service can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter @@ -318,7 +326,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"propagateTags"`: Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the - TagResource API action. + TagResource API action. You must set this to a value other than NONE when you use Cost + Explorer. For more information, see Amazon ECS usage reports in the Amazon Elastic + Container Service Developer Guide. The default is NONE. - `"role"`: The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition doesn't use the @@ -377,6 +387,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys revision is used. A task definition must be specified if the service uses either the ECS or CODE_DEPLOY deployment controllers. For more information about deployment types, see Amazon ECS deployment types. +- `"volumeConfigurations"`: The configuration for a volume specified in the task definition + as a volume that is configured at launch time. Currently, the only supported volume type is + an Amazon EBS volume. """ function create_service(serviceName; aws_config::AbstractAWSConfig=global_aws_config()) return ecs( @@ -407,14 +420,20 @@ end Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment -types in the Amazon Elastic Container Service Developer Guide. +types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change +was made to resolve the task definition revision before authorization. When a task +definition revision is not specified, authorization will occur using the latest revision of +a task definition. For information about the maximum number of task sets and other +quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer +Guide. # Arguments - `cluster`: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set in. - `service`: The short name or full Amazon Resource Name (ARN) of the service to create the task set in. -- `task_definition`: The task definition for the tasks in the task set to use.
+- `task_definition`: The task definition for the tasks in the task set to use. If a + revision isn't specified, the latest ACTIVE revision is used. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -432,9 +451,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used. The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created. -- `"clientToken"`: The identifier that you provide to ensure the idempotency of the - request. It's case sensitive and must be unique. It can be up to 32 ASCII characters are - allowed. +- `"clientToken"`: An identifier that you provide to ensure the idempotency of the request. + It must be unique and is case sensitive. Up to 36 ASCII characters in the range of 33-126 + (inclusive) are allowed. - `"externalId"`: An optional non-unique tag that identifies this task set in external systems. If the task set is associated with a service discovery registry, the tasks in this task set will have the ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute set to the provided @@ -729,6 +748,11 @@ count. You can't use a DELETE_IN_PROGRESS task definition revision to run new ta create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision. A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated. +When you delete all INACTIVE task definition revisions, the task definition name is not +displayed in the console and not returned in the API. If a task definition revisions are in +the DELETE_IN_PROGRESS state, the task definition name is displayed in the console and +returned in the API. The task definition name is retained by Amazon ECS and the revision is +incremented the next time you create a task definition with that name. # Arguments - `task_definitions`: The family and revision (family:revision) or full Amazon Resource @@ -1190,7 +1214,9 @@ end describe_tasks(tasks, params::Dict{String,<:Any}) Describes a specified task or tasks. Currently, stopped tasks appear in the returned -results for at least one hour. +results for at least one hour. If you have tasks with tags, and then delete the cluster, +the tagged tasks are returned in the response. If you create a new cluster with the same +name as the deleted cluster, the tagged tasks are not included in the response. # Arguments - `tasks`: A list of up to 100 task IDs or full ARN entries. @@ -1773,8 +1799,7 @@ end Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired -status of the task. Recently stopped tasks might appear in the returned results. Currently, -stopped tasks appear in the returned results for at least one hour. +status of the task. Recently stopped tasks might appear in the returned results. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1828,52 +1853,74 @@ end Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. 
For more information, see Account Settings -in the Amazon Elastic Container Service Developer Guide. When serviceLongArnFormat, -taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource -Name (ARN) and resource ID format of the resource type for a specified user, role, or the -root user for an account is affected. The opt-in and opt-out account setting must be set -for each Amazon ECS resource separately. The ARN and resource ID format of a resource is -defined by the opt-in status of the user or role that created the resource. You must turn -on this setting to use Amazon ECS features such as resource tagging. When awsvpcTrunking is -specified, the elastic network interface (ENI) limit for any new container instances that -support the feature is changed. If awsvpcTrunking is turned on, any new container instances -that support the feature are launched have the increased ENI limits available to them. For -more information, see Elastic Network Interface Trunking in the Amazon Elastic Container -Service Developer Guide. When containerInsights is specified, the default setting -indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your -clusters is changed. If containerInsights is turned on, any new clusters that are created -will have Container Insights turned on unless you disable it during cluster creation. For -more information, see CloudWatch Container Insights in the Amazon Elastic Container Service -Developer Guide. Amazon ECS is introducing tagging authorization for resource creation. -Users must have permissions for actions that create the resource, such as ecsCreateCluster. -If tags are specified when you create a resource, Amazon Web Services performs additional -authorization to verify if users or roles have permissions to create tags. Therefore, you -must grant explicit permissions to use the ecs:TagResource action. For more information, -see Grant permission to tag resources on creation in the Amazon ECS Developer Guide. +in the Amazon Elastic Container Service Developer Guide. # Arguments -- `name`: The Amazon ECS resource name for which to modify the account setting. If - serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If - taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is - affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your - Amazon ECS container instances is affected. If awsvpcTrunking is specified, the elastic - network interface (ENI) limit for your Amazon ECS container instances is affected. If - containerInsights is specified, the default setting for Amazon Web Services CloudWatch - Container Insights for your clusters is affected. If fargateFIPSMode is specified, Fargate - FIPS 140 compliance is affected. If tagResourceAuthorization is specified, the opt-in - option for tagging resources on creation is affected. For information about the opt-in - timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. +- `name`: The Amazon ECS account setting name to modify. The following are the valid values + for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource + Name (ARN) and resource ID format of the resource type for a specified user, role, or the + root user for an account is affected. The opt-in and opt-out account setting must be set + for each Amazon ECS resource separately. 
The ARN and resource ID format of a resource is + defined by the opt-in status of the user or role that created the resource. You must turn + on this setting to use Amazon ECS features such as resource tagging. taskLongArnFormat - + When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type + for a specified user, role, or the root user for an account is affected. The opt-in and + opt-out account setting must be set for each Amazon ECS resource separately. The ARN and + resource ID format of a resource is defined by the opt-in status of the user or role that + created the resource. You must turn on this setting to use Amazon ECS features such as + resource tagging. containerInstanceLongArnFormat - When modified, the Amazon Resource + Name (ARN) and resource ID format of the resource type for a specified user, role, or the + root user for an account is affected. The opt-in and opt-out account setting must be set + for each Amazon ECS resource separately. The ARN and resource ID format of a resource is + defined by the opt-in status of the user or role that created the resource. You must turn + on this setting to use Amazon ECS features such as resource tagging. awsvpcTrunking - + When modified, the elastic network interface (ENI) limit for any new container instances + that support the feature is changed. If awsvpcTrunking is turned on, any new container + instances that support the feature are launched have the increased ENI limits available to + them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic + Container Service Developer Guide. containerInsights - When modified, the default + setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on + for your clusters is changed. If containerInsights is turned on, any new clusters that are + created will have Container Insights turned on unless you disable it during cluster + creation. For more information, see CloudWatch Container Insights in the Amazon Elastic + Container Service Developer Guide. dualStackIPv6 - When turned on, when using a VPC in + dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address + assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, + see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched + on Fargate, see Using a VPC in dual-stack mode. fargateFIPSMode - If you specify + fargateFIPSMode, Fargate FIPS 140 compliance is affected. + fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or + infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to + be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to + configure the wait time to retire a Fargate task. For information about the Fargate tasks + maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer + Guide. tagResourceAuthorization - Amazon ECS is introducing tagging authorization for + resource creation. Users must have permissions for actions that create the resource, such + as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services + performs additional authorization to verify if users or roles have permissions to create + tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. 
For + more information, see Grant permission to tag resources on creation in the Amazon ECS + Developer Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in + Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by + your security administrator in your Amazon ECS account. Amazon GuardDuty controls this + account setting on your behalf. For more information, see Protecting Amazon ECS workloads + with Amazon ECS Runtime Monitoring. - `value`: The account setting value for the specified principal ARN. Accepted values are - enabled, disabled, on, and off. + enabled, disabled, on, and off. When you specify fargateTaskRetirementWaitPeriod for the + name, the following are the valid values: 0 - Amazon Web Services sends the + notification, and immediately retires the affected tasks. 7 - Amazon Web Services sends + the notification, and waits 7 calendar days to retire the tasks. 14 - Amazon Web + Services sends the notification, and waits 14 calendar days to retire the tasks. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"principalArn"`: The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it modifies the account setting for all users, roles, and the root user of the account unless a user or role explicitly overrides these settings. If this - field is omitted, the setting is changed only for the authenticated user. Federated users - assume the account setting of the root user and can't have explicit account settings set - for them. + field is omitted, the setting is changed only for the authenticated user. You must use the + root user when you set the Fargate wait time (fargateTaskRetirementWaitPeriod). Federated + users assume the account setting of the root user and can't have explicit account settings + set for them. """ function put_account_setting(name, value; aws_config::AbstractAWSConfig=global_aws_config()) return ecs( @@ -1907,22 +1954,61 @@ Modifies an account setting for all users on an account for whom no individual a setting has been specified. Account settings are set on a per-Region basis. # Arguments -- `name`: The resource name for which to modify the account setting. If - serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If - taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is - affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your - Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit - for your Amazon ECS container instances is affected. If containerInsights is specified, the - default setting for Amazon Web Services CloudWatch Container Insights for your clusters is - affected. If tagResourceAuthorization is specified, the opt-in option for tagging resources - on creation is affected. For information about the opt-in timeline, see Tagging - authorization timeline in the Amazon ECS Developer Guide. When you specify fargateFIPSMode - for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic - algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see - Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance - in the Amazon Elastic Container Service Developer Guide. +- `name`: The resource name for which to modify the account setting. 
The following are the + valid values for the account setting name. serviceLongArnFormat - When modified, the + Amazon Resource Name (ARN) and resource ID format of the resource type for a specified + user, role, or the root user for an account is affected. The opt-in and opt-out account + setting must be set for each Amazon ECS resource separately. The ARN and resource ID format + of a resource is defined by the opt-in status of the user or role that created the + resource. You must turn on this setting to use Amazon ECS features such as resource + tagging. taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource + ID format of the resource type for a specified user, role, or the root user for an account + is affected. The opt-in and opt-out account setting must be set for each Amazon ECS + resource separately. The ARN and resource ID format of a resource is defined by the opt-in + status of the user or role that created the resource. You must turn on this setting to use + Amazon ECS features such as resource tagging. containerInstanceLongArnFormat - When + modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a + specified user, role, or the root user for an account is affected. The opt-in and opt-out + account setting must be set for each Amazon ECS resource separately. The ARN and resource + ID format of a resource is defined by the opt-in status of the user or role that created + the resource. You must turn on this setting to use Amazon ECS features such as resource + tagging. awsvpcTrunking - When modified, the elastic network interface (ENI) limit for + any new container instances that support the feature is changed. If awsvpcTrunking is + turned on, any new container instances that support the feature are launched have the + increased ENI limits available to them. For more information, see Elastic Network Interface + Trunking in the Amazon Elastic Container Service Developer Guide. containerInsights - + When modified, the default setting indicating whether Amazon Web Services CloudWatch + Container Insights is turned on for your clusters is changed. If containerInsights is + turned on, any new clusters that are created will have Container Insights turned on unless + you disable it during cluster creation. For more information, see CloudWatch Container + Insights in the Amazon Elastic Container Service Developer Guide. dualStackIPv6 - When + turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode + can have an IPv6 address assigned. For more information on using IPv6 with tasks launched + on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using + IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode. fargateFIPSMode + - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected. + fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or + infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to + be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to + configure the wait time to retire a Fargate task. For information about the Fargate tasks + maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer + Guide. tagResourceAuthorization - Amazon ECS is introducing tagging authorization for + resource creation. Users must have permissions for actions that create the resource, such + as ecsCreateCluster. 
If tags are specified when you create a resource, Amazon Web Services + performs additional authorization to verify if users or roles have permissions to create + tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For + more information, see Grant permission to tag resources on creation in the Amazon ECS + Developer Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in + Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by + your security administrator in your Amazon ECS account. Amazon GuardDuty controls this + account setting on your behalf. For more information, see Protecting Amazon ECS workloads + with Amazon ECS Runtime Monitoring. - `value`: The account setting value for the specified principal ARN. Accepted values are - enabled, disabled, on, and off. + enabled, disabled, on, and off. When you specify fargateTaskRetirementWaitPeriod for the + name, the following are the valid values: 0 - Amazon Web Services sends the + notification, and immediately retires the affected tasks. 7 - Amazon Web Services sends + the notification, and waits 7 calendar days to retire the tasks. 14 - Amazon Web + Services sends the notification, and waits 14 calendar days to retire the tasks. """ function put_account_setting_default( @@ -2186,8 +2272,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Linux platform 1.4.0 or later. - `"ephemeralStorage"`: The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the - default amount, for tasks hosted on Fargate. For more information, see Fargate task storage - in the Amazon ECS User Guide for Fargate. For tasks using the Fargate launch type, the + default amount, for tasks hosted on Fargate. For more information, see Using data volumes + in tasks in the Amazon ECS Developer Guide. For tasks using the Fargate launch type, the task requires the following platforms: Linux platform version 1.4.0 or later. Windows platform version 1.0.0 or later. - `"executionRoleArn"`: The Amazon Resource Name (ARN) of the task execution role that @@ -2253,14 +2339,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instantiations of the same task on a single container instance when port mappings are used. For more information, see Network settings in the Docker run reference. - `"pidMode"`: The process namespace to use for the containers in the task. The valid - values are host or task. If host is specified, then all containers within the tasks that - specified the host PID mode on the same container instance share the same process namespace - with the host Amazon EC2 instance. If task is specified, all containers within the - specified task share the same process namespace. If no value is specified, the default is a - private namespace. For more information, see PID settings in the Docker run reference. If - the host PID mode is used, be aware that there is a heightened risk of undesired process - namespace expose. For more information, see Docker security. This parameter is not - supported for Windows containers or tasks run on Fargate. + values are host or task. On Fargate for Linux containers, the only valid value is task. For + example, monitoring sidecars might need pidMode to access information about other + containers running in the same task. 
If host is specified, all containers within the tasks + that specified the host PID mode on the same container instance share the same process + namespace with the host Amazon EC2 instance. If task is specified, all containers within + the specified task share the same process namespace. If no value is specified, the default + is a private namespace for each container. For more information, see PID settings in the + Docker run reference. If the host PID mode is used, there's a heightened risk of undesired + process namespace exposure. For more information, see Docker security. This parameter is + not supported for Windows containers. This parameter is only supported for tasks that are + hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This + isn't supported for Windows containers on Fargate. - `"placementConstraints"`: An array of placement constraint objects to use for the task. You can specify a maximum of 10 constraints for each task. This limit includes constraints in the task definition and those specified at runtime. @@ -2276,8 +2366,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys against the compatibilities specified. If no value is specified, the parameter is omitted from the response. - `"runtimePlatform"`: The operating system that your tasks definitions run on. A platform - family is specified only for tasks using the Fargate launch type. When you specify a task - definition in a service, this value must match the runtimePlatform value of the service. + family is specified only for tasks using the Fargate launch type. - `"tags"`: The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value. You define both of them. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 @@ -2336,19 +2425,24 @@ end run_task(task_definition) run_task(task_definition, params::Dict{String,<:Any}) -Starts a new task using the specified task definition. You can allow Amazon ECS to place -tasks for you, or you can customize how Amazon ECS places tasks using placement constraints -and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic -Container Service Developer Guide. Alternatively, you can use StartTask to use your own -scheduler or place tasks manually on specific container instances. Starting April 15, -2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), -and will help current customers migrate their workloads to options that offer better price -and performance. After April 15, 2023, new customers will not be able to launch instances -with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, -customers who have used Amazon EI at least once during the past 30-day period are -considered current customers and will be able to continue using the service. The Amazon -ECS API follows an eventual consistency model. This is because of the distributed nature of -the system supporting the API. This means that the result of an API command you run that +Starts a new task using the specified task definition. On March 21, 2024, a change was +made to resolve the task definition revision before authorization. When a task definition +revision is not specified, authorization will occur using the latest revision of a task +definition. 
You can allow Amazon ECS to place tasks for you, or you can customize how +Amazon ECS places tasks using placement constraints and placement strategies. For more +information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. +Alternatively, you can use StartTask to use your own scheduler or place tasks manually on +specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard +new customers to Amazon Elastic Inference (EI), and will help current customers migrate +their workloads to options that offer better price and performance. After April 15, 2023, +new customers will not be able to launch instances with Amazon EI accelerators in Amazon +SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least +once during the past 30-day period are considered current customers and will be able to +continue using the service. You can attach Amazon EBS volumes to Amazon ECS tasks by +configuring the volume when creating or updating a service. For more infomation, see Amazon +EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API +follows an eventual consistency model. This is because of the distributed nature of the +system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following: @@ -2363,19 +2457,15 @@ gradually up to about five minutes of wait time. # Arguments - `task_definition`: The family and revision (family:revision) or full ARN of the task - definition to run. If a revision isn't specified, the latest ACTIVE revision is used. When - you create a policy for run-task, you can set the resource to be the latest task definition - revision, or a specific revision. The full ARN value must match the value that you - specified as the Resource of the principal's permissions policy. When you specify the - policy resource as the latest task definition version (by setting the Resource in the - policy to arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName), then set this - value to arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName. When you - specify the policy resource as a specific task definition version (by setting the Resource - in the policy to arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1 or - arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*), then set this value - to arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1. For more - information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service - developer Guide. + definition to run. If a revision isn't specified, the latest ACTIVE revision is used. The + full ARN value must match the value that you specified as the Resource of the principal's + permissions policy. When you specify a task definition, you must either specify a specific + revision, or all revisions in the ARN. To specify a specific revision, include the revision + number in the ARN. For example, to specify revision 2, use + arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2. To specify all + revisions, use the wildcard (*) in the ARN. For example, to specify all revisions, use + arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*. 
For more information, + see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2385,6 +2475,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for the cluster is used. When you use cluster auto scaling, you must specify capacityProviderStrategy and not launchType. A capacity provider strategy may contain a maximum of 6 capacity providers. +- `"clientToken"`: An identifier that you provide to ensure the idempotency of the request. + It must be unique and is case sensitive. Up to 64 characters are allowed. The valid + characters are characters in the range of 33-126, inclusive. For more information, see + Ensuring idempotency. - `"cluster"`: The short name or full Amazon Resource Name (ARN) of the cluster to run your task on. If you do not specify a cluster, the default cluster is assumed. - `"count"`: The number of instantiations of the specified task to place on your cluster. @@ -2402,8 +2496,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure. Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more - information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate. The - EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster. The + information, see Fargate capacity providers in the Amazon ECS Developer Guide. The EC2 + launch type runs your tasks on Amazon EC2 instances registered to your cluster. The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster. A task can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter @@ -2441,7 +2535,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the - startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens (-), and + startedBy value. Up to 128 letters (uppercase and lowercase), numbers, hyphens (-), and underscores (_) are allowed. If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it. - `"tags"`: The metadata that you apply to the task to help you categorize and organize @@ -2457,11 +2551,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. +- `"volumeConfigurations"`: The details of the volume that was configuredAtLaunch. You can + configure the size, volumeType, IOPS, throughput, snapshot and encryption in + TaskManagedEBSVolumeConfiguration. The name of the volume must match the name from the task + definition.
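As a concrete illustration of the parameters above, a hedged RunTask sketch; the cluster, subnet, and account values are placeholders, and the generated ECS module is assumed to be loaded via `using AWS; @service Ecs`:

using AWS
@service Ecs

# Pin an explicit task definition revision (so authorization is evaluated against
# that revision) and run one Fargate task; all names and IDs are placeholders.
Ecs.run_task(
    "arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2",
    Dict(
        "cluster" => "my-cluster",
        "count" => 1,
        "launchType" => "FARGATE",
        "networkConfiguration" => Dict(
            "awsvpcConfiguration" => Dict(
                "subnets" => ["subnet-0123456789abcdef0"],
                "assignPublicIp" => "ENABLED",
            ),
        ),
    ),
)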
""" function run_task(taskDefinition; aws_config::AbstractAWSConfig=global_aws_config()) return ecs( "RunTask", - Dict{String,Any}("taskDefinition" => taskDefinition); + Dict{String,Any}( + "taskDefinition" => taskDefinition, "clientToken" => string(uuid4()) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2474,7 +2574,13 @@ function run_task( return ecs( "RunTask", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("taskDefinition" => taskDefinition), params) + mergewith( + _merge, + Dict{String,Any}( + "taskDefinition" => taskDefinition, "clientToken" => string(uuid4()) + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2486,14 +2592,19 @@ end start_task(container_instances, task_definition, params::Dict{String,<:Any}) Starts a new task from the specified task definition on the specified container instance or -instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to -Amazon Elastic Inference (EI), and will help current customers migrate their workloads to -options that offer better price and performance. After April 15, 2023, new customers will -not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon -ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the -past 30-day period are considered current customers and will be able to continue using the -service. Alternatively, you can use RunTask to place tasks for you. For more information, -see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. +instances. On March 21, 2024, a change was made to resolve the task definition revision +before authorization. When a task definition revision is not specified, authorization will +occur using the latest revision of a task definition. Starting April 15, 2023, Amazon Web +Services will not onboard new customers to Amazon Elastic Inference (EI), and will help +current customers migrate their workloads to options that offer better price and +performance. After April 15, 2023, new customers will not be able to launch instances with +Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers +who have used Amazon EI at least once during the past 30-day period are considered current +customers and will be able to continue using the service. Alternatively, you can use +RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon +Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS +tasks by configuring the volume when creating or updating a service. For more infomation, +see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. # Arguments - `container_instances`: The container instance IDs or full ARN entries for the container @@ -2546,6 +2657,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. +- `"volumeConfigurations"`: The details of the volume that was configuredAtLaunch. You can + configure the size, volumeType, IOPS, throughput, snapshot and encryption in + TaskManagedEBSVolumeConfiguration. The name of the volume must match the name from the task + definition. 
""" function start_task( containerInstances, taskDefinition; aws_config::AbstractAWSConfig=global_aws_config() @@ -2591,9 +2706,12 @@ called on a task, the equivalent of docker stop is issued to the containers runn task. This results in a SIGTERM value and a default 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is -sent. The default 30-second timeout can be configured on the Amazon ECS container agent -with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS -Container Agent Configuration in the Amazon Elastic Container Service Developer Guide. +sent. For Windows containers, POSIX signals do not work and runtime stops the container by +sending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful +shutdown of (Windows) container #25982 on GitHub. The default 30-second timeout can be +configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. +For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic +Container Service Developer Guide. # Arguments - `task`: The task ID of the task to stop. @@ -2605,7 +2723,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"reason"`: An optional message specified when a task is stopped. For example, if you're using a custom scheduler, you can use this parameter to specify the reason for stopping the task here, and the message appears in subsequent DescribeTasks API operations on this task. - Up to 255 characters are allowed in this message. """ function stop_task(task; aws_config::AbstractAWSConfig=global_aws_config()) return ecs( @@ -3116,71 +3233,82 @@ end update_service(service) update_service(service, params::Dict{String,<:Any}) -Modifies the parameters of a service. For services using the rolling update (ECS) you can -update the desired count, deployment configuration, network configuration, load balancers, -service registries, enable ECS managed tags option, propagate tags option, task placement -constraints and strategies, and task definition. When you update any of these parameters, -Amazon ECS starts new tasks with the new configuration. For services using the blue/green -(CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, -health check grace period, task placement constraints and strategies, enable ECS managed -tags option, and propagate tags can be updated using this API. If the network -configuration, platform version, task definition, or load balancer need to be updated, -create a new CodeDeploy deployment. For more information, see CreateDeployment in the -CodeDeploy API Reference. For services using an external deployment controller, you can -update only the desired count, task placement constraints and strategies, health check -grace period, enable ECS managed tags option, and propagate tags option, using this API. If -the launch type, load balancer, network configuration, platform version, or task definition -need to be updated, create a new task set For more information, see CreateTaskSet. You can -add to or subtract from the number of instantiations of a task definition in a service by -specifying the cluster that the service is running in and a new desiredCount parameter. 
If -you have updated the Docker image of your application, you can create a new task definition -with that image and deploy it to your service. The service scheduler uses the minimum -healthy percent and maximum percent parameters (in the service's deployment configuration) -to determine the deployment strategy. If your updated Docker image uses the same tag as -what is in the existing task definition for your service (for example, my_image:latest), -you don't need to create a new revision of your task definition. You can update the service -using the forceNewDeployment option. The new tasks launched by the deployment pull the -current image/tag combination from your repository when they start. You can also update -the deployment configuration of a service. When a deployment is triggered by updating the -task definition of a service, the service scheduler uses the deployment configuration -parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. - If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily -during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows -the scheduler to stop two existing tasks before starting two new tasks. Tasks for services -that don't use a load balancer are considered healthy if they're in the RUNNING state. -Tasks for services that use a load balancer are considered healthy if they're in the -RUNNING state and are reported as healthy by the load balancer. The maximumPercent -parameter represents an upper limit on the number of running tasks during a deployment. You -can use it to define the deployment batch size. For example, if desiredCount is four tasks, -a maximum of 200% starts four new tasks before stopping the four older tasks (provided that -the cluster resources required to do this are available). When UpdateService stops a task -during a deployment, the equivalent of docker stop is issued to the containers running in -the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent -and the containers are forcibly stopped. If the container handles the SIGTERM gracefully -and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service -scheduler launches new tasks, it determines task placement in your cluster with the -following logic. Determine which of the container instances in your cluster can support -your service's task definition. For example, they have the required CPU, memory, ports, and -container instance attributes. By default, the service scheduler attempts to balance -tasks across Availability Zones in this manner even though you can choose a different -placement strategy. Sort the valid container instances by the fewest number of running -tasks for this service in the same Availability Zone as the instance. For example, if zone -A has one running service task and zones B and C each have zero, valid container instances -in either zone B or C are considered optimal for placement. Place the new service task on -a valid container instance in an optimal Availability Zone (based on the previous steps), -favoring container instances with the fewest number of running tasks for this service. -When the service scheduler stops running tasks, it attempts to maintain balance across the -Availability Zones in your cluster using the following logic: Sort the container -instances by the largest number of running tasks for this service in the same Availability -Zone as the instance. 
For example, if zone A has one running service task and zones B and C
-each have two, container instances in either zone B or C are considered optimal for
-termination. Stop the task on a container instance in an optimal Availability Zone (based
-on the previous steps), favoring container instances with the largest number of running
-tasks for this service. You must have a service-linked role when you update any of the
-following service properties. If you specified a custom role when you created the service,
-Amazon ECS automatically replaces the roleARN associated with the service with the ARN of
-your service-linked role. For more information, see Service-linked roles in the Amazon
-Elastic Container Service Developer Guide. loadBalancers, serviceRegistries
+Modifies the parameters of a service. On March 21, 2024, a change was made to resolve the
+task definition revision before authorization. When a task definition revision is not
+specified, authorization will occur using the latest revision of a task definition. For
+services using the rolling update (ECS) you can update the desired count, deployment
+configuration, network configuration, load balancers, service registries, enable ECS
+managed tags option, propagate tags option, task placement constraints and strategies, and
+task definition. When you update any of these parameters, Amazon ECS starts new tasks with
+the new configuration. You can attach Amazon EBS volumes to Amazon ECS tasks by
+configuring the volume when starting or running a task, or when creating or updating a
+service. For more information, see Amazon EBS volumes in the Amazon Elastic Container
+Service Developer Guide. You can update your volume configurations and trigger a new
+deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON
+service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For
+more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service
+Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller,
+only the desired count, deployment configuration, health check grace period, task placement
+constraints and strategies, enable ECS managed tags option, and propagate tags can be
+updated using this API. If the network configuration, platform version, task definition, or
+load balancer need to be updated, create a new CodeDeploy deployment. For more information,
+see CreateDeployment in the CodeDeploy API Reference. For services using an external
+deployment controller, you can update only the desired count, task placement constraints
+and strategies, health check grace period, enable ECS managed tags option, and propagate
+tags option, using this API. If the launch type, load balancer, network configuration,
+platform version, or task definition need to be updated, create a new task set. For more
+information, see CreateTaskSet. You can add to or subtract from the number of
+instantiations of a task definition in a service by specifying the cluster that the service
+is running in and a new desiredCount parameter. You can attach Amazon EBS volumes to Amazon
+ECS tasks by configuring the volume when starting or running a task, or when creating or
+updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic
+Container Service Developer Guide. If you have updated the container image of your
+application, you can create a new task definition with that image and deploy it to your
+service.
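As a usage sketch for the UpdateService behavior described above: scaling desiredCount, adjusting the deployment configuration, and updating the EBS volume configuration (which triggers a new deployment when it differs from the current one). The `@service ECS` module is assumed; the names, role ARN, and `managedEBSVolume` field shape are placeholders and assumptions.

using AWS
@service ECS

ECS.update_service(
    "my-service",
    Dict{String,Any}(
        "cluster" => "my-cluster",
        "desiredCount" => 4,
        "deploymentConfiguration" => Dict{String,Any}(
            "minimumHealthyPercent" => 50, "maximumPercent" => 200
        ),
        "volumeConfigurations" => [
            Dict{String,Any}(
                "name" => "app-data",             # must match the task definition volume
                "managedEBSVolume" => Dict{String,Any}(
                    "roleArn" => "arn:aws:iam::111122223333:role/ecsInfrastructureRole",
                    "sizeInGiB" => 50,
                ),
            ),
        ],
    );
    aws_config=global_aws_config(),
)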
The service scheduler uses the minimum healthy percent and maximum percent +parameters (in the service's deployment configuration) to determine the deployment +strategy. If your updated Docker image uses the same tag as what is in the existing task +definition for your service (for example, my_image:latest), you don't need to create a new +revision of your task definition. You can update the service using the forceNewDeployment +option. The new tasks launched by the deployment pull the current image/tag combination +from your repository when they start. You can also update the deployment configuration of +a service. When a deployment is triggered by updating the task definition of a service, the +service scheduler uses the deployment configuration parameters, minimumHealthyPercent and +maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below +100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, +if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing +tasks before starting two new tasks. Tasks for services that don't use a load balancer are +considered healthy if they're in the RUNNING state. Tasks for services that use a load +balancer are considered healthy if they're in the RUNNING state and are reported as healthy +by the load balancer. The maximumPercent parameter represents an upper limit on the +number of running tasks during a deployment. You can use it to define the deployment batch +size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks +before stopping the four older tasks (provided that the cluster resources required to do +this are available). When UpdateService stops a task during a deployment, the equivalent +of docker stop is issued to the containers running in the task. This results in a SIGTERM +and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly +stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from +receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it +determines task placement in your cluster with the following logic. Determine which of +the container instances in your cluster can support your service's task definition. For +example, they have the required CPU, memory, ports, and container instance attributes. By +default, the service scheduler attempts to balance tasks across Availability Zones in this +manner even though you can choose a different placement strategy. Sort the valid +container instances by the fewest number of running tasks for this service in the same +Availability Zone as the instance. For example, if zone A has one running service task and +zones B and C each have zero, valid container instances in either zone B or C are +considered optimal for placement. Place the new service task on a valid container +instance in an optimal Availability Zone (based on the previous steps), favoring container +instances with the fewest number of running tasks for this service. When the service +scheduler stops running tasks, it attempts to maintain balance across the Availability +Zones in your cluster using the following logic: Sort the container instances by the +largest number of running tasks for this service in the same Availability Zone as the +instance. For example, if zone A has one running service task and zones B and C each have +two, container instances in either zone B or C are considered optimal for termination. 
+Stop the task on a container instance in an optimal Availability Zone (based on the +previous steps), favoring container instances with the largest number of running tasks for +this service. You must have a service-linked role when you update any of the following +service properties: loadBalancers, serviceRegistries For more information about +the role see the CreateService request parameter role . # Arguments - `service`: The name of the service to update. @@ -3287,6 +3415,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys revision is used. If you modify the task definition with UpdateService, Amazon ECS spawns a task with the new version of the task definition and then stops an old task after the new version is running. +- `"volumeConfigurations"`: The details of the volume that was configuredAtLaunch. You can + configure the size, volumeType, IOPS, throughput, snapshot and encryption in + ServiceManagedEBSVolumeConfiguration. The name of the volume must match the name from the + task definition. If set to null, no new deployment is triggered. Otherwise, if this + configuration differs from the existing one, it triggers a new deployment. """ function update_service(service; aws_config::AbstractAWSConfig=global_aws_config()) return ecs( diff --git a/src/services/efs.jl b/src/services/efs.jl index 53fd04f413..c401d7506d 100644 --- a/src/services/efs.jl +++ b/src/services/efs.jl @@ -16,10 +16,14 @@ exposed as the access point's root directory. Applications using the access poin access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points. If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near -the limit of 1000 access points, you may experience a throttling response for these +the limit of 1,000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit. This operation requires permissions for the elasticfilesystem:CreateAccessPoint -action. +action. Access points can be tagged on creation. If tags are specified in the creation +action, IAM performs additional authorization on the elasticfilesystem:TagResource action +to verify if users have permissions to create tags. Therefore, you must grant explicit +permissions to use the elasticfilesystem:TagResource action. For more information, see +Granting permissions to tag resources during creation. # Arguments - `client_token`: A string of up to 64 ASCII characters that Amazon EFS uses to ensure @@ -30,10 +34,10 @@ action. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"PosixUser"`: The operating system user and group applied to all file system requests made using the access point. -- `"RootDirectory"`: Specifies the directory on the Amazon EFS file system that the access - point exposes as the root directory of your file system to NFS clients using the access - point. The clients using the access point can only access the root directory and below. If - the RootDirectory > Path specified does not exist, EFS creates it and applies the +- `"RootDirectory"`: Specifies the directory on the EFS file system that the access point + exposes as the root directory of your file system to NFS clients using the access point. + The clients using the access point can only access the root directory and below. 
If the + RootDirectory > Path specified does not exist, Amazon EFS creates it and applies the CreationInfo settings when a client connects to an access point. When specifying a RootDirectory, you must provide the Path, and the CreationInfo. Amazon EFS creates a root directory only if you have provided the CreationInfo: OwnUid, OwnGID, and permissions for @@ -90,7 +94,7 @@ does the following: Creates a new, empty file system. The file system will hav EFS assigned ID, and an initial lifecycle state creating. Returns with the description of the created file system. Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system. For basic use cases, you can use a randomly -generated UUID for the creation token. The idempotent operation allows you to retry a +generated UUID for the creation token. The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your @@ -101,17 +105,24 @@ Amazon EFS User Guide. The CreateFileSystem call returns while the file system' state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state. This operation accepts an optional PerformanceMode parameter that you choose for your file -system. We recommend generalPurpose performance mode for most file systems. File systems -using the maxIO performance mode can scale to higher levels of aggregate throughput and -operations per second with a tradeoff of slightly higher latencies for most file -operations. The performance mode can't be changed after the file system has been created. -For more information, see Amazon EFS performance modes. You can set the throughput mode for -the file system using the ThroughputMode parameter. After the file system is fully created, -Amazon EFS sets its lifecycle state to available, at which point you can create one or more -mount targets for the file system in your VPC. For more information, see CreateMountTarget. -You mount your Amazon EFS file system on an EC2 instances in your VPC by using the mount -target. For more information, see Amazon EFS: How it Works. This operation requires -permissions for the elasticfilesystem:CreateFileSystem action. +system. We recommend generalPurpose performance mode for all file systems. File systems +using the maxIO mode is a previous generation performance type that is designed for highly +parallelized workloads that can tolerate higher latencies than the General Purpose mode. +Max I/O mode is not supported for One Zone file systems or file systems that use Elastic +throughput. Due to the higher per-operation latencies with Max I/O, we recommend using +General Purpose performance mode for all file systems. The performance mode can't be +changed after the file system has been created. For more information, see Amazon EFS +performance modes. You can set the throughput mode for the file system using the +ThroughputMode parameter. After the file system is fully created, Amazon EFS sets its +lifecycle state to available, at which point you can create one or more mount targets for +the file system in your VPC. For more information, see CreateMountTarget. You mount your +Amazon EFS file system on an EC2 instances in your VPC by using the mount target. 
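To illustrate CreateAccessPoint as described above (POSIX identity, enforced root directory with CreationInfo, and tagging on creation, which also requires elasticfilesystem:TagResource): a sketch assuming the `@service EFS` module and the generated positional (ClientToken, FileSystemId) argument order; the file system ID and values are placeholders.

using AWS
using UUIDs
@service EFS

EFS.create_access_point(
    string(uuid4()),                              # ClientToken
    "fs-0123456789abcdef0",                       # placeholder FileSystemId
    Dict{String,Any}(
        "PosixUser" => Dict("Uid" => 1001, "Gid" => 1001),
        "RootDirectory" => Dict{String,Any}(
            "Path" => "/app-data",
            "CreationInfo" => Dict(
                "OwnerUid" => 1001, "OwnerGid" => 1001, "Permissions" => "750"
            ),
        ),
        "Tags" => [Dict("Key" => "Name", "Value" => "app-data-ap")],
    );
    aws_config=global_aws_config(),
)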
For more +information, see Amazon EFS: How it Works. This operation requires permissions for the +elasticfilesystem:CreateFileSystem action. File systems can be tagged on creation. If tags +are specified in the creation action, IAM performs additional authorization on the +elasticfilesystem:TagResource action to verify if users have permissions to create tags. +Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource +action. For more information, see Granting permissions to tag resources during creation. # Arguments - `creation_token`: A string of up to 64 ASCII characters. Amazon EFS uses this to ensure @@ -119,18 +130,18 @@ permissions for the elasticfilesystem:CreateFileSystem action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AvailabilityZoneName"`: Used to create a file system that uses One Zone storage - classes. It specifies the Amazon Web Services Availability Zone in which to create the file - system. Use the format us-east-1a to specify the Availability Zone. For more information - about One Zone storage classes, see Using EFS storage classes in the Amazon EFS User Guide. - One Zone storage classes are not available in all Availability Zones in Amazon Web - Services Regions where Amazon EFS is available. +- `"AvailabilityZoneName"`: Used to create a One Zone file system. It specifies the Amazon + Web Services Availability Zone in which to create the file system. Use the format + us-east-1a to specify the Availability Zone. For more information about One Zone file + systems, see Using EFS storage classes in the Amazon EFS User Guide. One Zone file systems + are not available in all Availability Zones in Amazon Web Services Regions where Amazon EFS + is available. - `"Backup"`: Specifies whether automatic backups are enabled on the file system that you - are creating. Set the value to true to enable automatic backups. If you are creating a file - system that uses One Zone storage classes, automatic backups are enabled by default. For - more information, see Automatic backups in the Amazon EFS User Guide. Default is false. - However, if you specify an AvailabilityZoneName, the default is true. Backup is not - available in all Amazon Web Services Regions where Amazon EFS is available. + are creating. Set the value to true to enable automatic backups. If you are creating a One + Zone file system, automatic backups are enabled by default. For more information, see + Automatic backups in the Amazon EFS User Guide. Default is false. However, if you specify + an AvailabilityZoneName, the default is true. Backup is not available in all Amazon Web + Services Regions where Amazon EFS is available. - `"Encrypted"`: A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying an existing Key Management Service key (KMS key). If you don't specify a KMS key, then the default KMS key @@ -147,17 +158,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys If you use KmsKeyId, you must set the CreateFileSystemRequestEncrypted parameter to true. EFS accepts only symmetric KMS keys. You cannot use asymmetric KMS keys with Amazon EFS file systems. -- `"PerformanceMode"`: The performance mode of the file system. We recommend generalPurpose - performance mode for most file systems. File systems using the maxIO performance mode can +- `"PerformanceMode"`: The Performance mode of the file system. 
We recommend generalPurpose + performance mode for all file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed - after the file system has been created. The maxIO mode is not supported on file systems - using One Zone storage classes. -- `"ProvisionedThroughputInMibps"`: The throughput, measured in MiB/s, that you want to - provision for a file system that you're creating. Valid values are 1-1024. Required if - ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. To - increase this limit, contact Amazon Web Services Support. For more information, see Amazon - EFS quotas that you can increase in the Amazon EFS User Guide. + after the file system has been created. The maxIO mode is not supported on One Zone file + systems. Due to the higher per-operation latencies with Max I/O, we recommend using + General Purpose performance mode for all file systems. Default is generalPurpose. +- `"ProvisionedThroughputInMibps"`: The throughput, measured in mebibytes per second + (MiBps), that you want to provision for a file system that you're creating. Required if + ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit + depending on Region. To increase this limit, contact Amazon Web Services Support. For more + information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide. - `"Tags"`: Use to create one or more tags associated with the file system. Each tag is a user-defined key-value pair. Name your file system on creation by including a \"Key\":\"Name\",\"Value\":\"{value}\" key-value pair. Each key must be unique. For more @@ -166,9 +178,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ThroughputMode"`: Specifies the throughput mode for the file system. The mode can be bursting, provisioned, or elastic. If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps. After you create the file system, you can - decrease your file system's throughput in Provisioned Throughput mode or change between the - throughput modes, with certain time restrictions. For more information, see Specifying - throughput with provisioned mode in the Amazon EFS User Guide. Default is bursting. + decrease your file system's Provisioned throughput or change between the throughput modes, + with certain time restrictions. For more information, see Specifying throughput with + provisioned mode in the Amazon EFS User Guide. Default is bursting. """ function create_file_system( CreationToken; aws_config::AbstractAWSConfig=global_aws_config() @@ -207,62 +219,61 @@ Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone sh single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. You -can create only one mount target for an EFS file system using One Zone storage classes. You -must create that mount target in the same Availability Zone in which the file system is -located. Use the AvailabilityZoneName and AvailabiltyZoneId properties in the -DescribeFileSystems response object to get this information. 
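A sketch of CreateFileSystem using the parameters above: an encrypted General Purpose file system with Elastic throughput, automatic backups, and a Name tag, with the creation token providing idempotency. The `@service EFS` module is assumed and all values are placeholders.

using AWS
using UUIDs
@service EFS

EFS.create_file_system(
    string(uuid4()),                              # CreationToken (idempotency)
    Dict{String,Any}(
        "PerformanceMode" => "generalPurpose",
        "ThroughputMode" => "elastic",
        "Encrypted" => true,
        "Backup" => true,
        "Tags" => [Dict("Key" => "Name", "Value" => "shared-data")],
        # For a One Zone file system, also set e.g. "AvailabilityZoneName" => "us-east-1a".
    );
    aws_config=global_aws_config(),
)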
Use the subnetId associated -with the file system's Availability Zone when creating the mount target. For more -information, see Amazon EFS: How it Works. To create a mount target for a file system, the -file system's lifecycle state must be available. For more information, see -DescribeFileSystems. In the request, provide the following: The file system ID for which -you are creating the mount target. A subnet ID, which determines the following: The VPC -in which Amazon EFS creates the mount target The Availability Zone in which Amazon EFS -creates the mount target The IP address range from which Amazon EFS selects the IP -address of the mount target (if you don't specify an IP address in the request) After -creating the mount target, Amazon EFS returns a response that includes, a MountTargetId and -an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You -can also use the mount target's DNS name when mounting the file system. The EC2 instance on -which you mount the file system by using the mount target can resolve the mount target's -DNS name to its IP address. For more information, see How it Works: Implementation -Overview. Note that you can create mount targets for a file system in only one VPC, and -there can be only one mount target per Availability Zone. That is, if the file system -already has one or more mount targets created for it, the subnet specified in the request -to add another mount target must meet the following requirements: Must belong to the same -VPC as the subnets of the existing mount targets Must not be in the same Availability -Zone as any of the subnets of the existing mount targets If the request satisfies the -requirements, Amazon EFS does the following: Creates a new mount target in the specified -subnet. Also creates a new network interface in the subnet as follows: If the request -provides an IpAddress, Amazon EFS assigns that IP address to the network interface. -Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon -EC2 CreateNetworkInterface call does when a request does not specify a primary private IP -address). If the request provides SecurityGroups, this network interface is associated -with those security groups. Otherwise, it belongs to the default security group for the -subnet's VPC. Assigns the description Mount target fsmt-id for file system fs-id where -fsmt-id is the mount target ID, and fs-id is the FileSystemId. Sets the -requesterManaged property of the network interface to true, and the requesterId value to -EFS. Each Amazon EFS mount target has one corresponding requester-managed EC2 network -interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId -field in the mount target's description to the network interface ID, and the IpAddress -field to its address. If network interface creation fails, the entire CreateMountTarget -operation fails. The CreateMountTarget call returns only after creating the network -interface, but while the mount target state is still creating, you can check the mount -target creation status by calling the DescribeMountTargets operation, which among other -things returns the mount target state. We recommend that you create a mount target in each -of the Availability Zones. There are cost considerations for using a file system in an -Availability Zone through a mount target created in another Availability Zone. For more -information, see Amazon EFS. 
In addition, by always using a mount target local to the -instance's Availability Zone, you eliminate a partial failure scenario. If the Availability -Zone in which your mount target is created goes down, then you can't access your file -system through that mount target. This operation requires permissions for the following -action on the file system: elasticfilesystem:CreateMountTarget This operation also -requires permissions for the following Amazon EC2 actions: ec2:DescribeSubnets -ec2:DescribeNetworkInterfaces ec2:CreateNetworkInterface +can create only one mount target for a One Zone file system. You must create that mount +target in the same Availability Zone in which the file system is located. Use the +AvailabilityZoneName and AvailabiltyZoneId properties in the DescribeFileSystems response +object to get this information. Use the subnetId associated with the file system's +Availability Zone when creating the mount target. For more information, see Amazon EFS: How +it Works. To create a mount target for a file system, the file system's lifecycle state +must be available. For more information, see DescribeFileSystems. In the request, provide +the following: The file system ID for which you are creating the mount target. A subnet +ID, which determines the following: The VPC in which Amazon EFS creates the mount target + The Availability Zone in which Amazon EFS creates the mount target The IP address range +from which Amazon EFS selects the IP address of the mount target (if you don't specify an +IP address in the request) After creating the mount target, Amazon EFS returns a +response that includes, a MountTargetId and an IpAddress. You use this IP address when +mounting the file system in an EC2 instance. You can also use the mount target's DNS name +when mounting the file system. The EC2 instance on which you mount the file system by using +the mount target can resolve the mount target's DNS name to its IP address. For more +information, see How it Works: Implementation Overview. Note that you can create mount +targets for a file system in only one VPC, and there can be only one mount target per +Availability Zone. That is, if the file system already has one or more mount targets +created for it, the subnet specified in the request to add another mount target must meet +the following requirements: Must belong to the same VPC as the subnets of the existing +mount targets Must not be in the same Availability Zone as any of the subnets of the +existing mount targets If the request satisfies the requirements, Amazon EFS does the +following: Creates a new mount target in the specified subnet. Also creates a new +network interface in the subnet as follows: If the request provides an IpAddress, Amazon +EFS assigns that IP address to the network interface. Otherwise, Amazon EFS assigns a free +address in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call does +when a request does not specify a primary private IP address). If the request provides +SecurityGroups, this network interface is associated with those security groups. Otherwise, +it belongs to the default security group for the subnet's VPC. Assigns the description +Mount target fsmt-id for file system fs-id where fsmt-id is the mount target ID, and +fs-id is the FileSystemId. Sets the requesterManaged property of the network interface +to true, and the requesterId value to EFS. Each Amazon EFS mount target has one +corresponding requester-managed EC2 network interface. 
After the network interface is +created, Amazon EFS sets the NetworkInterfaceId field in the mount target's description to +the network interface ID, and the IpAddress field to its address. If network interface +creation fails, the entire CreateMountTarget operation fails. The CreateMountTarget call +returns only after creating the network interface, but while the mount target state is +still creating, you can check the mount target creation status by calling the +DescribeMountTargets operation, which among other things returns the mount target state. +We recommend that you create a mount target in each of the Availability Zones. There are +cost considerations for using a file system in an Availability Zone through a mount target +created in another Availability Zone. For more information, see Amazon EFS. In addition, by +always using a mount target local to the instance's Availability Zone, you eliminate a +partial failure scenario. If the Availability Zone in which your mount target is created +goes down, then you can't access your file system through that mount target. This +operation requires permissions for the following action on the file system: +elasticfilesystem:CreateMountTarget This operation also requires permissions for the +following Amazon EC2 actions: ec2:DescribeSubnets ec2:DescribeNetworkInterfaces +ec2:CreateNetworkInterface # Arguments - `file_system_id`: The ID of the file system for which to create the mount target. -- `subnet_id`: The ID of the subnet to add the mount target in. For file systems that use - One Zone storage classes, use the subnet that is associated with the file system's - Availability Zone. +- `subnet_id`: The ID of the subnet to add the mount target in. For One Zone file systems, + use the subnet that is associated with the file system's Availability Zone. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -308,35 +319,38 @@ end Creates a replication configuration that replicates an existing EFS file system to a new, read-only file system. For more information, see Amazon EFS replication in the Amazon EFS -User Guide. The replication configuration specifies the following: Source file system - -An existing EFS file system that you want replicated. The source file system cannot be a -destination file system in an existing replication configuration. Destination file -system configuration - The configuration of the destination file system to which the source -file system will be replicated. There can only be one destination file system in a -replication configuration. The destination file system configuration consists of the -following properties: Amazon Web Services Region - The Amazon Web Services Region in -which the destination file system is created. Amazon EFS replication is available in all -Amazon Web Services Regions that Amazon EFS is available in, except Africa (Cape Town), -Asia Pacific (Hong Kong), Asia Pacific (Jakarta), Europe (Milan), and Middle East -(Bahrain). Availability Zone - If you want the destination file system to use EFS One -Zone availability and durability, you must specify the Availability Zone to create the file -system in. For more information about EFS storage classes, see Amazon EFS storage classes -in the Amazon EFS User Guide. Encryption - All destination file systems are created with -encryption at rest enabled. You can specify the Key Management Service (KMS) key that is -used to encrypt the destination file system. 
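A sketch of CreateMountTarget per the description above, creating one mount target in a subnet of the target Availability Zone; the IDs are placeholders and the positional (FileSystemId, SubnetId) order is assumed from the generated wrapper.

using AWS
@service EFS

EFS.create_mount_target(
    "fs-0123456789abcdef0",                       # placeholder file system ID
    "subnet-0a1b2c3d4e5f67890",                   # subnet in the desired Availability Zone
    Dict{String,Any}("SecurityGroups" => ["sg-0123456789abcdef0"]);
    aws_config=global_aws_config(),
)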
If you don't specify a KMS key, your
-service-managed KMS key for Amazon EFS is used. After the file system is created, you
-cannot change the KMS key. The following properties are set by default: Performance
-mode - The destination file system's performance mode matches that of the source file
-system, unless the destination file system uses EFS One Zone storage. In that case, the
-General Purpose performance mode is used. The performance mode cannot be changed.
-Throughput mode - The destination file system's throughput mode matches that of the source
-file system. After the file system is created, you can modify the throughput mode. The
-following properties are turned off by default: Lifecycle management - EFS lifecycle
-management and EFS Intelligent-Tiering are not enabled on the destination file system.
-After the destination file system is created, you can enable EFS lifecycle management and
-EFS Intelligent-Tiering. Automatic backups - Automatic daily backups not enabled on the
-destination file system. After the file system is created, you can change this setting.
-For more information, see Amazon EFS replication in the Amazon EFS User Guide.
+User Guide. The replication configuration specifies the following: Source file system
+– The EFS file system that you want replicated. The source file system cannot be a
+destination file system in an existing replication configuration. Amazon Web Services
+Region – The Amazon Web Services Region in which the destination file system is created.
+Amazon EFS replication is available in all Amazon Web Services Regions in which EFS is
+available. The Region must be enabled. For more information, see Managing Amazon Web
+Services Regions in the Amazon Web Services General Reference Guide.
+Destination file system configuration – The configuration of the destination file system
+to which the source file system will be replicated. There can only be one destination file
+system in a replication configuration. Parameters for the replication configuration
+include: File system ID – The ID of the destination file system for the replication.
+If no ID is provided, then EFS creates a new file system with the default settings. For
+existing file systems, the file system's replication overwrite protection must be disabled.
+For more information, see Replicating to an existing file system. Availability Zone –
+If you want the destination file system to use One Zone storage, you must specify the
+Availability Zone to create the file system in. For more information, see EFS file system
+types in the Amazon EFS User Guide. Encryption – All destination file systems are
+created with encryption at rest enabled. You can specify the Key Management Service (KMS)
+key that is used to encrypt the destination file system. If you don't specify a KMS key,
+your service-managed KMS key for Amazon EFS is used. After the file system is created,
+you cannot change the KMS key. For new destination file systems, the following properties
+are set by default: Performance mode - The destination file system's performance mode
+matches that of the source file system, unless the destination file system uses EFS One
+Zone storage. In that case, the General Purpose performance mode is used. The performance
+mode cannot be changed. Throughput mode - The destination file system's throughput mode
+matches that of the source file system. After the file system is created, you can modify
+the throughput mode.
Lifecycle management – Lifecycle management is not enabled on the destination +file system. After the destination file system is created, you can enable lifecycle +management. Automatic backups – Automatic daily backups are enabled on the destination +file system. After the file system is created, you can change this setting. For more +information, see Amazon EFS replication in the Amazon EFS User Guide. # Arguments - `destinations`: An array of destination configuration objects. Only one destination @@ -585,11 +599,11 @@ end delete_replication_configuration(source_file_system_id) delete_replication_configuration(source_file_system_id, params::Dict{String,<:Any}) -Deletes an existing replication configuration. To delete a replication configuration, you -must make the request from the Amazon Web Services Region in which the destination file -system is located. Deleting a replication configuration ends the replication process. After -a replication configuration is deleted, the destination file system is no longer read-only. -You can write to the destination file system after its status becomes Writeable. +Deletes a replication configuration. Deleting a replication configuration ends the +replication process. After a replication configuration is deleted, the destination file +system becomes Writeable and its replication overwrite protection is re-enabled. For more +information, see Delete a replication configuration. This operation requires permissions +for the elasticfilesystem:DeleteReplicationConfiguration action. # Arguments - `source_file_system_id`: The ID of the source file system in the replication @@ -709,8 +723,7 @@ end describe_account_preferences(params::Dict{String,<:Any}) Returns the account preferences settings for the Amazon Web Services account associated -with the user making the request, in the current Amazon Web Services Region. For more -information, see Managing Amazon EFS resource IDs. +with the user making the request, in the current Amazon Web Services Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -747,7 +760,7 @@ end Returns the backup policy for the specified EFS file system. # Arguments -- `file_system_id`: Specifies which EFS file system to retrieve the BackupPolicy for. +- `file_system_id`: Specifies which EFS file system for which to retrieve the BackupPolicy. """ function describe_backup_policy( @@ -867,11 +880,9 @@ end describe_lifecycle_configuration(file_system_id, params::Dict{String,<:Any}) Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. -EFS lifecycle management uses the LifecycleConfiguration object to identify which files to -move to the EFS Infrequent Access (IA) storage class. For a file system without a -LifecycleConfiguration object, the call returns an empty array in the response. When EFS -Intelligent-Tiering is enabled, TransitionToPrimaryStorageClass has a value of -AFTER_1_ACCESS. This operation requires permissions for the +Lifecycle management uses the LifecycleConfiguration object to identify when to move files +between storage classes. For a file system without a LifecycleConfiguration object, the +call returns an empty array in the response. This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation. 
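A sketch of CreateReplicationConfiguration per the description above: replicating to another Region and, when targeting an existing destination file system, first disabling its replication overwrite protection. The `@service EFS` module, the IDs, the Region, and the (Destinations, SourceFileSystemId) argument order are assumptions.

using AWS
@service EFS

cfg = global_aws_config()

# Only needed when replicating into an existing destination file system.
EFS.update_file_system_protection(
    "fs-0fedcba9876543210",                       # placeholder destination file system ID
    Dict{String,Any}("ReplicationOverwriteProtection" => "DISABLED");
    aws_config=cfg,
)

EFS.create_replication_configuration(
    [Dict{String,Any}("Region" => "us-west-2", "FileSystemId" => "fs-0fedcba9876543210")],
    "fs-0123456789abcdef0";                       # placeholder source file system ID
    aws_config=cfg,
)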
# Arguments @@ -1311,44 +1322,52 @@ end put_lifecycle_configuration(file_system_id, lifecycle_policies) put_lifecycle_configuration(file_system_id, lifecycle_policies, params::Dict{String,<:Any}) -Use this action to manage EFS lifecycle management and EFS Intelligent-Tiering. A -LifecycleConfiguration consists of one or more LifecyclePolicy objects that define the -following: EFS Lifecycle management - When Amazon EFS automatically transitions files in -a file system into the lower-cost EFS Infrequent Access (IA) storage class. To enable EFS -Lifecycle management, set the value of TransitionToIA to one of the available options. -EFS Intelligent-Tiering - When Amazon EFS automatically transitions files from IA back into -the file system's primary storage class (EFS Standard or EFS One Zone Standard). To enable -EFS Intelligent-Tiering, set the value of TransitionToPrimaryStorageClass to -AFTER_1_ACCESS. For more information, see EFS Lifecycle Management. Each Amazon EFS file -system supports one lifecycle configuration, which applies to all files in the file system. -If a LifecycleConfiguration object already exists for the specified file system, a +Use this action to manage storage for your file system. A LifecycleConfiguration consists +of one or more LifecyclePolicy objects that define the following: TransitionToIA – +When to move files in the file system from primary storage (Standard storage class) into +the Infrequent Access (IA) storage. TransitionToArchive – When to move files in the +file system from their current storage class (either IA or Standard storage) into the +Archive storage. File systems cannot transition into Archive storage before transitioning +into IA storage. Therefore, TransitionToArchive must either not be set or must be later +than TransitionToIA. The Archive storage class is available only for file systems that +use the Elastic Throughput mode and the General Purpose Performance mode. +TransitionToPrimaryStorageClass – Whether to move files in the file system back to +primary storage (Standard storage class) after they are accessed in IA or Archive storage. + For more information, see Managing file system storage. Each Amazon EFS file system +supports one lifecycle configuration, which applies to all files in the file system. If a +LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body -deletes any existing LifecycleConfiguration and turns off lifecycle management and EFS -Intelligent-Tiering for the file system. In the request, specify the following: The ID -for the file system for which you are enabling, disabling, or modifying lifecycle -management and EFS Intelligent-Tiering. A LifecyclePolicies array of LifecyclePolicy -objects that define when files are moved into IA storage, and when they are moved back to -Standard storage. Amazon EFS requires that each LifecyclePolicy object have only have a -single transition, so the LifecyclePolicies array needs to be structured with separate -LifecyclePolicy objects. See the example requests in the following section for more -information. This operation requires permissions for the -elasticfilesystem:PutLifecycleConfiguration operation. To apply a LifecycleConfiguration -object to an encrypted file system, you need the same Key Management Service permissions as -when you created the encrypted file system. 
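Before the parameter details below, a sketch of PutLifecycleConfiguration reflecting the one-transition-per-LifecyclePolicy rule: each transition is its own object in the LifecyclePolicies array. The `@service EFS` module and the file system ID are assumptions; TransitionToArchive additionally requires Elastic throughput and General Purpose performance mode.

using AWS
@service EFS

EFS.put_lifecycle_configuration(
    "fs-0123456789abcdef0",                       # placeholder file system ID
    [
        Dict{String,Any}("TransitionToIA" => "AFTER_30_DAYS"),
        Dict{String,Any}("TransitionToArchive" => "AFTER_90_DAYS"),
        Dict{String,Any}("TransitionToPrimaryStorageClass" => "AFTER_1_ACCESS"),
    ];
    aws_config=global_aws_config(),
)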
+deletes any existing LifecycleConfiguration. In the request, specify the following: The +ID for the file system for which you are enabling, disabling, or modifying Lifecycle +management. A LifecyclePolicies array of LifecyclePolicy objects that define when to move +files to IA storage, to Archive storage, and back to primary storage. Amazon EFS requires +that each LifecyclePolicy object have only have a single transition, so the +LifecyclePolicies array needs to be structured with separate LifecyclePolicy objects. See +the example requests in the following section for more information. This operation +requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation. To +apply a LifecycleConfiguration object to an encrypted file system, you need the same Key +Management Service permissions as when you created the encrypted file system. # Arguments - `file_system_id`: The ID of the file system for which you are creating the LifecycleConfiguration object (String). - `lifecycle_policies`: An array of LifecyclePolicy objects that define the file system's - LifecycleConfiguration object. A LifecycleConfiguration object informs EFS lifecycle - management and EFS Intelligent-Tiering of the following: When to move files in the file - system from primary storage to the IA storage class. When to move files that are in IA - storage to primary storage. When using the put-lifecycle-configuration CLI command or - the PutLifecycleConfiguration API action, Amazon EFS requires that each LifecyclePolicy - object have only a single transition. This means that in a request body, LifecyclePolicies - must be structured as an array of LifecyclePolicy objects, one object for each transition, - TransitionToIA, TransitionToPrimaryStorageClass. See the example requests in the following + LifecycleConfiguration object. A LifecycleConfiguration object informs EFS Lifecycle + management of the following: TransitionToIA – When to move files in the file system + from primary storage (Standard storage class) into the Infrequent Access (IA) storage. + TransitionToArchive – When to move files in the file system from their current storage + class (either IA or Standard storage) into the Archive storage. File systems cannot + transition into Archive storage before transitioning into IA storage. Therefore, + TransitionToArchive must either not be set or must be later than TransitionToIA. The + Archive storage class is available only for file systems that use the Elastic Throughput + mode and the General Purpose Performance mode. TransitionToPrimaryStorageClass – + Whether to move files in the file system back to primary storage (Standard storage class) + after they are accessed in IA or Archive storage. When using the + put-lifecycle-configuration CLI command or the PutLifecycleConfiguration API action, Amazon + EFS requires that each LifecyclePolicy object have only a single transition. This means + that in a request body, LifecyclePolicies must be structured as an array of LifecyclePolicy + objects, one object for each storage transition. See the example requests in the following section for more information. """ @@ -1471,10 +1490,11 @@ system. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ProvisionedThroughputInMibps"`: (Optional) Sets the amount of provisioned throughput, - in MiB/s, for the file system. Valid values are 1-1024. 
If you are changing the throughput - mode to provisioned, you must also provide the amount of provisioned throughput. Required - if ThroughputMode is changed to provisioned on update. +- `"ProvisionedThroughputInMibps"`: (Optional) The throughput, measured in mebibytes per + second (MiBps), that you want to provision for a file system that you're creating. Required + if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper + limit depending on Region. To increase this limit, contact Amazon Web Services Support. For + more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide. - `"ThroughputMode"`: (Optional) Updates the file system's throughput mode. If you're not updating your throughput mode, you don't need to provide this value in your request. If you are changing the ThroughputMode to provisioned, you must also set a value for @@ -1501,3 +1521,50 @@ function update_file_system( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_file_system_protection(file_system_id) + update_file_system_protection(file_system_id, params::Dict{String,<:Any}) + +Updates protection on the file system. This operation requires permissions for the +elasticfilesystem:UpdateFileSystemProtection action. + +# Arguments +- `file_system_id`: The ID of the file system to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ReplicationOverwriteProtection"`: The status of the file system's replication overwrite + protection. ENABLED – The file system cannot be used as the destination file system in + a replication configuration. The file system is writeable. Replication overwrite protection + is ENABLED by default. DISABLED – The file system can be used as the destination file + system in a replication configuration. The file system is read-only and can only be + modified by EFS replication. REPLICATING – The file system is being used as the + destination file system in a replication configuration. The file system is read-only and is + only modified only by EFS replication. If the replication configuration is deleted, the + file system's replication overwrite protection is re-enabled, the file system becomes + writeable. +""" +function update_file_system_protection( + FileSystemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return efs( + "PUT", + "/2015-02-01/file-systems/$(FileSystemId)/protection"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_file_system_protection( + FileSystemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return efs( + "PUT", + "/2015-02-01/file-systems/$(FileSystemId)/protection", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/eks.jl b/src/services/eks.jl index f1967fcdd8..0c0fe28615 100644 --- a/src/services/eks.jl +++ b/src/services/eks.jl @@ -4,23 +4,79 @@ using AWS.AWSServices: eks using AWS.Compat using AWS.UUIDs +""" + associate_access_policy(access_scope, name, policy_arn, principal_arn) + associate_access_policy(access_scope, name, policy_arn, principal_arn, params::Dict{String,<:Any}) + +Associates an access policy and its scope to an access entry. For more information about +associating access policies, see Associating and disassociating access policies to and from +access entries in the Amazon EKS User Guide. + +# Arguments +- `access_scope`: The scope for the AccessPolicy. 
You can scope access policies to an + entire cluster or to specific Kubernetes namespaces. +- `name`: The name of your cluster. +- `policy_arn`: The ARN of the AccessPolicy that you're associating. For a list of ARNs, + use ListAccessPolicies. +- `principal_arn`: The Amazon Resource Name (ARN) of the IAM user or role for the + AccessEntry that you're associating the access policy to. + +""" +function associate_access_policy( + accessScope, + name, + policyArn, + principalArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/clusters/$(name)/access-entries/$(principalArn)/access-policies", + Dict{String,Any}("accessScope" => accessScope, "policyArn" => policyArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_access_policy( + accessScope, + name, + policyArn, + principalArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/clusters/$(name)/access-entries/$(principalArn)/access-policies", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("accessScope" => accessScope, "policyArn" => policyArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_encryption_config(encryption_config, name) associate_encryption_config(encryption_config, name, params::Dict{String,<:Any}) -Associate encryption configuration to an existing cluster. You can use this API to enable -encryption on existing clusters which do not have encryption already enabled. This allows -you to implement a defense-in-depth security strategy without migrating applications to new +Associates an encryption configuration to an existing cluster. Use this API to enable +encryption on existing clusters that don't already have encryption enabled. This allows you +to implement a defense-in-depth security strategy without migrating applications to new Amazon EKS clusters. # Arguments - `encryption_config`: The configuration you are using for encryption. -- `name`: The name of the cluster that you are associating with encryption configuration. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: The client request token you are using with the encryption - configuration. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. """ function associate_encryption_config( encryptionConfig, name; aws_config::AbstractAWSConfig=global_aws_config() @@ -63,23 +119,24 @@ end associate_identity_provider_config(name, oidc) associate_identity_provider_config(name, oidc, params::Dict{String,<:Any}) -Associate an identity provider configuration to a cluster. If you want to authenticate +Associates an identity provider configuration to a cluster. If you want to authenticate identities using an identity provider, you can create an identity provider configuration and associate it to your cluster. After configuring authentication to your cluster you can -create Kubernetes roles and clusterroles to assign permissions to the roles, and then bind -the roles to the identities using Kubernetes rolebindings and clusterrolebindings. For more -information see Using RBAC Authorization in the Kubernetes documentation. 
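A sketch of AssociateAccessPolicy as defined above, scoping a read-only policy to specific namespaces for an IAM role; the cluster name, namespaces, and role ARN are placeholders, and the policy ARN should be confirmed with the ListAccessPolicies operation.

using AWS
@service EKS

EKS.associate_access_policy(
    Dict{String,Any}("type" => "namespace", "namespaces" => ["default", "staging"]),
    "my-cluster",
    "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy",
    "arn:aws:iam::111122223333:role/eks-read-only";
    aws_config=global_aws_config(),
)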
+create Kubernetes Role and ClusterRole objects, assign permissions to them, and then bind +them to the identities using Kubernetes RoleBinding and ClusterRoleBinding objects. For +more information see Using RBAC Authorization in the Kubernetes documentation. # Arguments -- `name`: The name of the cluster to associate the configuration to. +- `name`: The name of your cluster. - `oidc`: An object representing an OpenID Connect (OIDC) identity provider configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. -- `"tags"`: The metadata to apply to the configuration to assist with categorization and - organization. Each tag consists of a key and an optional value. You define both. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. """ function associate_identity_provider_config( name, oidc; aws_config::AbstractAWSConfig=global_aws_config() @@ -113,6 +170,103 @@ function associate_identity_provider_config( ) end +""" + create_access_entry(name, principal_arn) + create_access_entry(name, principal_arn, params::Dict{String,<:Any}) + +Creates an access entry. An access entry allows an IAM principal to access your cluster. +Access entries can replace the need to maintain entries in the aws-auth ConfigMap for +authentication. You have the following options for authorizing an IAM principal to access +Kubernetes objects on your cluster: Kubernetes role-based access control (RBAC), Amazon +EKS, or both. Kubernetes RBAC authorization requires you to create and manage Kubernetes +Role, ClusterRole, RoleBinding, and ClusterRoleBinding objects, in addition to managing +access entries. If you use Amazon EKS authorization exclusively, you don't need to create +and manage Kubernetes Role, ClusterRole, RoleBinding, and ClusterRoleBinding objects. For +more information about access entries, see Access entries in the Amazon EKS User Guide. + +# Arguments +- `name`: The name of your cluster. +- `principal_arn`: The ARN of the IAM principal for the AccessEntry. You can specify one + ARN for each access entry. You can't specify the same ARN in more than one access entry. + This value can't be changed after access entry creation. The valid principals differ + depending on the type of the access entry in the type field. The only valid ARN is IAM + roles for the types of access entries for nodes: . You can use every IAM principal type + for STANDARD access entries. You can't use the STS session principal type with access + entries because this is a temporary principal for each session and not a permanent identity + that can be assigned permissions. IAM best practices recommend using IAM roles with + temporary credentials, rather than IAM users with long-term credentials. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. 
+- `"kubernetesGroups"`: The value for name that you've specified for kind: Group as a + subject in a Kubernetes RoleBinding or ClusterRoleBinding object. Amazon EKS doesn't + confirm that the value for name exists in any bindings on your cluster. You can specify one + or more names. Kubernetes authorizes the principalArn of the access entry to access any + cluster objects that you've specified in a Kubernetes Role or ClusterRole object that is + also specified in a binding's roleRef. For more information about creating Kubernetes + RoleBinding, ClusterRoleBinding, Role, or ClusterRole objects, see Using RBAC Authorization + in the Kubernetes documentation. If you want Amazon EKS to authorize the principalArn + (instead of, or in addition to Kubernetes authorizing the principalArn), you can associate + one or more access policies to the access entry using AssociateAccessPolicy. If you + associate any access policies, the principalARN has all permissions assigned in the + associated access policies and all permissions in any Kubernetes Role or ClusterRole + objects that the group names are bound to. +- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. +- `"type"`: The type of the new access entry. Valid values are Standard, FARGATE_LINUX, + EC2_LINUX, and EC2_WINDOWS. If the principalArn is for an IAM role that's used for + self-managed Amazon EC2 nodes, specify EC2_LINUX or EC2_WINDOWS. Amazon EKS grants the + necessary permissions to the node for you. If the principalArn is for any other purpose, + specify STANDARD. If you don't specify a value, Amazon EKS sets the value to STANDARD. It's + unnecessary to create access entries for IAM roles used with Fargate profiles or managed + Amazon EC2 nodes, because Amazon EKS creates entries in the aws-auth ConfigMap for the + roles. You can't change this value once you've created the access entry. If you set the + value to EC2_LINUX or EC2_WINDOWS, you can't specify values for kubernetesGroups, or + associate an AccessPolicy to the access entry. +- `"username"`: The username to authenticate to Kubernetes with. We recommend not + specifying a username and letting Amazon EKS specify it for you. For more information about + the value Amazon EKS specifies for you, or constraints before specifying your own username, + see Creating access entries in the Amazon EKS User Guide. +""" +function create_access_entry( + name, principalArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "POST", + "/clusters/$(name)/access-entries", + Dict{String,Any}( + "principalArn" => principalArn, "clientRequestToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_access_entry( + name, + principalArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/clusters/$(name)/access-entries", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "principalArn" => principalArn, "clientRequestToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_addon(addon_name, name) create_addon(addon_name, name, params::Dict{String,<:Any}) @@ -122,9 +276,9 @@ lifecycle management of common operational software for Amazon EKS clusters. 
For information, see Amazon EKS add-ons in the Amazon EKS User Guide. # Arguments -- `addon_name`: The name of the add-on. The name must match one of the names that - DescribeAddonVersions returns. -- `name`: The name of the cluster to create the add-on for. +- `addon_name`: The name of the add-on. The name must match one of the names returned by + DescribeAddonVersions. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -133,18 +287,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. - `"configurationValues"`: The set of configuration values for the add-on that's created. - The values that you provide are validated against the schema in DescribeAddonConfiguration - . + The values that you provide are validated against the schema returned by + DescribeAddonConfiguration. +- `"podIdentityAssociations"`: An array of Pod Identity Assocations to be created. Each EKS + Pod Identity association maps a Kubernetes service account to an IAM Role. For more + information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS + User Guide. - `"resolveConflicts"`: How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are handled based on the value you choose: None – If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the value. Creation of the add-on might fail. Overwrite – If the self-managed version of the add-on is installed on your cluster and the Amazon EKS default value is different than the existing value, Amazon EKS changes the value to the Amazon EKS default value. Preserve - – Not supported. You can set this value when updating an add-on though. For more - information, see UpdateAddon. If you don't currently have the self-managed version of the - add-on installed on your cluster, the Amazon EKS add-on is installed. Amazon EKS sets all - values to default values, regardless of the option that you specify. + – This is similar to the NONE option. If the self-managed version of the add-on is + installed on your cluster Amazon EKS doesn't change the add-on resource properties. + Creation of the add-on might fail if conflicts are detected. This option works differently + during the update operation. For more information, see UpdateAddon. If you don't + currently have the self-managed version of the add-on installed on your cluster, the Amazon + EKS add-on is installed. Amazon EKS sets all values to default values, regardless of the + option that you specify. - `"serviceAccountRoleArn"`: The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions @@ -152,8 +313,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon EKS User Guide. To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see Enabling IAM roles for service accounts on your cluster in the Amazon EKS User Guide. -- `"tags"`: The metadata to apply to the cluster to assist with categorization and - organization. Each tag consists of a key and an optional value. You define both. 
+- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. """ function create_addon(addonName, name; aws_config::AbstractAWSConfig=global_aws_config()) return eks( @@ -191,21 +353,30 @@ end create_cluster(name, resources_vpc_config, role_arn) create_cluster(name, resources_vpc_config, role_arn, params::Dict{String,<:Any}) -Creates an Amazon EKS control plane. The Amazon EKS control plane consists of control -plane instances that run the Kubernetes software, such as etcd and the API server. The -control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is -exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is -single tenant and unique. It runs on its own set of Amazon EC2 instances. The cluster -control plane is provisioned across multiple Availability Zones and fronted by an Elastic -Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces -in your VPC subnets to provide connectivity from the control plane instances to the nodes -(for example, to support kubectl exec, logs, and proxy data flows). Amazon EKS nodes run in -your Amazon Web Services account and connect to your cluster's control plane over the -Kubernetes API server endpoint and a certificate file that is created for your cluster. In -most cases, it takes several minutes to create a cluster. After you create an Amazon EKS -cluster, you must configure your Kubernetes tooling to communicate with the API server and -launch nodes into your cluster. For more information, see Managing Cluster Authentication -and Launching Amazon EKS nodes in the Amazon EKS User Guide. +Creates an Amazon EKS control plane. The Amazon EKS control plane consists of control plane +instances that run the Kubernetes software, such as etcd and the API server. The control +plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed +by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single +tenant and unique. It runs on its own set of Amazon EC2 instances. The cluster control +plane is provisioned across multiple Availability Zones and fronted by an Elastic Load +Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in +your VPC subnets to provide connectivity from the control plane instances to the nodes (for +example, to support kubectl exec, logs, and proxy data flows). Amazon EKS nodes run in your +Amazon Web Services account and connect to your cluster's control plane over the Kubernetes +API server endpoint and a certificate file that is created for your cluster. You can use +the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public +and private access to your cluster's Kubernetes API server endpoint. By default, public +access is enabled, and private access is disabled. For more information, see Amazon EKS +Cluster Endpoint Access Control in the Amazon EKS User Guide . You can use the logging +parameter to enable or disable exporting the Kubernetes control plane logs for your cluster +to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch +Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS +User Guide . 
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to +exported control plane logs. For more information, see CloudWatch Pricing. In most cases, +it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you +must configure your Kubernetes tooling to communicate with the API server and launch nodes +into your cluster. For more information, see Allowing users to access your cluster and +Launching Amazon EKS nodes in the Amazon EKS User Guide. # Arguments - `name`: The unique name to give to your cluster. @@ -222,8 +393,9 @@ and Launching Amazon EKS nodes in the Amazon EKS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. +- `"accessConfig"`: The access configuration for the cluster. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. - `"encryptionConfig"`: The encryption configuration for the cluster. - `"kubernetesNetworkConfig"`: The Kubernetes network configuration for the cluster. - `"logging"`: Enable or disable exporting the Kubernetes control plane logs for your @@ -236,8 +408,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys review Local clusters for Amazon EKS on Amazon Web Services Outposts in the Amazon EKS User Guide. This object isn't available for creating Amazon EKS clusters on the Amazon Web Services cloud. -- `"tags"`: The metadata to apply to the cluster to assist with categorization and - organization. Each tag consists of a key and an optional value. You define both. +- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. - `"version"`: The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used. The default version might not be the latest version available. @@ -285,6 +458,76 @@ function create_cluster( ) end +""" + create_eks_anywhere_subscription(name, term) + create_eks_anywhere_subscription(name, term, params::Dict{String,<:Any}) + +Creates an EKS Anywhere subscription. When a subscription is created, it is a contract +agreement for the length of the term specified in the request. Licenses that are used to +validate support are provisioned in Amazon Web Services License Manager and the caller +account is granted access to EKS Anywhere Curated Packages. + +# Arguments +- `name`: The unique name for your subscription. It must be unique in your Amazon Web + Services account in the Amazon Web Services Region you're creating the subscription in. The + name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. + It must start with an alphabetic character and can't be longer than 100 characters. +- `term`: An object representing the term duration and term unit type of your subscription. + This determines the term length of your subscription. Valid values are MONTHS for term unit + and 12 or 36 for term duration, indicating a 12 month or 36 month subscription. This value + cannot be changed after creating the subscription. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
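# A minimal usage sketch for the create_cluster wrapper described above, showing the
# endpoint access, logging, and accessConfig options from the docstring. Assumes AWS.jl's
# `@service` entry point; positional order follows the documented signature
# (name, resources_vpc_config, role_arn). All IDs, ARNs, and nested field names are
# placeholders or assumptions based on the Amazon EKS API shapes.
using AWS
@service EKS

resources_vpc_config = Dict(
    "subnetIds" => ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"],  # placeholders
    "securityGroupIds" => ["sg-0123456789abcdef0"],                           # placeholder
    "endpointPublicAccess" => true,
    "endpointPrivateAccess" => true,
)

EKS.create_cluster(
    "my-cluster",
    resources_vpc_config,
    "arn:aws:iam::111122223333:role/eksClusterRole",  # placeholder cluster role ARN
    Dict(
        "version" => "1.29",
        "logging" => Dict(
            "clusterLogging" => [Dict("types" => ["api", "audit"], "enabled" => true)],
        ),
        "accessConfig" => Dict("authenticationMode" => "API_AND_CONFIG_MAP"),
    ),
)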
Valid keys are: +- `"autoRenew"`: A boolean indicating whether the subscription auto renews at the end of + the term. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"licenseQuantity"`: The number of licenses to purchase with the subscription. Valid + values are between 1 and 100. This value can't be changed after creating the subscription. +- `"licenseType"`: The license type for all licenses in the subscription. Valid value is + CLUSTER. With the CLUSTER license type, each license covers support for a single EKS + Anywhere cluster. +- `"tags"`: The metadata for a subscription to assist with categorization and organization. + Each tag consists of a key and an optional value. Subscription tags don't propagate to any + other resources associated with the subscription. +""" +function create_eks_anywhere_subscription( + name, term; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "POST", + "/eks-anywhere-subscriptions", + Dict{String,Any}( + "name" => name, "term" => term, "clientRequestToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_eks_anywhere_subscription( + name, + term, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/eks-anywhere-subscriptions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "term" => term, "clientRequestToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_fargate_profile(fargate_profile_name, name, pod_execution_role_arn) create_fargate_profile(fargate_profile_name, name, pod_execution_role_arn, params::Dict{String,<:Any}) @@ -308,31 +551,30 @@ However, you can create a new updated profile to replace an existing profile and delete the original after the updated profile has finished creating. If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster. For more -information, see Fargate Profile in the Amazon EKS User Guide. +information, see Fargate profile in the Amazon EKS User Guide. # Arguments - `fargate_profile_name`: The name of the Fargate profile. -- `name`: The name of the Amazon EKS cluster to apply the Fargate profile to. -- `pod_execution_role_arn`: The Amazon Resource Name (ARN) of the pod execution role to use - for pods that match the selectors in the Fargate profile. The pod execution role allows +- `name`: The name of your cluster. +- `pod_execution_role_arn`: The Amazon Resource Name (ARN) of the Pod execution role to use + for a Pod that matches the selectors in the Fargate profile. The Pod execution role allows Fargate infrastructure to register with your cluster as a node, and it provides read access - to Amazon ECR image repositories. For more information, see Pod Execution Role in the + to Amazon ECR image repositories. For more information, see Pod execution role in the Amazon EKS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. -- `"selectors"`: The selectors to match for pods to use this Fargate profile. Each selector - must have an associated namespace. 
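# A minimal usage sketch for the create_eks_anywhere_subscription wrapper above, assuming
# AWS.jl's `@service` entry point. The term field names ("duration", "unit") are assumptions
# based on the docstring's description of the term object; the subscription name and the
# optional values are placeholders.
using AWS
@service EKS

EKS.create_eks_anywhere_subscription(
    "my-eks-anywhere-subscription",                      # placeholder subscription name
    Dict("duration" => 12, "unit" => "MONTHS"),          # assumed term object shape
    Dict("licenseQuantity" => 2, "autoRenew" => false),  # optional parameters
)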
Optionally, you can also specify labels for a namespace. - You may specify up to five selectors in a Fargate profile. -- `"subnets"`: The IDs of subnets to launch your pods into. At this time, pods running on - Fargate are not assigned public IP addresses, so only private subnets (with no direct route - to an Internet Gateway) are accepted for this parameter. -- `"tags"`: The metadata to apply to the Fargate profile to assist with categorization and - organization. Each tag consists of a key and an optional value. You define both. Fargate - profile tags do not propagate to any other resources associated with the Fargate profile, - such as the pods that are scheduled with it. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"selectors"`: The selectors to match for a Pod to use this Fargate profile. Each + selector must have an associated Kubernetes namespace. Optionally, you can also specify + labels for a namespace. You may specify up to five selectors in a Fargate profile. +- `"subnets"`: The IDs of subnets to launch a Pod into. A Pod running on Fargate isn't + assigned a public IP address, so only private subnets (with no direct route to an Internet + Gateway) are accepted for this parameter. +- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. """ function create_fargate_profile( fargateProfileName, @@ -386,14 +628,15 @@ Creates a managed node group for an Amazon EKS cluster. You can only create a no for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more -information about using launch templates, see Launch template support. An Amazon EKS -managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances -that are managed by Amazon Web Services for an Amazon EKS cluster. For more information, -see Managed node groups in the Amazon EKS User Guide. Windows AMI types are only supported -for commercial Regions that support Windows Amazon EKS. +information about using launch templates, see Customizing managed nodes with launch +templates. An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and +associated Amazon EC2 instances that are managed by Amazon Web Services for an Amazon EKS +cluster. For more information, see Managed node groups in the Amazon EKS User Guide. +Windows AMI types are only supported for commercial Amazon Web Services Regions that +support Windows on Amazon EKS. # Arguments -- `name`: The name of the cluster to create the node group in. +- `name`: The name of your cluster. - `node_role`: The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile @@ -402,12 +645,14 @@ for commercial Regions that support Windows Amazon EKS. information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. 
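# A minimal usage sketch for the create_fargate_profile wrapper above, assuming AWS.jl's
# `@service` entry point; positional order follows the documented signature
# (fargate_profile_name, name, pod_execution_role_arn). The selector field names
# ("namespace", "labels") are assumptions based on the Amazon EKS API shape, and all other
# values are placeholders.
using AWS
@service EKS

EKS.create_fargate_profile(
    "my-fargate-profile",                                         # placeholder profile name
    "my-cluster",                                                 # placeholder cluster name
    "arn:aws:iam::111122223333:role/eksFargatePodExecutionRole",  # placeholder Pod execution role ARN
    Dict(
        "selectors" => [Dict("namespace" => "serverless", "labels" => Dict("app" => "web"))],
        "subnets" => ["subnet-0123456789abcdef0"],                # private subnets only
    ),
)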
For more information about using launch templates with - Amazon EKS, see Launch template support in the Amazon EKS User Guide. + Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User + Guide. - `nodegroup_name`: The unique name to give your node group. - `subnets`: The subnets to use for the Auto Scaling group that is created for your node group. If you specify launchTemplate, then don't specify SubnetId in your launch template, or the node group deployment will fail. For more information about using launch - templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + templates with Amazon EKS, see Customizing managed nodes with launch templates in the + Amazon EKS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -415,16 +660,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launch template uses a custom AMI, then don't specify amiType, or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add eks:kube-proxy-windows to your Windows nodes rolearn in the aws-auth ConfigMap. For more - information about using launch templates with Amazon EKS, see Launch template support in - the Amazon EKS User Guide. + information about using launch templates with Amazon EKS, see Customizing managed nodes + with launch templates in the Amazon EKS User Guide. - `"capacityType"`: The capacity type for your node group. -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. - `"diskSize"`: The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify launchTemplate, then don't specify diskSize, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, - see Launch template support in the Amazon EKS User Guide. + see Customizing managed nodes with launch templates in the Amazon EKS User Guide. - `"instanceTypes"`: Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in @@ -433,13 +678,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used, by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. For more - information, see Managed node group capacity types and Launch template support in the - Amazon EKS User Guide. -- `"labels"`: The Kubernetes labels to be applied to the nodes in the node group when they - are created. + information, see Managed node group capacity types and Customizing managed nodes with + launch templates in the Amazon EKS User Guide. +- `"labels"`: The Kubernetes labels to apply to the nodes in the node group when they are + created. - `"launchTemplate"`: An object representing a node group's launch template specification. 
- If specified, then do not specify instanceTypes, diskSize, or remoteAccess and make sure - that the launch template meets the requirements in launchTemplateSpecification. + When using this object, don't directly specify instanceTypes, diskSize, or remoteAccess. + Make sure that the launch template meets the requirements in launchTemplateSpecification. + Also refer to Customizing managed nodes with launch templates in the Amazon EKS User Guide. - `"releaseVersion"`: The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon @@ -448,18 +694,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using - launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + launch templates with Amazon EKS, see Customizing managed nodes with launch templates in + the Amazon EKS User Guide. - `"remoteAccess"`: The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify remoteAccess, or the node group deployment will fail. For more information - about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS - User Guide. + about using launch templates with Amazon EKS, see Customizing managed nodes with launch + templates in the Amazon EKS User Guide. - `"scalingConfig"`: The scaling configuration details for the Auto Scaling group that is created for your node group. -- `"tags"`: The metadata to apply to the node group to assist with categorization and - organization. Each tag consists of a key and an optional value. You define both. Node group - tags do not propagate to any other resources associated with the node group, such as the - Amazon EC2 instances or subnets. +- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. - `"taints"`: The Kubernetes taints to be applied to the nodes in the node group. For more information, see Node taints on managed node groups. - `"updateConfig"`: The node group update configuration. @@ -467,7 +713,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group deployment will fail. For more information about using - launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + launch templates with Amazon EKS, see Customizing managed nodes with launch templates in + the Amazon EKS User Guide. 
""" function create_nodegroup( name, @@ -517,17 +764,148 @@ function create_nodegroup( ) end +""" + create_pod_identity_association(name, namespace, role_arn, service_account) + create_pod_identity_association(name, namespace, role_arn, service_account, params::Dict{String,<:Any}) + +Creates an EKS Pod Identity association between a service account in an Amazon EKS cluster +and an IAM role with EKS Pod Identity. Use EKS Pod Identity to give temporary IAM +credentials to pods and the credentials are rotated automatically. Amazon EKS Pod Identity +associations provide the ability to manage credentials for your applications, similar to +the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. If a +pod uses a service account that has an association, Amazon EKS sets environment variables +in the containers of the pod. The environment variables configure the Amazon Web Services +SDKs, including the Command Line Interface, to use the EKS Pod Identity credentials. Pod +Identity is a simpler method than IAM roles for service accounts, as this method doesn't +use OIDC identity providers. Additionally, you can configure a role for Pod Identity once, +and reuse it across clusters. + +# Arguments +- `name`: The name of the cluster to create the association in. +- `namespace`: The name of the Kubernetes namespace inside the cluster to create the + association in. The service account and the pods that use the service account must be in + this namespace. +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role to associate with the service + account. The EKS Pod Identity agent manages credentials to assume this role for + applications in the containers in the pods that use this service account. +- `service_account`: The name of the Kubernetes service account inside the cluster to + associate the IAM credentials with. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. The following basic restrictions apply to tags: Maximum + number of tags per resource – 50 For each resource, each tag key must be unique, and + each tag key can have only one value. Maximum key length – 128 Unicode characters in + UTF-8 Maximum value length – 256 Unicode characters in UTF-8 If your tagging schema + is used across multiple services and resources, remember that other services may have + restrictions on allowed characters. Generally allowed characters are: letters, numbers, and + spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys + and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase + combination of such as a prefix for either keys or values as it is reserved for Amazon Web + Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this + prefix do not count against your tags per resource limit. 
+""" +function create_pod_identity_association( + name, + namespace, + roleArn, + serviceAccount; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/clusters/$(name)/pod-identity-associations", + Dict{String,Any}( + "namespace" => namespace, + "roleArn" => roleArn, + "serviceAccount" => serviceAccount, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_pod_identity_association( + name, + namespace, + roleArn, + serviceAccount, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/clusters/$(name)/pod-identity-associations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "namespace" => namespace, + "roleArn" => roleArn, + "serviceAccount" => serviceAccount, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_access_entry(name, principal_arn) + delete_access_entry(name, principal_arn, params::Dict{String,<:Any}) + +Deletes an access entry. Deleting an access entry of a type other than Standard can cause +your cluster to function improperly. If you delete an access entry in error, you can +recreate it. + +# Arguments +- `name`: The name of your cluster. +- `principal_arn`: The ARN of the IAM principal for the AccessEntry. + +""" +function delete_access_entry( + name, principalArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "DELETE", + "/clusters/$(name)/access-entries/$(principalArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_access_entry( + name, + principalArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "DELETE", + "/clusters/$(name)/access-entries/$(principalArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_addon(addon_name, name) delete_addon(addon_name, name, params::Dict{String,<:Any}) -Delete an Amazon EKS add-on. When you remove the add-on, it will also be deleted from the -cluster. You can always manually start an add-on on the cluster using the Kubernetes API. +Deletes an Amazon EKS add-on. When you remove an add-on, it's deleted from the cluster. You +can always manually start an add-on on the cluster using the Kubernetes API. # Arguments - `addon_name`: The name of the add-on. The name must match one of the names returned by ListAddons . -- `name`: The name of the cluster to delete the add-on from. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -562,13 +940,13 @@ end delete_cluster(name) delete_cluster(name, params::Dict{String,<:Any}) -Deletes the Amazon EKS cluster control plane. If you have active services in your cluster +Deletes an Amazon EKS cluster control plane. If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more -information, see Deleting a Cluster in the Amazon EKS User Guide. If you have managed node +information, see Deleting a cluster in the Amazon EKS User Guide. 
If you have managed node groups or Fargate profiles attached to the cluster, you must delete them first. For more -information, see DeleteNodegroup and DeleteFargateProfile. +information, see DeleteNodgroup and DeleteFargateProfile. # Arguments - `name`: The name of the cluster to delete. @@ -594,21 +972,55 @@ function delete_cluster( ) end +""" + delete_eks_anywhere_subscription(id) + delete_eks_anywhere_subscription(id, params::Dict{String,<:Any}) + +Deletes an expired or inactive subscription. Deleting inactive subscriptions removes them +from the Amazon Web Services Management Console view and from list/describe API responses. +Subscriptions can only be cancelled within 7 days of creation and are cancelled by creating +a ticket in the Amazon Web Services Support Center. + +# Arguments +- `id`: The ID of the subscription. + +""" +function delete_eks_anywhere_subscription( + id; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "DELETE", + "/eks-anywhere-subscriptions/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_eks_anywhere_subscription( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "DELETE", + "/eks-anywhere-subscriptions/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_fargate_profile(fargate_profile_name, name) delete_fargate_profile(fargate_profile_name, name, params::Dict{String,<:Any}) -Deletes an Fargate profile. When you delete a Fargate profile, any pods running on Fargate -that were created with the profile are deleted. If those pods match another Fargate -profile, then they are scheduled on Fargate with that profile. If they no longer match any -Fargate profiles, then they are not scheduled on Fargate and they may remain in a pending -state. Only one Fargate profile in a cluster can be in the DELETING status at a time. You -must wait for a Fargate profile to finish deleting before you can delete any other profiles -in that cluster. +Deletes an Fargate profile. When you delete a Fargate profile, any Pod running on Fargate +that was created with the profile is deleted. If the Pod matches another Fargate profile, +then it is scheduled on Fargate with that profile. If it no longer matches any Fargate +profiles, then it's not scheduled on Fargate and may remain in a pending state. Only one +Fargate profile in a cluster can be in the DELETING status at a time. You must wait for a +Fargate profile to finish deleting before you can delete any other profiles in that cluster. # Arguments - `fargate_profile_name`: The name of the Fargate profile to delete. -- `name`: The name of the Amazon EKS cluster associated with the Fargate profile to delete. +- `name`: The name of your cluster. """ function delete_fargate_profile( @@ -640,10 +1052,10 @@ end delete_nodegroup(name, nodegroup_name) delete_nodegroup(name, nodegroup_name, params::Dict{String,<:Any}) -Deletes an Amazon EKS node group for a cluster. +Deletes a managed node group. # Arguments -- `name`: The name of the Amazon EKS cluster that is associated with your node group. +- `name`: The name of your cluster. - `nodegroup_name`: The name of the node group to delete. """ @@ -672,11 +1084,52 @@ function delete_nodegroup( ) end +""" + delete_pod_identity_association(association_id, name) + delete_pod_identity_association(association_id, name, params::Dict{String,<:Any}) + +Deletes a EKS Pod Identity association. 
The temporary Amazon Web Services credentials from +the previous IAM role session might still be valid until the session expiry. If you need to +immediately revoke the temporary session credentials, then go to the role in the IAM +console. + +# Arguments +- `association_id`: The ID of the association to be deleted. +- `name`: The cluster name that + +""" +function delete_pod_identity_association( + associationId, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "DELETE", + "/clusters/$(name)/pod-identity-associations/$(associationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_pod_identity_association( + associationId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "DELETE", + "/clusters/$(name)/pod-identity-associations/$(associationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ deregister_cluster(name) deregister_cluster(name, params::Dict{String,<:Any}) -Deregisters a connected cluster to remove it from the Amazon EKS control plane. +Deregisters a connected cluster to remove it from the Amazon EKS control plane. A connected +cluster is a Kubernetes cluster that you've connected to your control plane using the +Amazon EKS Connector. # Arguments - `name`: The name of the connected cluster to deregister. @@ -703,34 +1156,35 @@ function deregister_cluster( end """ - describe_addon(addon_name, name) - describe_addon(addon_name, name, params::Dict{String,<:Any}) + describe_access_entry(name, principal_arn) + describe_access_entry(name, principal_arn, params::Dict{String,<:Any}) -Describes an Amazon EKS add-on. +Describes an access entry. # Arguments -- `addon_name`: The name of the add-on. The name must match one of the names returned by - ListAddons . -- `name`: The name of the cluster. +- `name`: The name of your cluster. +- `principal_arn`: The ARN of the IAM principal for the AccessEntry. """ -function describe_addon(addonName, name; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_access_entry( + name, principalArn; aws_config::AbstractAWSConfig=global_aws_config() +) return eks( "GET", - "/clusters/$(name)/addons/$(addonName)"; + "/clusters/$(name)/access-entries/$(principalArn)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_addon( - addonName, +function describe_access_entry( name, + principalArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return eks( "GET", - "/clusters/$(name)/addons/$(addonName)", + "/clusters/$(name)/access-entries/$(principalArn)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -738,14 +1192,49 @@ function describe_addon( end """ - describe_addon_configuration(addon_name, addon_version) - describe_addon_configuration(addon_name, addon_version, params::Dict{String,<:Any}) - + describe_addon(addon_name, name) + describe_addon(addon_name, name, params::Dict{String,<:Any}) + +Describes an Amazon EKS add-on. + +# Arguments +- `addon_name`: The name of the add-on. The name must match one of the names returned by + ListAddons . +- `name`: The name of your cluster. 
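# A minimal teardown sketch combining the delete wrappers above, assuming AWS.jl's
# `@service` entry point. Note that delete_pod_identity_association takes the association ID
# before the cluster name, matching the generated positional order; both IDs and the ARN are
# placeholders.
using AWS
@service EKS

EKS.delete_pod_identity_association("a-0123456789abcdef0", "my-cluster")
EKS.delete_access_entry("my-cluster", "arn:aws:iam::111122223333:role/dev-role")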
+ +""" +function describe_addon(addonName, name; aws_config::AbstractAWSConfig=global_aws_config()) + return eks( + "GET", + "/clusters/$(name)/addons/$(addonName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_addon( + addonName, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "GET", + "/clusters/$(name)/addons/$(addonName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_addon_configuration(addon_name, addon_version) + describe_addon_configuration(addon_name, addon_version, params::Dict{String,<:Any}) + Returns configuration options. # Arguments -- `addon_name`: The name of the add-on. The name must match one of the names that - DescribeAddonVersions returns. +- `addon_name`: The name of the add-on. The name must match one of the names returned by + DescribeAddonVersions. - `addon_version`: The version of the add-on. The version must match one of the versions returned by DescribeAddonVersions . @@ -794,12 +1283,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"addonName"`: The name of the add-on. The name must match one of the names returned by ListAddons . - `"kubernetesVersion"`: The Kubernetes versions that you can use the add-on with. -- `"maxResults"`: The maximum number of results to return. -- `"nextToken"`: The nextToken value returned from a previous paginated - DescribeAddonVersionsRequest where maxResults was used and the results exceeded the value - of that parameter. Pagination continues from the end of the previous results that returned - the nextToken value. This token should be treated as an opaque identifier that is used - only to retrieve the next items in a list and not for other programmatic purposes. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. - `"owners"`: The owner of the add-on. For valid owners, don't specify a value for this property. - `"publishers"`: The publisher of the add-on. For valid publishers, don't specify a value @@ -831,14 +1325,14 @@ end describe_cluster(name) describe_cluster(name, params::Dict{String,<:Any}) -Returns descriptive information about an Amazon EKS cluster. The API server endpoint and -certificate authority data returned by this operation are required for kubelet and kubectl -to communicate with your Kubernetes API server. For more information, see Create a -kubeconfig for Amazon EKS. The API server endpoint and certificate authority data aren't +Describes an Amazon EKS cluster. 
The API server endpoint and certificate authority data +returned by this operation are required for kubelet and kubectl to communicate with your +Kubernetes API server. For more information, see Creating or updating a kubeconfig file for +an Amazon EKS cluster. The API server endpoint and certificate authority data aren't available until the cluster reaches the ACTIVE state. # Arguments -- `name`: The name of the cluster to describe. +- `name`: The name of your cluster. """ function describe_cluster(name; aws_config::AbstractAWSConfig=global_aws_config()) @@ -858,15 +1352,47 @@ function describe_cluster( ) end +""" + describe_eks_anywhere_subscription(id) + describe_eks_anywhere_subscription(id, params::Dict{String,<:Any}) + +Returns descriptive information about a subscription. + +# Arguments +- `id`: The ID of the subscription. + +""" +function describe_eks_anywhere_subscription( + id; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/eks-anywhere-subscriptions/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_eks_anywhere_subscription( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/eks-anywhere-subscriptions/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_fargate_profile(fargate_profile_name, name) describe_fargate_profile(fargate_profile_name, name, params::Dict{String,<:Any}) -Returns descriptive information about an Fargate profile. +Describes an Fargate profile. # Arguments - `fargate_profile_name`: The name of the Fargate profile to describe. -- `name`: The name of the Amazon EKS cluster associated with the Fargate profile. +- `name`: The name of your cluster. """ function describe_fargate_profile( @@ -898,11 +1424,11 @@ end describe_identity_provider_config(identity_provider_config, name) describe_identity_provider_config(identity_provider_config, name, params::Dict{String,<:Any}) -Returns descriptive information about an identity provider configuration. +Describes an identity provider configuration. # Arguments - `identity_provider_config`: An object representing an identity provider configuration. -- `name`: The cluster name that the identity provider configuration is associated to. +- `name`: The name of your cluster. """ function describe_identity_provider_config( @@ -937,14 +1463,48 @@ function describe_identity_provider_config( ) end +""" + describe_insight(id, name) + describe_insight(id, name, params::Dict{String,<:Any}) + +Returns details about an insight that you specify using its ID. + +# Arguments +- `id`: The identity of the insight to describe. +- `name`: The name of the cluster to describe the insight for. + +""" +function describe_insight(id, name; aws_config::AbstractAWSConfig=global_aws_config()) + return eks( + "GET", + "/clusters/$(name)/insights/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_insight( + id, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "GET", + "/clusters/$(name)/insights/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_nodegroup(name, nodegroup_name) describe_nodegroup(name, nodegroup_name, params::Dict{String,<:Any}) -Returns descriptive information about an Amazon EKS node group. +Describes a managed node group. 
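# A minimal polling sketch for the describe_cluster wrapper above: wait until the cluster is
# ACTIVE, then read the API server endpoint and certificate authority data needed for a
# kubeconfig. Assumes AWS.jl's `@service` entry point, that the call returns the default
# parsed Dict, and that the response keys ("cluster", "status", "endpoint",
# "certificateAuthority"/"data") follow the Amazon EKS response shape; the cluster name is a
# placeholder.
using AWS
@service EKS

function wait_for_active(name; delay=30, max_attempts=40)
    for _ in 1:max_attempts
        cluster = EKS.describe_cluster(name)["cluster"]
        cluster["status"] == "ACTIVE" && return cluster
        sleep(delay)
    end
    return error("cluster $name did not reach ACTIVE in time")
end

cluster = wait_for_active("my-cluster")
endpoint = cluster["endpoint"]                     # Kubernetes API server endpoint
ca_data = cluster["certificateAuthority"]["data"]  # base64-encoded certificate authority data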
# Arguments -- `name`: The name of the Amazon EKS cluster associated with the node group. +- `name`: The name of your cluster. - `nodegroup_name`: The name of the node group to describe. """ @@ -973,14 +1533,53 @@ function describe_nodegroup( ) end +""" + describe_pod_identity_association(association_id, name) + describe_pod_identity_association(association_id, name, params::Dict{String,<:Any}) + +Returns descriptive information about an EKS Pod Identity association. This action requires +the ID of the association. You can get the ID from the response to the +CreatePodIdentityAssocation for newly created associations. Or, you can list the IDs for +associations with ListPodIdentityAssociations and filter the list by namespace or service +account. + +# Arguments +- `association_id`: The ID of the association that you want the description of. +- `name`: The name of the cluster that the association is in. + +""" +function describe_pod_identity_association( + associationId, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/clusters/$(name)/pod-identity-associations/$(associationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_pod_identity_association( + associationId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "GET", + "/clusters/$(name)/pod-identity-associations/$(associationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_update(name, update_id) describe_update(name, update_id, params::Dict{String,<:Any}) -Returns descriptive information about an update against your Amazon EKS cluster or -associated managed node group or Amazon EKS add-on. When the status of the update is -Succeeded, the update is complete. If an update fails, the status is Failed, and an error -detail explains the reason for the failure. +Describes an update to an Amazon EKS resource. When the status of the update is Succeeded, +the update is complete. If an update fails, the status is Failed, and an error detail +explains the reason for the failure. # Arguments - `name`: The name of the Amazon EKS cluster associated with the update. @@ -1016,17 +1615,56 @@ function describe_update( ) end +""" + disassociate_access_policy(name, policy_arn, principal_arn) + disassociate_access_policy(name, policy_arn, principal_arn, params::Dict{String,<:Any}) + +Disassociates an access policy from an access entry. + +# Arguments +- `name`: The name of your cluster. +- `policy_arn`: The ARN of the policy to disassociate from the access entry. For a list of + associated policies ARNs, use ListAssociatedAccessPolicies. +- `principal_arn`: The ARN of the IAM principal for the AccessEntry. 
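# A minimal polling sketch for the describe_update wrapper above: poll until the update
# leaves an assumed "InProgress" state, then check for the Succeeded or Failed status that
# the docstring describes. Assumes AWS.jl's `@service` entry point and that the response
# keys ("update", "status", "errors") follow the Amazon EKS response shape; the cluster name
# and update ID are placeholders.
using AWS
@service EKS

function wait_for_update(name, update_id; delay=15)
    while true
        update = EKS.describe_update(name, update_id)["update"]
        update["status"] != "InProgress" && return update
        sleep(delay)
    end
end

update = wait_for_update("my-cluster", "10bc2f92-0000-0000-0000-000000000000")
update["status"] == "Failed" && @warn "update failed" errors = update["errors"]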
+ +""" +function disassociate_access_policy( + name, policyArn, principalArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "DELETE", + "/clusters/$(name)/access-entries/$(principalArn)/access-policies/$(policyArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_access_policy( + name, + policyArn, + principalArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "DELETE", + "/clusters/$(name)/access-entries/$(principalArn)/access-policies/$(policyArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_identity_provider_config(identity_provider_config, name) disassociate_identity_provider_config(identity_provider_config, name, params::Dict{String,<:Any}) Disassociates an identity provider configuration from a cluster. If you disassociate an identity provider from your cluster, users included in the provider can no longer access -the cluster. However, you can still access the cluster with Amazon Web Services IAM users. +the cluster. However, you can still access the cluster with IAM principals. # Arguments - `identity_provider_config`: An object representing an identity provider configuration. -- `name`: The name of the cluster to disassociate an identity provider from. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1071,28 +1709,111 @@ function disassociate_identity_provider_config( ) end +""" + list_access_entries(name) + list_access_entries(name, params::Dict{String,<:Any}) + +Lists the access entries for your cluster. + +# Arguments +- `name`: The name of your cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"associatedPolicyArn"`: The ARN of an AccessPolicy. When you specify an access policy + ARN, only the access entries associated to that access policy are returned. For a list of + available policy ARNs, use ListAccessPolicies. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. 
+""" +function list_access_entries(name; aws_config::AbstractAWSConfig=global_aws_config()) + return eks( + "GET", + "/clusters/$(name)/access-entries"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_access_entries( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/clusters/$(name)/access-entries", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_access_policies() + list_access_policies(params::Dict{String,<:Any}) + +Lists the available access policies. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. +""" +function list_access_policies(; aws_config::AbstractAWSConfig=global_aws_config()) + return eks( + "GET", "/access-policies"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_access_policies( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/access-policies", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_addons(name) list_addons(name, params::Dict{String,<:Any}) -Lists the available add-ons. +Lists the installed add-ons. # Arguments -- `name`: The name of the cluster. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of add-on results returned by ListAddonsRequest in - paginated output. When you use this parameter, ListAddonsRequest returns only maxResults - results in a single page along with a nextToken response element. You can see the remaining - results of the initial request by sending another ListAddonsRequest request with the - returned nextToken value. This value can be between 1 and 100. If you don't use this - parameter, ListAddonsRequest returns up to 100 results and a nextToken value, if applicable. -- `"nextToken"`: The nextToken value returned from a previous paginated ListAddonsRequest - where maxResults was used and the results exceeded the value of that parameter. Pagination - continues from the end of the previous results that returned the nextToken value. This - token should be treated as an opaque identifier that is used only to retrieve the next - items in a list and not for other programmatic purposes. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. 
You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. """ function list_addons(name; aws_config::AbstractAWSConfig=global_aws_config()) return eks( @@ -1114,28 +1835,80 @@ function list_addons( ) end +""" + list_associated_access_policies(name, principal_arn) + list_associated_access_policies(name, principal_arn, params::Dict{String,<:Any}) + +Lists the access policies associated with an access entry. + +# Arguments +- `name`: The name of your cluster. +- `principal_arn`: The ARN of the IAM principal for the AccessEntry. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. +""" +function list_associated_access_policies( + name, principalArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/clusters/$(name)/access-entries/$(principalArn)/access-policies"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_associated_access_policies( + name, + principalArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "GET", + "/clusters/$(name)/access-entries/$(principalArn)/access-policies", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_clusters() list_clusters(params::Dict{String,<:Any}) -Lists the Amazon EKS clusters in your Amazon Web Services account in the specified Region. +Lists the Amazon EKS clusters in your Amazon Web Services account in the specified Amazon +Web Services Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"include"`: Indicates whether external clusters are included in the returned list. Use - 'all' to return connected clusters, or blank to return only Amazon EKS clusters. 'all' must - be in lowercase otherwise an error occurs. -- `"maxResults"`: The maximum number of cluster results returned by ListClusters in - paginated output. 
When you use this parameter, ListClusters returns only maxResults results - in a single page along with a nextToken response element. You can see the remaining results - of the initial request by sending another ListClusters request with the returned nextToken - value. This value can be between 1 and 100. If you don't use this parameter, ListClusters - returns up to 100 results and a nextToken value if applicable. -- `"nextToken"`: The nextToken value returned from a previous paginated ListClusters - request where maxResults was used and the results exceeded the value of that parameter. - Pagination continues from the end of the previous results that returned the nextToken - value. This token should be treated as an opaque identifier that is used only to retrieve - the next items in a list and not for other programmatic purposes. + 'all' to return + https://docs.aws.amazon.com/eks/latest/userguide/eks-connector.htmlconnected clusters, or + blank to return only Amazon EKS clusters. 'all' must be in lowercase otherwise an error + occurs. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. """ function list_clusters(; aws_config::AbstractAWSConfig=global_aws_config()) return eks("GET", "/clusters"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -1148,29 +1921,72 @@ function list_clusters( ) end +""" + list_eks_anywhere_subscriptions() + list_eks_anywhere_subscriptions(params::Dict{String,<:Any}) + +Displays the full description of the subscription. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includeStatus"`: An array of subscription statuses to filter on. +- `"maxResults"`: The maximum number of cluster results returned by + ListEksAnywhereSubscriptions in paginated output. When you use this parameter, + ListEksAnywhereSubscriptions returns only maxResults results in a single page along with a + nextToken response element. You can see the remaining results of the initial request by + sending another ListEksAnywhereSubscriptions request with the returned nextToken value. + This value can be between 1 and 100. If you don't use this parameter, + ListEksAnywhereSubscriptions returns up to 10 results and a nextToken value if applicable. +- `"nextToken"`: The nextToken value returned from a previous paginated + ListEksAnywhereSubscriptions request where maxResults was used and the results exceeded the + value of that parameter. Pagination continues from the end of the previous results that + returned the nextToken value. 
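# Hedged usage sketch for the paginated subscription listing described above;
# the values are placeholders. The "ACTIVE" status in the commented call is an
# assumed includeStatus value, and serializing the list onto the query string
# is left to AWS.jl's request handling.
list_eks_anywhere_subscriptions(Dict{String,Any}("maxResults" => 10))
# list_eks_anywhere_subscriptions(Dict{String,Any}("includeStatus" => ["ACTIVE"]))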
+""" +function list_eks_anywhere_subscriptions(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/eks-anywhere-subscriptions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_eks_anywhere_subscriptions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/eks-anywhere-subscriptions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_fargate_profiles(name) list_fargate_profiles(name, params::Dict{String,<:Any}) Lists the Fargate profiles associated with the specified cluster in your Amazon Web -Services account in the specified Region. +Services account in the specified Amazon Web Services Region. # Arguments -- `name`: The name of the Amazon EKS cluster that you would like to list Fargate profiles - in. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of Fargate profile results returned by - ListFargateProfiles in paginated output. When you use this parameter, ListFargateProfiles - returns only maxResults results in a single page along with a nextToken response element. - You can see the remaining results of the initial request by sending another - ListFargateProfiles request with the returned nextToken value. This value can be between 1 - and 100. If you don't use this parameter, ListFargateProfiles returns up to 100 results and - a nextToken value if applicable. -- `"nextToken"`: The nextToken value returned from a previous paginated ListFargateProfiles - request where maxResults was used and the results exceeded the value of that parameter. - Pagination continues from the end of the previous results that returned the nextToken value. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. """ function list_fargate_profiles(name; aws_config::AbstractAWSConfig=global_aws_config()) return eks( @@ -1196,24 +2012,24 @@ end list_identity_provider_configs(name) list_identity_provider_configs(name, params::Dict{String,<:Any}) -A list of identity provider configurations. +Lists the identity provider configurations for your cluster. # Arguments -- `name`: The cluster name that you want to list identity provider configurations for. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of identity provider configurations returned by - ListIdentityProviderConfigs in paginated output. 
When you use this parameter, - ListIdentityProviderConfigs returns only maxResults results in a single page along with a - nextToken response element. You can see the remaining results of the initial request by - sending another ListIdentityProviderConfigs request with the returned nextToken value. This - value can be between 1 and 100. If you don't use this parameter, - ListIdentityProviderConfigs returns up to 100 results and a nextToken value, if applicable. -- `"nextToken"`: The nextToken value returned from a previous paginated - IdentityProviderConfigsRequest where maxResults was used and the results exceeded the value - of that parameter. Pagination continues from the end of the previous results that returned - the nextToken value. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. """ function list_identity_provider_configs( name; aws_config::AbstractAWSConfig=global_aws_config() @@ -1237,28 +2053,75 @@ function list_identity_provider_configs( ) end +""" + list_insights(name) + list_insights(name, params::Dict{String,<:Any}) + +Returns a list of all insights checked for against the specified cluster. You can filter +which insights are returned by category, associated Kubernetes version, and status. + +# Arguments +- `name`: The name of the Amazon EKS cluster associated with the insights. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: The criteria to filter your list of insights for your cluster. You can filter + which insights are returned by category, associated Kubernetes version, and status. +- `"maxResults"`: The maximum number of identity provider configurations returned by + ListInsights in paginated output. When you use this parameter, ListInsights returns only + maxResults results in a single page along with a nextToken response element. You can see + the remaining results of the initial request by sending another ListInsights request with + the returned nextToken value. This value can be between 1 and 100. If you don't use this + parameter, ListInsights returns up to 100 results and a nextToken value, if applicable. +- `"nextToken"`: The nextToken value returned from a previous paginated ListInsights + request. When the results of a ListInsights request exceed maxResults, you can use this + value to retrieve the next page of results. This value is null when there are no more + results to return. 
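# Illustrative sketch only for the insights filtering described above: the
# filter payload key ("categories") and the "UPGRADE_READINESS" value are
# assumptions drawn from the prose, and "my-cluster" is a placeholder.
list_insights(
    "my-cluster",
    Dict{String,Any}(
        "filter" => Dict{String,Any}("categories" => ["UPGRADE_READINESS"]),
        "maxResults" => 20,
    ),
)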
+""" +function list_insights(name; aws_config::AbstractAWSConfig=global_aws_config()) + return eks( + "POST", + "/clusters/$(name)/insights"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_insights( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "POST", + "/clusters/$(name)/insights", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_nodegroups(name) list_nodegroups(name, params::Dict{String,<:Any}) -Lists the Amazon EKS managed node groups associated with the specified cluster in your -Amazon Web Services account in the specified Region. Self-managed node groups are not -listed. +Lists the managed node groups associated with the specified cluster in your Amazon Web +Services account in the specified Amazon Web Services Region. Self-managed node groups +aren't listed. # Arguments -- `name`: The name of the Amazon EKS cluster that you would like to list node groups in. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of node group results returned by ListNodegroups in - paginated output. When you use this parameter, ListNodegroups returns only maxResults - results in a single page along with a nextToken response element. You can see the remaining - results of the initial request by sending another ListNodegroups request with the returned - nextToken value. This value can be between 1 and 100. If you don't use this parameter, - ListNodegroups returns up to 100 results and a nextToken value if applicable. -- `"nextToken"`: The nextToken value returned from a previous paginated ListNodegroups - request where maxResults was used and the results exceeded the value of that parameter. - Pagination continues from the end of the previous results that returned the nextToken value. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. """ function list_nodegroups(name; aws_config::AbstractAWSConfig=global_aws_config()) return eks( @@ -1280,6 +2143,56 @@ function list_nodegroups( ) end +""" + list_pod_identity_associations(name) + list_pod_identity_associations(name, params::Dict{String,<:Any}) + +List the EKS Pod Identity associations in a cluster. You can filter the list by the +namespace that the association is in or the service account that the association uses. + +# Arguments +- `name`: The name of the cluster that the associations are in. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: The maximum number of EKS Pod Identity association results returned by + ListPodIdentityAssociations in paginated output. When you use this parameter, + ListPodIdentityAssociations returns only maxResults results in a single page along with a + nextToken response element. You can see the remaining results of the initial request by + sending another ListPodIdentityAssociations request with the returned nextToken value. This + value can be between 1 and 100. If you don't use this parameter, + ListPodIdentityAssociations returns up to 100 results and a nextToken value if applicable. +- `"namespace"`: The name of the Kubernetes namespace inside the cluster that the + associations are in. +- `"nextToken"`: The nextToken value returned from a previous paginated ListUpdates request + where maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + token should be treated as an opaque identifier that is used only to retrieve the next + items in a list and not for other programmatic purposes. +- `"serviceAccount"`: The name of the Kubernetes service account that the associations use. +""" +function list_pod_identity_associations( + name; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/clusters/$(name)/pod-identity-associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_pod_identity_associations( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "GET", + "/clusters/$(name)/pod-identity-associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1287,9 +2200,8 @@ end List the tags for an Amazon EKS resource. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) that identifies the resource for which to - list the tags. Currently, the supported resources are Amazon EKS clusters and managed node - groups. +- `resource_arn`: The Amazon Resource Name (ARN) that identifies the resource to list tags + for. """ function list_tags_for_resource( @@ -1320,8 +2232,8 @@ end list_updates(name) list_updates(name, params::Dict{String,<:Any}) -Lists the updates associated with an Amazon EKS cluster or managed node group in your -Amazon Web Services account, in the specified Region. +Lists the updates associated with an Amazon EKS resource in your Amazon Web Services +account, in the specified Amazon Web Services Region. # Arguments - `name`: The name of the Amazon EKS cluster to list updates for. @@ -1329,15 +2241,17 @@ Amazon Web Services account, in the specified Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"addonName"`: The names of the installed add-ons that have available updates. -- `"maxResults"`: The maximum number of update results returned by ListUpdates in paginated - output. When you use this parameter, ListUpdates returns only maxResults results in a - single page along with a nextToken response element. You can see the remaining results of - the initial request by sending another ListUpdates request with the returned nextToken - value. This value can be between 1 and 100. If you don't use this parameter, ListUpdates - returns up to 100 results and a nextToken value if applicable. 
-- `"nextToken"`: The nextToken value returned from a previous paginated ListUpdates request - where maxResults was used and the results exceeded the value of that parameter. Pagination - continues from the end of the previous results that returned the nextToken value. +- `"maxResults"`: The maximum number of results, returned in paginated output. You receive + maxResults in a single page, along with a nextToken response element. You can see the + remaining results of the initial request by sending another request with the returned + nextToken value. This value can be between 1 and 100. If you don't use this parameter, 100 + results and a nextToken value, if applicable, are returned. +- `"nextToken"`: The nextToken value returned from a previous paginated request, where + maxResults was used and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the nextToken value. This + value is null when there are no more results to return. This token should be treated as an + opaque identifier that is used only to retrieve the next items in a list and not for other + programmatic purposes. - `"nodegroupName"`: The name of the Amazon EKS managed node group to list updates for. """ function list_updates(name; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1369,23 +2283,23 @@ be connected to the Amazon EKS control plane to view current information about t and its nodes. Cluster connection requires two steps. First, send a RegisterClusterRequest to add it to the Amazon EKS control plane. Second, a Manifest containing the activationID and activationCode must be applied to the Kubernetes cluster -through it's native provider to provide visibility. After the Manifest is updated and -applied, then the connected cluster is visible to the Amazon EKS control plane. If the -Manifest is not applied within three days, then the connected cluster will no longer be -visible and must be deregistered. See DeregisterCluster. +through it's native provider to provide visibility. After the manifest is updated and +applied, the connected cluster is visible to the Amazon EKS control plane. If the manifest +isn't applied within three days, the connected cluster will no longer be visible and must +be deregistered using DeregisterCluster. # Arguments - `connector_config`: The configuration settings required to connect the Kubernetes cluster to the Amazon EKS control plane. -- `name`: Define a unique name for this cluster for your Region. +- `name`: A unique name for this cluster in your Amazon Web Services Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. -- `"tags"`: The metadata that you apply to the cluster to assist with categorization and - organization. Each tag consists of a key and an optional value, both of which you define. - Cluster tags do not propagate to any other resources associated with the cluster. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"tags"`: Metadata that assists with categorization and organization. Each tag consists + of a key and an optional value. You define both. Tags don't propagate to any other cluster + or Amazon Web Services resources. 
""" function register_cluster( connectorConfig, name; aws_config::AbstractAWSConfig=global_aws_config() @@ -1431,17 +2345,19 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Associates the specified tags to a resource with the specified resourceArn. If existing -tags on a resource are not specified in the request parameters, they are not changed. When -a resource is deleted, the tags associated with that resource are deleted as well. Tags -that you create for Amazon EKS resources do not propagate to any other resources associated -with the cluster. For example, if you tag a cluster with this operation, that tag does not -automatically propagate to the subnets and nodes associated with the cluster. +Associates the specified tags to an Amazon EKS resource with the specified resourceArn. If +existing tags on a resource are not specified in the request parameters, they aren't +changed. When a resource is deleted, the tags associated with that resource are also +deleted. Tags that you create for Amazon EKS resources don't propagate to any other +resources associated with the cluster. For example, if you tag a cluster with this +operation, that tag doesn't automatically propagate to the subnets and nodes associated +with the cluster. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource to which to add tags. - Currently, the supported resources are Amazon EKS clusters and managed node groups. -- `tags`: The tags to add to the resource. A tag is an array of key-value pairs. +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to add tags to. +- `tags`: Metadata that assists with categorization and organization. Each tag consists of + a key and an optional value. You define both. Tags don't propagate to any other cluster or + Amazon Web Services resources. """ function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1472,12 +2388,11 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Deletes specified tags from a resource. +Deletes specified tags from an Amazon EKS resource. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource from which to delete tags. - Currently, the supported resources are Amazon EKS clusters and managed node groups. -- `tag_keys`: The keys of the tags to be removed. +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to delete tags from. +- `tag_keys`: The keys of the tags to remove. """ function untag_resource( @@ -1506,6 +2421,68 @@ function untag_resource( ) end +""" + update_access_entry(name, principal_arn) + update_access_entry(name, principal_arn, params::Dict{String,<:Any}) + +Updates an access entry. + +# Arguments +- `name`: The name of your cluster. +- `principal_arn`: The ARN of the IAM principal for the AccessEntry. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"kubernetesGroups"`: The value for name that you've specified for kind: Group as a + subject in a Kubernetes RoleBinding or ClusterRoleBinding object. Amazon EKS doesn't + confirm that the value for name exists in any bindings on your cluster. You can specify one + or more names. 
Kubernetes authorizes the principalArn of the access entry to access any + cluster objects that you've specified in a Kubernetes Role or ClusterRole object that is + also specified in a binding's roleRef. For more information about creating Kubernetes + RoleBinding, ClusterRoleBinding, Role, or ClusterRole objects, see Using RBAC Authorization + in the Kubernetes documentation. If you want Amazon EKS to authorize the principalArn + (instead of, or in addition to Kubernetes authorizing the principalArn), you can associate + one or more access policies to the access entry using AssociateAccessPolicy. If you + associate any access policies, the principalARN has all permissions assigned in the + associated access policies and all permissions in any Kubernetes Role or ClusterRole + objects that the group names are bound to. +- `"username"`: The username to authenticate to Kubernetes with. We recommend not + specifying a username and letting Amazon EKS specify it for you. For more information about + the value Amazon EKS specifies for you, or constraints before specifying your own username, + see Creating access entries in the Amazon EKS User Guide. +""" +function update_access_entry( + name, principalArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "POST", + "/clusters/$(name)/access-entries/$(principalArn)", + Dict{String,Any}("clientRequestToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_access_entry( + name, + principalArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/clusters/$(name)/access-entries/$(principalArn)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("clientRequestToken" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_addon(addon_name, name) update_addon(addon_name, name, params::Dict{String,<:Any}) @@ -1515,16 +2492,22 @@ Updates an Amazon EKS add-on. # Arguments - `addon_name`: The name of the add-on. The name must match one of the names returned by ListAddons . -- `name`: The name of the cluster. +- `name`: The name of your cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"addonVersion"`: The version of the add-on. The version must match one of the versions returned by DescribeAddonVersions . -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. - `"configurationValues"`: The set of configuration values for the add-on that's created. - The values that you provide are validated against the schema in DescribeAddonConfiguration. + The values that you provide are validated against the schema returned by + DescribeAddonConfiguration. +- `"podIdentityAssociations"`: An array of Pod Identity Assocations to be updated. Each EKS + Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is + left blank, no change. If an empty array is provided, existing Pod Identity Assocations + owned by the Addon are deleted. For more information, see Attach an IAM Role to an Amazon + EKS add-on using Pod Identity in the EKS User Guide. 
- `"resolveConflicts"`: How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Conflicts are handled based on the option you choose: None – Amazon EKS doesn't change the value. The update might @@ -1574,18 +2557,23 @@ end Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of -your cluster update with the DescribeUpdate API operation. You can use this API operation -to enable or disable exporting the Kubernetes control plane logs for your cluster to -CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. -For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User -Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to -exported control plane logs. For more information, see CloudWatch Pricing. You can also -use this API operation to enable or disable public and private access to your cluster's -Kubernetes API server endpoint. By default, public access is enabled, and private access is -disabled. For more information, see Amazon EKS cluster endpoint access control in the -Amazon EKS User Guide . You can't update the subnets or security group IDs for an -existing cluster. Cluster updates are asynchronous, and they should finish within a few -minutes. During an update, the cluster status moves to UPDATING (this status transition is +your cluster update with DescribeUpdate\"/>. You can use this API operation to enable or +disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By +default, cluster control plane logs aren't exported to CloudWatch Logs. For more +information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide . +CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported +control plane logs. For more information, see CloudWatch Pricing. You can also use this +API operation to enable or disable public and private access to your cluster's Kubernetes +API server endpoint. By default, public access is enabled, and private access is disabled. +For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS +User Guide . You can also use this API operation to choose different subnets and security +groups for the cluster. You must specify at least two subnets that are in different +Availability Zones. You can't change which VPC the subnets are from, the subnets must be in +the same VPC as the subnets that the cluster was created with. For more information about +the VPC requirements, see +https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the Amazon EKS User +Guide . Cluster updates are asynchronous, and they should finish within a few minutes. +During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active. @@ -1594,8 +2582,9 @@ cluster status moves to Active. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. +- `"accessConfig"`: The access configuration for the cluster. 
+- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. - `"logging"`: Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the @@ -1648,8 +2637,8 @@ cluster to a new Kubernetes version. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. """ function update_cluster_version( name, version; aws_config::AbstractAWSConfig=global_aws_config() @@ -1685,6 +2674,56 @@ function update_cluster_version( ) end +""" + update_eks_anywhere_subscription(auto_renew, id) + update_eks_anywhere_subscription(auto_renew, id, params::Dict{String,<:Any}) + +Update an EKS Anywhere Subscription. Only auto renewal and tags can be updated after +subscription creation. + +# Arguments +- `auto_renew`: A boolean indicating whether or not to automatically renew the subscription. +- `id`: The ID of the subscription. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: Unique, case-sensitive identifier to ensure the idempotency of + the request. +""" +function update_eks_anywhere_subscription( + autoRenew, id; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "POST", + "/eks-anywhere-subscriptions/$(id)", + Dict{String,Any}("autoRenew" => autoRenew, "clientRequestToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_eks_anywhere_subscription( + autoRenew, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/eks-anywhere-subscriptions/$(id)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "autoRenew" => autoRenew, "clientRequestToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_nodegroup_config(name, nodegroup_name) update_nodegroup_config(name, nodegroup_name, params::Dict{String,<:Any}) @@ -1695,14 +2734,14 @@ track the status of your node group update with the DescribeUpdate API operation you can update the Kubernetes labels for a node group or the scaling configuration. # Arguments -- `name`: The name of the Amazon EKS cluster that the managed node group resides in. +- `name`: The name of your cluster. - `nodegroup_name`: The name of the managed node group to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. -- `"labels"`: The Kubernetes labels to be applied to the nodes in the node group after the +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"labels"`: The Kubernetes labels to apply to the nodes in the node group after the update. - `"scalingConfig"`: The scaling configuration details for the Auto Scaling group after the update. @@ -1757,24 +2796,23 @@ request. 
For information about Linux versions, see Amazon EKS optimized Amazon L versions in the Amazon EKS User Guide. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. You cannot roll back a node group to an earlier Kubernetes version or AMI version. When a node in a managed node -group is terminated due to a scaling action or update, the pods in that node are drained +group is terminated due to a scaling action or update, every Pod on that node is drained first. Amazon EKS attempts to drain the nodes gracefully and will fail if it is unable to do so. You can force the update if Amazon EKS is unable to drain the nodes as a result of a -pod disruption budget issue. +Pod disruption budget issue. # Arguments -- `name`: The name of the Amazon EKS cluster that is associated with the managed node group - to update. +- `name`: The name of your cluster. - `nodegroup_name`: The name of the managed node group to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. -- `"force"`: Force the update if the existing node group's pods are unable to be drained - due to a pod disruption budget issue. If an update fails because pods could not be drained, - you can force the update after it fails to terminate the old node whether or not any pods - are running on the node. +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"force"`: Force the update if any Pod on the existing node group can't be drained due to + a Pod disruption budget issue. If an update fails because all Pods can't be drained, you + can force the update after it fails to terminate the old node whether or not any Pod is + running on the node. - `"launchTemplate"`: An object representing a node group's launch template specification. You can only update a node group using a launch template if the node group was originally deployed with a launch template. @@ -1786,14 +2824,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group update will fail. For more information about using launch - templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + templates with Amazon EKS, see Customizing managed nodes with launch templates in the + Amazon EKS User Guide. - `"version"`: The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group update will fail. For more information - about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS - User Guide. + about using launch templates with Amazon EKS, see Customizing managed nodes with launch + templates in the Amazon EKS User Guide. 
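# A hedged example of the forced node group update described above, using the
# documented "version" and "force" keys; the cluster, node group, and version
# values are placeholders.
update_nodegroup_version(
    "my-cluster",
    "my-nodegroup",
    Dict{String,Any}("version" => "1.29", "force" => true),
)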
""" function update_nodegroup_version( name, nodegroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1824,3 +2863,52 @@ function update_nodegroup_version( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_pod_identity_association(association_id, name) + update_pod_identity_association(association_id, name, params::Dict{String,<:Any}) + +Updates a EKS Pod Identity association. Only the IAM role can be changed; an association +can't be moved between clusters, namespaces, or service accounts. If you need to edit the +namespace or service account, you need to delete the association and then create a new +association with your desired settings. + +# Arguments +- `association_id`: The ID of the association to be updated. +- `name`: The name of the cluster that you want to update the association in. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. +- `"roleArn"`: The new IAM role to change the +""" +function update_pod_identity_association( + associationId, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks( + "POST", + "/clusters/$(name)/pod-identity-associations/$(associationId)", + Dict{String,Any}("clientRequestToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_pod_identity_association( + associationId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks( + "POST", + "/clusters/$(name)/pod-identity-associations/$(associationId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("clientRequestToken" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/eks_auth.jl b/src/services/eks_auth.jl new file mode 100644 index 0000000000..13531624c2 --- /dev/null +++ b/src/services/eks_auth.jl @@ -0,0 +1,46 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: eks_auth +using AWS.Compat +using AWS.UUIDs + +""" + assume_role_for_pod_identity(cluster_name, token) + assume_role_for_pod_identity(cluster_name, token, params::Dict{String,<:Any}) + +The Amazon EKS Auth API and the AssumeRoleForPodIdentity action are only used by the EKS +Pod Identity Agent. We recommend that applications use the Amazon Web Services SDKs to +connect to Amazon Web Services services; if credentials from an EKS Pod Identity +association are available in the pod, the latest versions of the SDKs use them +automatically. + +# Arguments +- `cluster_name`: The name of the cluster for the request. +- `token`: The token of the Kubernetes service account for the pod. 
+ +""" +function assume_role_for_pod_identity( + clusterName, token; aws_config::AbstractAWSConfig=global_aws_config() +) + return eks_auth( + "POST", + "/clusters/$(clusterName)/assume-role-for-pod-identity", + Dict{String,Any}("token" => token); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function assume_role_for_pod_identity( + clusterName, + token, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return eks_auth( + "POST", + "/clusters/$(clusterName)/assume-role-for-pod-identity", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("token" => token), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/elastic_load_balancing_v2.jl b/src/services/elastic_load_balancing_v2.jl index ffbc606d3d..32780708d9 100644 --- a/src/services/elastic_load_balancing_v2.jl +++ b/src/services/elastic_load_balancing_v2.jl @@ -58,8 +58,8 @@ end Adds the specified tags to the specified Elastic Load Balancing resource. You can tag your Application Load Balancers, Network Load Balancers, Gateway Load Balancers, target groups, -listeners, and rules. Each tag consists of a key and an optional value. If a resource -already has a tag with the same key, AddTags updates its value. +trust stores, listeners, and rules. Each tag consists of a key and an optional value. If a +resource already has a tag with the same key, AddTags updates its value. # Arguments - `resource_arns`: The Amazon Resource Name (ARN) of the resource. @@ -94,6 +94,44 @@ function add_tags( ) end +""" + add_trust_store_revocations(trust_store_arn) + add_trust_store_revocations(trust_store_arn, params::Dict{String,<:Any}) + +Adds the specified revocation file to the specified trust store. + +# Arguments +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"RevocationContents"`: The revocation file to add. +""" +function add_trust_store_revocations( + TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "AddTrustStoreRevocations", + Dict{String,Any}("TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function add_trust_store_revocations( + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "AddTrustStoreRevocations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("TrustStoreArn" => TrustStoreArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_listener(default_actions, load_balancer_arn) create_listener(default_actions, load_balancer_arn, params::Dict{String,<:Any}) @@ -118,6 +156,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Certificates"`: [HTTPS and TLS listeners] The default certificate for the listener. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault. +- `"MutualAuthentication"`: The mutual authentication configuration information. - `"Port"`: The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer. - `"Protocol"`: The protocol for connections from clients to the load balancer. For @@ -183,9 +222,14 @@ settings, each call succeeds. 
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CustomerOwnedIpv4Pool"`: [Application Load Balancers on Outposts] The ID of the customer-owned address pool (CoIP pool). -- `"IpAddressType"`: The type of IP addresses used by the subnets for your load balancer. - The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 - addresses). +- `"IpAddressType"`: Note: Internal load balancers must use the ipv4 IP address type. + [Application Load Balancers] The IP address type. The possible values are ipv4 (for only + IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 + (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load + Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and + dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer + with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible + values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). - `"Scheme"`: The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from @@ -194,9 +238,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer. The default is an Internet-facing load balancer. You cannot specify a scheme for a Gateway Load Balancer. -- `"SecurityGroups"`: [Application Load Balancers] The IDs of the security groups for the - load balancer. -- `"SubnetMappings"`: The IDs of the public subnets. You can specify only one subnet per +- `"SecurityGroups"`: [Application Load Balancers and Network Load Balancers] The IDs of + the security groups for the load balancer. +- `"SubnetMappings"`: The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets. [Application Load Balancers on @@ -208,14 +252,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets. -- `"Subnets"`: The IDs of the public subnets. You can specify only one subnet per - Availability Zone. You must specify either subnets or subnet mappings, but not both. To - specify an Elastic IP address, specify subnet mappings instead of subnets. [Application - Load Balancers] You must specify subnets from at least two Availability Zones. [Application - Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load - Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network - Load Balancers] You can specify subnets from one or more Availability Zones. [Gateway Load - Balancers] You can specify subnets from one or more Availability Zones. +- `"Subnets"`: The IDs of the subnets. 
You can specify only one subnet per Availability + Zone. You must specify either subnets or subnet mappings, but not both. To specify an + Elastic IP address, specify subnet mappings instead of subnets. [Application Load + Balancers] You must specify subnets from at least two Availability Zones. [Application Load + Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on + Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] + You can specify subnets from one or more Availability Zones. [Gateway Load Balancers] You + can specify subnets from one or more Availability Zones. - `"Tags"`: The tags to assign to the load balancer. - `"Type"`: The type of load balancer. The default is application. """ @@ -406,6 +450,66 @@ function create_target_group( ) end +""" + create_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, name) + create_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, name, params::Dict{String,<:Any}) + +Creates a trust store. + +# Arguments +- `ca_certificates_bundle_s3_bucket`: The Amazon S3 bucket for the ca certificates bundle. +- `ca_certificates_bundle_s3_key`: The Amazon S3 path for the ca certificates bundle. +- `name`: The name of the trust store. This name must be unique per region and cannot be + changed after creation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CaCertificatesBundleS3ObjectVersion"`: The Amazon S3 object version for the ca + certificates bundle. If undefined the current version is used. +- `"Tags"`: The tags to assign to the trust store. +""" +function create_trust_store( + CaCertificatesBundleS3Bucket, + CaCertificatesBundleS3Key, + Name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "CreateTrustStore", + Dict{String,Any}( + "CaCertificatesBundleS3Bucket" => CaCertificatesBundleS3Bucket, + "CaCertificatesBundleS3Key" => CaCertificatesBundleS3Key, + "Name" => Name, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_trust_store( + CaCertificatesBundleS3Bucket, + CaCertificatesBundleS3Key, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "CreateTrustStore", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CaCertificatesBundleS3Bucket" => CaCertificatesBundleS3Bucket, + "CaCertificatesBundleS3Key" => CaCertificatesBundleS3Key, + "Name" => Name, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_listener(listener_arn) delete_listener(listener_arn, params::Dict{String,<:Any}) @@ -550,12 +654,54 @@ function delete_target_group( ) end +""" + delete_trust_store(trust_store_arn) + delete_trust_store(trust_store_arn, params::Dict{String,<:Any}) + +Deletes a trust store. + +# Arguments +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. 
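# Refers to create_trust_store above: a hedged sketch that creates a trust
# store from a CA certificates bundle already uploaded to S3. The bucket, key,
# and name are placeholders; the trust store ARN in the response is what
# DeleteTrustStore and a listener's mutual authentication configuration consume.
create_trust_store("my-bucket", "cacerts/bundle.pem", "my-trust-store")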
+ +""" +function delete_trust_store( + TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "DeleteTrustStore", + Dict{String,Any}("TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_trust_store( + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "DeleteTrustStore", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("TrustStoreArn" => TrustStoreArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ deregister_targets(target_group_arn, targets) deregister_targets(target_group_arn, targets, params::Dict{String,<:Any}) Deregisters the specified targets from the specified target group. After the targets are -deregistered, they no longer receive traffic from the load balancer. +deregistered, they no longer receive traffic from the load balancer. The load balancer +stops sending requests to targets that are deregistering, but uses connection draining to +ensure that in-flight traffic completes on the existing connections. This deregistration +delay is configured by default but can be updated for each target group. For more +information, see the following: Deregistration delay in the Application Load Balancers +User Guide Deregistration delay in the Network Load Balancers User Guide +Deregistration delay in the Gateway Load Balancers User Guide Note: If the specified +target does not exist, the action returns successfully. # Arguments - `target_group_arn`: The Amazon Resource Name (ARN) of the target group. @@ -949,6 +1095,7 @@ Describes the health of the specified targets or all of your targets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Include"`: Used to inclue anomaly detection information. - `"Targets"`: The targets. """ function describe_target_health( @@ -976,6 +1123,197 @@ function describe_target_health( ) end +""" + describe_trust_store_associations(trust_store_arn) + describe_trust_store_associations(trust_store_arn, params::Dict{String,<:Any}) + +Describes all resources associated with the specified trust store. + +# Arguments +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: The marker for the next set of results. (You received this marker from a + previous call.) +- `"PageSize"`: The maximum number of results to return with this call. 
+""" +function describe_trust_store_associations( + TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "DescribeTrustStoreAssociations", + Dict{String,Any}("TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_trust_store_associations( + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "DescribeTrustStoreAssociations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("TrustStoreArn" => TrustStoreArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_trust_store_revocations(trust_store_arn) + describe_trust_store_revocations(trust_store_arn, params::Dict{String,<:Any}) + +Describes the revocation files in use by the specified trust store arn, or revocation ID. + +# Arguments +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: The marker for the next set of results. (You received this marker from a + previous call.) +- `"PageSize"`: The maximum number of results to return with this call. +- `"RevocationIds"`: The revocation IDs of the revocation files you want to describe. +""" +function describe_trust_store_revocations( + TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "DescribeTrustStoreRevocations", + Dict{String,Any}("TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_trust_store_revocations( + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "DescribeTrustStoreRevocations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("TrustStoreArn" => TrustStoreArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_trust_stores() + describe_trust_stores(params::Dict{String,<:Any}) + +Describes all trust stores for a given account by trust store arn’s or name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: The marker for the next set of results. (You received this marker from a + previous call.) +- `"Names"`: The names of the trust stores. +- `"PageSize"`: The maximum number of results to return with this call. +- `"TrustStoreArns"`: The Amazon Resource Name (ARN) of the trust store. +""" +function describe_trust_stores(; aws_config::AbstractAWSConfig=global_aws_config()) + return elastic_load_balancing_v2( + "DescribeTrustStores"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_trust_stores( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "DescribeTrustStores", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_trust_store_ca_certificates_bundle(trust_store_arn) + get_trust_store_ca_certificates_bundle(trust_store_arn, params::Dict{String,<:Any}) + +Retrieves the ca certificate bundle. This action returns a pre-signed S3 URI which is +active for ten minutes. + +# Arguments +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. 
+ +""" +function get_trust_store_ca_certificates_bundle( + TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "GetTrustStoreCaCertificatesBundle", + Dict{String,Any}("TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_trust_store_ca_certificates_bundle( + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "GetTrustStoreCaCertificatesBundle", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("TrustStoreArn" => TrustStoreArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_trust_store_revocation_content(revocation_id, trust_store_arn) + get_trust_store_revocation_content(revocation_id, trust_store_arn, params::Dict{String,<:Any}) + +Retrieves the specified revocation file. This action returns a pre-signed S3 URI which is +active for ten minutes. + +# Arguments +- `revocation_id`: The revocation ID of the revocation file. +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +""" +function get_trust_store_revocation_content( + RevocationId, TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "GetTrustStoreRevocationContent", + Dict{String,Any}("RevocationId" => RevocationId, "TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_trust_store_revocation_content( + RevocationId, + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "GetTrustStoreRevocationContent", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RevocationId" => RevocationId, "TrustStoreArn" => TrustStoreArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_listener(listener_arn) modify_listener(listener_arn, params::Dict{String,<:Any}) @@ -1001,6 +1339,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault. - `"DefaultActions"`: The actions for the default rule. +- `"MutualAuthentication"`: The mutual authentication configuration information. - `"Port"`: The port for connections from clients to the load balancer. You cannot specify a port for a Gateway Load Balancer. - `"Protocol"`: The protocol for connections from clients to the load balancer. Application @@ -1222,6 +1561,64 @@ function modify_target_group_attributes( ) end +""" + modify_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, trust_store_arn) + modify_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, trust_store_arn, params::Dict{String,<:Any}) + +Update the ca certificate bundle for a given trust store. + +# Arguments +- `ca_certificates_bundle_s3_bucket`: The Amazon S3 bucket for the ca certificates bundle. +- `ca_certificates_bundle_s3_key`: The Amazon S3 path for the ca certificates bundle. +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CaCertificatesBundleS3ObjectVersion"`: The Amazon S3 object version for the ca + certificates bundle. If undefined the current version is used. 
+""" +function modify_trust_store( + CaCertificatesBundleS3Bucket, + CaCertificatesBundleS3Key, + TrustStoreArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "ModifyTrustStore", + Dict{String,Any}( + "CaCertificatesBundleS3Bucket" => CaCertificatesBundleS3Bucket, + "CaCertificatesBundleS3Key" => CaCertificatesBundleS3Key, + "TrustStoreArn" => TrustStoreArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_trust_store( + CaCertificatesBundleS3Bucket, + CaCertificatesBundleS3Key, + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "ModifyTrustStore", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CaCertificatesBundleS3Bucket" => CaCertificatesBundleS3Bucket, + "CaCertificatesBundleS3Key" => CaCertificatesBundleS3Key, + "TrustStoreArn" => TrustStoreArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ register_targets(target_group_arn, targets) register_targets(target_group_arn, targets, params::Dict{String,<:Any}) @@ -1358,6 +1755,51 @@ function remove_tags( ) end +""" + remove_trust_store_revocations(revocation_ids, trust_store_arn) + remove_trust_store_revocations(revocation_ids, trust_store_arn, params::Dict{String,<:Any}) + +Removes the specified revocation file from the specified trust store. + +# Arguments +- `revocation_ids`: The revocation IDs of the revocation files you want to remove. +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +""" +function remove_trust_store_revocations( + RevocationIds, TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "RemoveTrustStoreRevocations", + Dict{String,Any}( + "RevocationIds" => RevocationIds, "TrustStoreArn" => TrustStoreArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function remove_trust_store_revocations( + RevocationIds, + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "RemoveTrustStoreRevocations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RevocationIds" => RevocationIds, "TrustStoreArn" => TrustStoreArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ set_ip_address_type(ip_address_type, load_balancer_arn) set_ip_address_type(ip_address_type, load_balancer_arn, params::Dict{String,<:Any}) @@ -1365,9 +1807,14 @@ end Sets the type of IP addresses used by the subnets of the specified load balancer. # Arguments -- `ip_address_type`: The IP address type. The possible values are ipv4 (for IPv4 addresses) - and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load - balancer with a UDP or TCP_UDP listener. +- `ip_address_type`: Note: Internal load balancers must use the ipv4 IP address type. + [Application Load Balancers] The IP address type. The possible values are ipv4 (for only + IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 + (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load + Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and + dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer + with a UDP or TCP_UDP listener. 
[Gateway Load Balancers] The IP address type. The possible + values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). - `load_balancer_arn`: The Amazon Resource Name (ARN) of the load balancer. """ @@ -1446,14 +1893,21 @@ end set_security_groups(load_balancer_arn, security_groups) set_security_groups(load_balancer_arn, security_groups, params::Dict{String,<:Any}) -Associates the specified security groups with the specified Application Load Balancer. The -specified security groups override the previously associated security groups. You can't -specify a security group for a Network Load Balancer or Gateway Load Balancer. +Associates the specified security groups with the specified Application Load Balancer or +Network Load Balancer. The specified security groups override the previously associated +security groups. You can't perform this operation on a Network Load Balancer unless you +specified a security group for the load balancer when you created it. You can't associate a +security group with a Gateway Load Balancer. # Arguments - `load_balancer_arn`: The Amazon Resource Name (ARN) of the load balancer. - `security_groups`: The IDs of the security groups. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic"`: Indicates whether to evaluate + inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web + Services PrivateLink. The default is on. """ function set_security_groups( LoadBalancerArn, SecurityGroups; aws_config::AbstractAWSConfig=global_aws_config() @@ -1494,20 +1948,25 @@ end set_subnets(load_balancer_arn, params::Dict{String,<:Any}) Enables the Availability Zones for the specified public subnets for the specified -Application Load Balancer or Network Load Balancer. The specified subnets replace the -previously enabled subnets. When you specify subnets for a Network Load Balancer, you must -include all subnets that were enabled previously, with their existing configurations, plus -any additional subnets. +Application Load Balancer, Network Load Balancer or Gateway Load Balancer. The specified +subnets replace the previously enabled subnets. When you specify subnets for a Network Load +Balancer, or Gateway Load Balancer you must include all subnets that were enabled +previously, with their existing configurations, plus any additional subnets. # Arguments - `load_balancer_arn`: The Amazon Resource Name (ARN) of the load balancer. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"IpAddressType"`: [Network Load Balancers] The type of IP addresses used by the subnets - for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack - (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP - or TCP_UDP listener. . +- `"IpAddressType"`: [Application Load Balancers] The IP address type. The possible values + are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and + dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 + addresses). [Network Load Balancers] The type of IP addresses used by the subnets for your + load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 + and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or + TCP_UDP listener. 
[Gateway Load Balancers] The type of IP addresses used by the subnets for + your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for + IPv4 and IPv6 addresses). - `"SubnetMappings"`: The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot @@ -1518,12 +1977,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. + [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. - `"Subnets"`: The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] - You can specify subnets from one or more Availability Zones. + You can specify subnets from one or more Availability Zones. [Gateway Load Balancers] You + can specify subnets from one or more Availability Zones. """ function set_subnets(LoadBalancerArn; aws_config::AbstractAWSConfig=global_aws_config()) return elastic_load_balancing_v2( diff --git a/src/services/elasticache.jl b/src/services/elasticache.jl index 42a0057956..f1917987c5 100644 --- a/src/services/elasticache.jl +++ b/src/services/elasticache.jl @@ -249,6 +249,65 @@ function complete_migration( ) end +""" + copy_serverless_cache_snapshot(source_serverless_cache_snapshot_name, target_serverless_cache_snapshot_name) + copy_serverless_cache_snapshot(source_serverless_cache_snapshot_name, target_serverless_cache_snapshot_name, params::Dict{String,<:Any}) + +Creates a copy of an existing serverless cache’s snapshot. Available for Redis only. + +# Arguments +- `source_serverless_cache_snapshot_name`: The identifier of the existing serverless + cache’s snapshot to be copied. Available for Redis only. +- `target_serverless_cache_snapshot_name`: The identifier for the snapshot to be created. + Available for Redis only. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"KmsKeyId"`: The identifier of the KMS key used to encrypt the target snapshot. + Available for Redis only. +- `"Tags"`: A list of tags to be added to the target snapshot resource. A tag is a + key-value pair. Available for Redis only. 
Default: NULL +""" +function copy_serverless_cache_snapshot( + SourceServerlessCacheSnapshotName, + TargetServerlessCacheSnapshotName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "CopyServerlessCacheSnapshot", + Dict{String,Any}( + "SourceServerlessCacheSnapshotName" => SourceServerlessCacheSnapshotName, + "TargetServerlessCacheSnapshotName" => TargetServerlessCacheSnapshotName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function copy_serverless_cache_snapshot( + SourceServerlessCacheSnapshotName, + TargetServerlessCacheSnapshotName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "CopyServerlessCacheSnapshot", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "SourceServerlessCacheSnapshotName" => + SourceServerlessCacheSnapshotName, + "TargetServerlessCacheSnapshotName" => + TargetServerlessCacheSnapshotName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ copy_snapshot(source_snapshot_name, target_snapshot_name) copy_snapshot(source_snapshot_name, target_snapshot_name, params::Dict{String,<:Any}) @@ -371,37 +430,40 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current - generation: M6g node types (available only for Redis engine version 5.0.6 onward and for - Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, - cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, - cache.m6g.16xlarge For region availability, see Supported Node Types M5 node types: - cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine - version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, - cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, - cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node - types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: - cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge - Memory optimized: Current generation: R6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward). 
cache.r6g.large, + generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, + cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region + availability, see Supported Node Types M6g node types (available only for Redis engine + version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, + cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, + cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, + cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine + version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: + cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for these types.) T1 + node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not + recommended. Existing clusters are still supported but creation of new clusters is not + supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: + Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, + cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region + availability, see Supported Node Types R6g node types (available only for Redis engine + version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, - cache.r6g.12xlarge, cache.r6g.16xlarge For region availability, see Supported Node Types - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous - generation: (not recommended. Existing clusters are still supported but creation of new - clusters is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All - current generation instance types are created in Amazon VPC by default. Redis append-only - files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic - failover is not supported on T1 instances. Redis configuration variables appendonly and - appendfsync are not supported on Redis version 2.8.22 and later. + cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, + cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) 
M2 node types: + cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, + cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node + type info All current generation instance types are created in Amazon VPC by default. + Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ + with automatic failover is not supported on T1 instances. Redis configuration variables + appendonly and appendfsync are not supported on Redis version 2.8.22 and later. - `"CacheParameterGroupName"`: The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a @@ -769,7 +831,7 @@ end Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Redis (cluster mode -disabled) replication group is a collection of clusters, where one of the clusters is a +disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. A Redis cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and @@ -823,37 +885,40 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current - generation: M6g node types (available only for Redis engine version 5.0.6 onward and for - Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, - cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, - cache.m6g.16xlarge For region availability, see Supported Node Types M5 node types: - cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for Redis engine - version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, - cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, - cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node - types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: - cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Compute optimized: - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge - Memory optimized: Current generation: R6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward). 
cache.r6g.large, + generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, + cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region + availability, see Supported Node Types M6g node types (available only for Redis engine + version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, + cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, + cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, + cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine + version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: + cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for these types.) T1 + node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not + recommended. Existing clusters are still supported but creation of new clusters is not + supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: + Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, + cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region + availability, see Supported Node Types R6g node types (available only for Redis engine + version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, - cache.r6g.12xlarge, cache.r6g.16xlarge For region availability, see Supported Node Types - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous - generation: (not recommended. Existing clusters are still supported but creation of new - clusters is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All - current generation instance types are created in Amazon VPC by default. Redis append-only - files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic - failover is not supported on T1 instances. Redis configuration variables appendonly and - appendfsync are not supported on Redis version 2.8.22 and later. + cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, + cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) 
M2 node types: + cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, + cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node + type info All current generation instance types are created in Amazon VPC by default. + Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ + with automatic failover is not supported on T1 instances. Redis configuration variables + appendonly and appendfsync are not supported on Redis version 2.8.22 and later. - `"CacheParameterGroupName"`: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis version 3.2.4 or later, only one node @@ -926,9 +991,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of NumCacheClusters. Default: system chosen Availability Zones. - `"PreferredMaintenanceWindow"`: Specifies the weekly time range during which maintenance - on the cluster is performed. It is specified as a range in the format - ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute - period. Valid values for ddd are: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat @@ -942,6 +1004,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SecurityGroupIds"`: One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). +- `"ServerlessCacheSnapshotName"`: The name of the snapshot used to create a replication + group. Available for Redis only. - `"SnapshotArns"`: A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new @@ -1019,6 +1083,137 @@ function create_replication_group( ) end +""" + create_serverless_cache(engine, serverless_cache_name) + create_serverless_cache(engine, serverless_cache_name, params::Dict{String,<:Any}) + +Creates a serverless cache. + +# Arguments +- `engine`: The name of the cache engine to be used for creating the serverless cache. +- `serverless_cache_name`: User-provided identifier for the serverless cache. This + parameter is stored as a lowercase string. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CacheUsageLimits"`: Sets the cache usage limits for storage and ElastiCache Processing + Units for the cache. +- `"DailySnapshotTime"`: The daily time that snapshots will be created from the new + serverless cache. By default this number is populated with 0, i.e. no snapshots will be + created on an automatic daily basis. Available for Redis only. +- `"Description"`: User-provided description for the serverless cache. The default is NULL, + i.e. if no description is provided then an empty string will be returned. The maximum + length is 255 characters. 
+- `"KmsKeyId"`: ARN of the customer managed key for encrypting the data at rest. If no KMS + key is provided, a default service key is used. +- `"MajorEngineVersion"`: The version of the cache engine that will be used to create the + serverless cache. +- `"SecurityGroupIds"`: A list of the one or more VPC security groups to be associated with + the serverless cache. The security group will authorize traffic access for the VPC + end-point (private-link). If no other information is given this will be the VPC’s Default + Security Group that is associated with the cluster VPC end-point. +- `"SnapshotArnsToRestore"`: The ARN(s) of the snapshot that the new serverless cache will + be created from. Available for Redis only. +- `"SnapshotRetentionLimit"`: The number of snapshots that will be retained for the + serverless cache that is being created. As new snapshots beyond this limit are added, the + oldest snapshots will be deleted on a rolling basis. Available for Redis only. +- `"SubnetIds"`: A list of the identifiers of the subnets where the VPC endpoint for the + serverless cache will be deployed. All the subnetIds must belong to the same VPC. +- `"Tags"`: The list of tags (key, value) pairs to be added to the serverless cache + resource. Default is NULL. +- `"UserGroupId"`: The identifier of the UserGroup to be associated with the serverless + cache. Available for Redis only. Default is NULL. +""" +function create_serverless_cache( + Engine, ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() +) + return elasticache( + "CreateServerlessCache", + Dict{String,Any}("Engine" => Engine, "ServerlessCacheName" => ServerlessCacheName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_serverless_cache( + Engine, + ServerlessCacheName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "CreateServerlessCache", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Engine" => Engine, "ServerlessCacheName" => ServerlessCacheName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_serverless_cache_snapshot(serverless_cache_name, serverless_cache_snapshot_name) + create_serverless_cache_snapshot(serverless_cache_name, serverless_cache_snapshot_name, params::Dict{String,<:Any}) + +This API creates a copy of an entire ServerlessCache at a specific moment in time. +Available for Redis only. + +# Arguments +- `serverless_cache_name`: The name of an existing serverless cache. The snapshot is + created from this cache. Available for Redis only. +- `serverless_cache_snapshot_name`: The name for the snapshot being created. Must be unique + for the customer account. Available for Redis only. Must be between 1 and 255 characters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"KmsKeyId"`: The ID of the KMS key used to encrypt the snapshot. Available for Redis + only. Default: NULL +- `"Tags"`: A list of tags to be added to the snapshot resource. A tag is a key-value pair. + Available for Redis only. 
+
+"""
+function create_serverless_cache_snapshot(
+    ServerlessCacheName,
+    ServerlessCacheSnapshotName;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return elasticache(
+        "CreateServerlessCacheSnapshot",
+        Dict{String,Any}(
+            "ServerlessCacheName" => ServerlessCacheName,
+            "ServerlessCacheSnapshotName" => ServerlessCacheSnapshotName,
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_serverless_cache_snapshot(
+    ServerlessCacheName,
+    ServerlessCacheSnapshotName,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return elasticache(
+        "CreateServerlessCacheSnapshot",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "ServerlessCacheName" => ServerlessCacheName,
+                    "ServerlessCacheSnapshotName" => ServerlessCacheSnapshotName,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     create_snapshot(snapshot_name)
     create_snapshot(snapshot_name, params::Dict{String,<:Any})
@@ -1144,7 +1339,7 @@ Using Role Based Access Control (RBAC)

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"Tags"`: A list of tags to be added to this resource. A tag is a key-value pair. A tag
-  key must be accompanied by a tag value, although null is accepted.
+  key must be accompanied by a tag value, although null is accepted. Available for Redis only.
 - `"UserIds"`: The list of user IDs that belong to the user group.
 """
 function create_user_group(
@@ -1596,6 +1791,91 @@ function delete_replication_group(
     )
 end

+"""
+    delete_serverless_cache(serverless_cache_name)
+    delete_serverless_cache(serverless_cache_name, params::Dict{String,<:Any})
+
+Deletes a specified existing serverless cache.
+
+# Arguments
+- `serverless_cache_name`: The identifier of the serverless cache to be deleted.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"FinalSnapshotName"`: Name of the final snapshot to be taken before the serverless cache
+  is deleted. Available for Redis only. Default: NULL, i.e. a final snapshot is not taken.
+"""
+function delete_serverless_cache(
+    ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return elasticache(
+        "DeleteServerlessCache",
+        Dict{String,Any}("ServerlessCacheName" => ServerlessCacheName);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_serverless_cache(
+    ServerlessCacheName,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return elasticache(
+        "DeleteServerlessCache",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("ServerlessCacheName" => ServerlessCacheName),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_serverless_cache_snapshot(serverless_cache_snapshot_name)
+    delete_serverless_cache_snapshot(serverless_cache_snapshot_name, params::Dict{String,<:Any})
+
+Deletes an existing serverless cache snapshot. Available for Redis only.
+
+# Arguments
+- `serverless_cache_snapshot_name`: Identifier of the snapshot to be deleted. Available for
+  Redis only.
+ +""" +function delete_serverless_cache_snapshot( + ServerlessCacheSnapshotName; aws_config::AbstractAWSConfig=global_aws_config() +) + return elasticache( + "DeleteServerlessCacheSnapshot", + Dict{String,Any}("ServerlessCacheSnapshotName" => ServerlessCacheSnapshotName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_serverless_cache_snapshot( + ServerlessCacheSnapshotName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "DeleteServerlessCacheSnapshot", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ServerlessCacheSnapshotName" => ServerlessCacheSnapshotName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_snapshot(snapshot_name) delete_snapshot(snapshot_name, params::Dict{String,<:Any}) @@ -2123,38 +2403,41 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys those reservations matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous - generation counterparts. General purpose: Current generation: M6g node types - (available only for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, - cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge For region availability, see - Supported Node Types M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, - cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, - cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types - (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 - onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, - cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters - are still supported but creation of new clusters is not supported for these types.) T1 - node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not - recommended. Existing clusters are still supported but creation of new clusters is not - supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: - Current generation: R6g node types (available only for Redis engine version 5.0.6 onward - and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, + generation counterparts. 
General purpose: Current generation: M7g node types: + cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types + M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached + engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node + types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for + Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, + cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + cache.t2.medium Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing + clusters are still supported but creation of new clusters is not supported for these + types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g + node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, + cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see + Supported Node Types R6g node types (available only for Redis engine version 5.0.6 + onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, - cache.r6g.16xlarge For region availability, see Supported Node Types R5 node types: - cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not - recommended. Existing clusters are still supported but creation of new clusters is not - supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, - cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current - generation instance types are created in Amazon VPC by default. Redis append-only files - (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is - not supported on T1 instances. Redis configuration variables appendonly and appendfsync - are not supported on Redis version 2.8.22 and later. + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, + cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + Previous generation: (not recommended. Existing clusters are still supported but creation + of new clusters is not supported for these types.) 
M2 node types: cache.m2.xlarge, + cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, + cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All + current generation instance types are created in Amazon VPC by default. Redis append-only + files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic + failover is not supported on T1 instances. Redis configuration variables appendonly and + appendfsync are not supported on Redis version 2.8.22 and later. - `"Duration"`: The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: 1 | 3 | 31536000 | 94608000 @@ -2203,38 +2486,41 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys available offerings matching the specified cache node type. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous - generation counterparts. General purpose: Current generation: M6g node types - (available only for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, - cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge For region availability, see - Supported Node Types M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, - cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, - cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types - (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 - onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, - cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters - are still supported but creation of new clusters is not supported for these types.) T1 - node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not - recommended. Existing clusters are still supported but creation of new clusters is not - supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: - Current generation: R6g node types (available only for Redis engine version 5.0.6 onward - and for Memcached engine version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, + generation counterparts. 
General purpose: Current generation: M7g node types: + cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types + M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached + engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, + cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node + types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, + cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, + cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for + Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): + cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, + cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, + cache.t2.medium Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing + clusters are still supported but creation of new clusters is not supported for these + types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g + node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, + cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see + Supported Node Types R6g node types (available only for Redis engine version 5.0.6 + onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, - cache.r6g.16xlarge For region availability, see Supported Node Types R5 node types: - cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not - recommended. Existing clusters are still supported but creation of new clusters is not - supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, - cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current - generation instance types are created in Amazon VPC by default. Redis append-only files - (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic failover is - not supported on T1 instances. Redis configuration variables appendonly and appendfsync - are not supported on Redis version 2.8.22 and later. + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, + cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + Previous generation: (not recommended. Existing clusters are still supported but creation + of new clusters is not supported for these types.) 
M2 node types: cache.m2.xlarge,
+  cache.m2.2xlarge, cache.m2.4xlarge   R3 node types: cache.r3.large, cache.r3.xlarge,
+  cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge     Additional node type info   All
+  current generation instance types are created in Amazon VPC by default.  Redis append-only
+  files (AOF) are not supported for T1 or T2 instances.  Redis Multi-AZ with automatic
+  failover is not supported on T1 instances.  Redis configuration variables appendonly and
+  appendfsync are not supported on Redis version 2.8.22 and later.
 - `"Duration"`: Duration filter value, specified in years or seconds. Use this parameter to
   show only reservations for a given duration. Valid Values: 1 | 3 | 31536000 | 94608000
 - `"Marker"`: An optional marker returned from a prior request. Use this marker for
@@ -2273,6 +2559,87 @@ function describe_reserved_cache_nodes_offerings(
     )
 end

+"""
+    describe_serverless_cache_snapshots()
+    describe_serverless_cache_snapshots(params::Dict{String,<:Any})
+
+Returns information about serverless cache snapshots. By default, this API lists all of the
+customer’s serverless cache snapshots. It can also describe a single serverless cache
+snapshot, or the snapshots associated with a particular serverless cache. Available for
+Redis only.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"MaxResults"`: The maximum number of records to include in the response. If more records
+  exist than the specified max-results value, a marker is included in the response so that
+  remaining results can be retrieved. Available for Redis only. The default is 50. The
+  Validation Constraints are a maximum of 50.
+- `"NextToken"`: An optional marker returned from a prior request to support pagination of
+  results from this operation. If this parameter is specified, the response includes only
+  records beyond the marker, up to the value specified by max-results. Available for Redis
+  only.
+- `"ServerlessCacheName"`: The identifier of the serverless cache. If this parameter is
+  specified, only snapshots associated with that specific serverless cache are described.
+  Available for Redis only.
+- `"ServerlessCacheSnapshotName"`: The identifier of the serverless cache’s snapshot. If
+  this parameter is specified, only this snapshot is described. Available for Redis only.
+- `"SnapshotType"`: The type of snapshot that is being described. Available for Redis only.
+"""
+function describe_serverless_cache_snapshots(;
+    aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return elasticache(
+        "DescribeServerlessCacheSnapshots";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function describe_serverless_cache_snapshots(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return elasticache(
+        "DescribeServerlessCacheSnapshots",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    describe_serverless_caches()
+    describe_serverless_caches(params::Dict{String,<:Any})
+
+Returns information about a specific serverless cache. If no identifier is specified, then
+the API returns information on all the serverless caches belonging to this Amazon Web
+Services account.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"MaxResults"`: The maximum number of records in the response.
If more records exist than + the specified max-records value, the next token is included in the response so that + remaining results can be retrieved. The default is 50. +- `"NextToken"`: An optional marker returned from a prior request to support pagination of + results from this operation. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by MaxResults. +- `"ServerlessCacheName"`: The identifier for the serverless cache. If this parameter is + specified, only information about that specific serverless cache is returned. Default: NULL +""" +function describe_serverless_caches(; aws_config::AbstractAWSConfig=global_aws_config()) + return elasticache( + "DescribeServerlessCaches"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_serverless_caches( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return elasticache( + "DescribeServerlessCaches", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_service_updates() describe_service_updates(params::Dict{String,<:Any}) @@ -2503,6 +2870,58 @@ function disassociate_global_replication_group( ) end +""" + export_serverless_cache_snapshot(s3_bucket_name, serverless_cache_snapshot_name) + export_serverless_cache_snapshot(s3_bucket_name, serverless_cache_snapshot_name, params::Dict{String,<:Any}) + +Provides the functionality to export the serverless cache snapshot data to Amazon S3. +Available for Redis only. + +# Arguments +- `s3_bucket_name`: Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 + bucket must also be in same region as the snapshot. Available for Redis only. +- `serverless_cache_snapshot_name`: The identifier of the serverless cache snapshot to be + exported to S3. Available for Redis only. + +""" +function export_serverless_cache_snapshot( + S3BucketName, + ServerlessCacheSnapshotName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "ExportServerlessCacheSnapshot", + Dict{String,Any}( + "S3BucketName" => S3BucketName, + "ServerlessCacheSnapshotName" => ServerlessCacheSnapshotName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function export_serverless_cache_snapshot( + S3BucketName, + ServerlessCacheSnapshotName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "ExportServerlessCacheSnapshot", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "S3BucketName" => S3BucketName, + "ServerlessCacheSnapshotName" => ServerlessCacheSnapshotName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ failover_global_replication_group(global_replication_group_id, primary_region, primary_replication_group_id) failover_global_replication_group(global_replication_group_id, primary_region, primary_replication_group_id, params::Dict{String,<:Any}) @@ -2568,7 +2987,7 @@ Increase the number of node groups in the Global datastore - `apply_immediately`: Indicates that the process begins immediately. At present, the only permitted value for this parameter is true. - `global_replication_group_id`: The name of the Global datastore -- `node_group_count`: The number of node groups you wish to add +- `node_group_count`: Total number of node groups you want # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -2794,8 +3213,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys no more than 128 characters in length Cannot contain any of the following characters: '/', '\"', or '@', '%' For more information, see AUTH password at AUTH. - `"AuthTokenUpdateStrategy"`: Specifies the strategy to use to update the AUTH token. This - parameter must be specified with the auth-token parameter. Possible values: Rotate Set - For more information, see Authenticating Users with Redis AUTH + parameter must be specified with the auth-token parameter. Possible values: ROTATE - + default, if no update strategy is provided SET - allowed only after ROTATE DELETE - + allowed only when transitioning to RBAC For more information, see Authenticating Users + with Redis AUTH - `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. @@ -3086,10 +3507,10 @@ end modify_replication_group(replication_group_id) modify_replication_group(replication_group_id, params::Dict{String,<:Any}) -Modifies the settings for a replication group. Scaling for Amazon ElastiCache for Redis -(cluster mode enabled) in the ElastiCache User Guide -ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation -is valid for Redis only. +Modifies the settings for a replication group. This is limited to Redis 7 and newer. +Scaling for Amazon ElastiCache for Redis (cluster mode enabled) in the ElastiCache User +Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This +operation is valid for Redis only. # Arguments - `replication_group_id`: The identifier of the replication group to modify. @@ -3108,8 +3529,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '\"', or '@', '%' For more information, see AUTH password at AUTH. - `"AuthTokenUpdateStrategy"`: Specifies the strategy to use to update the AUTH token. This - parameter must be specified with the auth-token parameter. Possible values: Rotate Set - For more information, see Authenticating Users with Redis AUTH + parameter must be specified with the auth-token parameter. Possible values: ROTATE - + default, if no update strategy is provided SET - allowed only after ROTATE DELETE - + allowed only when transitioning to RBAC For more information, see Authenticating Users + with Redis AUTH - `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. @@ -3294,6 +3717,66 @@ function modify_replication_group_shard_configuration( ) end +""" + modify_serverless_cache(serverless_cache_name) + modify_serverless_cache(serverless_cache_name, params::Dict{String,<:Any}) + +This API modifies the attributes of a serverless cache. + +# Arguments +- `serverless_cache_name`: User-provided identifier for the serverless cache to be modified. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CacheUsageLimits"`: Modify the cache usage limit for the serverless cache. 
+- `"DailySnapshotTime"`: The daily time during which Elasticache begins taking a daily + snapshot of the serverless cache. Available for Redis only. The default is NULL, i.e. the + existing snapshot time configured for the cluster is not removed. +- `"Description"`: User provided description for the serverless cache. Default = NULL, i.e. + the existing description is not removed/modified. The description has a maximum length of + 255 characters. +- `"RemoveUserGroup"`: The identifier of the UserGroup to be removed from association with + the Redis serverless cache. Available for Redis only. Default is NULL. +- `"SecurityGroupIds"`: The new list of VPC security groups to be associated with the + serverless cache. Populating this list means the current VPC security groups will be + removed. This security group is used to authorize traffic access for the VPC end-point + (private-link). Default = NULL - the existing list of VPC security groups is not removed. +- `"SnapshotRetentionLimit"`: The number of days for which Elasticache retains automatic + snapshots before deleting them. Available for Redis only. Default = NULL, i.e. the existing + snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 + days. +- `"UserGroupId"`: The identifier of the UserGroup to be associated with the serverless + cache. Available for Redis only. Default is NULL - the existing UserGroup is not removed. +""" +function modify_serverless_cache( + ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() +) + return elasticache( + "ModifyServerlessCache", + Dict{String,Any}("ServerlessCacheName" => ServerlessCacheName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_serverless_cache( + ServerlessCacheName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "ModifyServerlessCache", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ServerlessCacheName" => ServerlessCacheName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_user(user_id) modify_user(user_id, params::Dict{String,<:Any}) @@ -3739,34 +4222,34 @@ end test_failover(node_group_id, replication_group_id) test_failover(node_group_id, replication_group_id, params::Dict{String,<:Any}) -Represents the input of a TestFailover operation which test automatic failover on a +Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the -following A customer can use this operation to test automatic failover on up to 5 shards -(called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. -If calling this operation on shards in different clusters (called replication groups in the -API and CLI), the calls can be made concurrently. If calling this operation multiple -times on different shards in the same Redis (cluster mode enabled) replication group, the -first node replacement must complete before a subsequent call can be made. 
To determine -whether the node replacement is complete you can check Events using the Amazon ElastiCache -console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover -related events, listed here in order of occurrance: Replication group message: Test -Failover API called for node group <node-group-id> Cache cluster message: Failover -from primary node <primary-node-id> to replica node <node-id> completed -Replication group message: Failover from primary node <primary-node-id> to replica -node <node-id> completed Cache cluster message: Recovering cache nodes -<node-id> Cache cluster message: Finished recovery for cache nodes <node-id> - For more information see: Viewing ElastiCache Events in the ElastiCache User Guide - DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the -ElastiCache User Guide. +following A customer can use this operation to test automatic failover on up to 15 +shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour +period. If calling this operation on shards in different clusters (called replication +groups in the API and CLI), the calls can be made concurrently. If calling this +operation multiple times on different shards in the same Redis (cluster mode enabled) +replication group, the first node replacement must complete before a subsequent call can be +made. To determine whether the node replacement is complete you can check Events using +the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the +following automatic failover related events, listed here in order of occurrance: +Replication group message: Test Failover API called for node group <node-group-id> +Cache cluster message: Failover from primary node <primary-node-id> to replica node +<node-id> completed Replication group message: Failover from primary node +<primary-node-id> to replica node <node-id> completed Cache cluster message: +Recovering cache nodes <node-id> Cache cluster message: Finished recovery for +cache nodes <node-id> For more information see: Viewing ElastiCache Events in +the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also +see, Testing Multi-AZ in the ElastiCache User Guide. # Arguments - `node_group_id`: The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic - failover on up to 5 node groups in any rolling 24-hour period. + failover on up to 15 node groups in any rolling 24-hour period. - `replication_group_id`: The name of the replication group (console: cluster) whose automatic failover is being tested by this operation. @@ -3804,3 +4287,53 @@ function test_failover( feature_set=SERVICE_FEATURE_SET, ) end + +""" + test_migration(customer_node_endpoint_list, replication_group_id) + test_migration(customer_node_endpoint_list, replication_group_id, params::Dict{String,<:Any}) + + Async API to test connection between source and target replication group. + +# Arguments +- `customer_node_endpoint_list`: List of endpoints from which data should be migrated. + List should have only one element. +- `replication_group_id`: The ID of the replication group to which data is to be migrated. 
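As a quick illustration of how the generated test_migration wrapper above might be called from Julia, here is a minimal sketch using this package's high-level `@service` interface. The endpoint address, port, and replication group name are placeholders, and the Address/Port field names of each customer node endpoint are an assumption taken from the ElastiCache API shapes rather than from this file.

    using AWS
    @service ElastiCache

    # Sketch only: endpoint and group identifiers are illustrative placeholders.
    ElastiCache.test_migration(
        [Dict("Address" => "source-redis.example.internal", "Port" => 6379)],  # assumed CustomerNodeEndpoint fields
        "my-replication-group",
    )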
+ +""" +function test_migration( + CustomerNodeEndpointList, + ReplicationGroupId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "TestMigration", + Dict{String,Any}( + "CustomerNodeEndpointList" => CustomerNodeEndpointList, + "ReplicationGroupId" => ReplicationGroupId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function test_migration( + CustomerNodeEndpointList, + ReplicationGroupId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticache( + "TestMigration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CustomerNodeEndpointList" => CustomerNodeEndpointList, + "ReplicationGroupId" => ReplicationGroupId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/elasticsearch_service.jl b/src/services/elasticsearch_service.jl index daa995cc45..ffacc8a54b 100644 --- a/src/services/elasticsearch_service.jl +++ b/src/services/elasticsearch_service.jl @@ -153,6 +153,44 @@ function authorize_vpc_endpoint_access( ) end +""" + cancel_domain_config_change(domain_name) + cancel_domain_config_change(domain_name, params::Dict{String,<:Any}) + +Cancels a pending configuration change on an Amazon OpenSearch Service domain. + +# Arguments +- `domain_name`: Name of the OpenSearch Service domain configuration request to cancel. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: When set to True, returns the list of change IDs and properties that will be + cancelled without actually cancelling the change. +""" +function cancel_domain_config_change( + DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return elasticsearch_service( + "POST", + "/2015-01-01/es/domain/$(DomainName)/config/cancel"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_domain_config_change( + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elasticsearch_service( + "POST", + "/2015-01-01/es/domain/$(DomainName)/config/cancel", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_elasticsearch_service_software_update(domain_name) cancel_elasticsearch_service_software_update(domain_name, params::Dict{String,<:Any}) diff --git a/src/services/emr.jl b/src/services/emr.jl index a77031a607..c78969e6db 100644 --- a/src/services/emr.jl +++ b/src/services/emr.jl @@ -319,6 +319,13 @@ Creates a new Amazon EMR Studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: A detailed description of the Amazon EMR Studio. +- `"EncryptionKeyArn"`: The KMS key identifier (ARN) used to encrypt Amazon EMR Studio + workspace and notebook files when backed up to Amazon S3. +- `"IdcInstanceArn"`: The ARN of the IAM Identity Center instance to create the Studio + application. +- `"IdcUserAssignment"`: Specifies whether IAM Identity Center user assignment is REQUIRED + or OPTIONAL. If the value is set to REQUIRED, users must be explicitly assigned to the + Studio application to access the Studio. - `"IdpAuthUrl"`: The authentication endpoint of your identity provider (IdP). Specify this value when you use IAM authentication and want to let federated users log in to a Studio with the Studio URL and credentials from your IdP. 
Amazon EMR Studio redirects users to @@ -330,6 +337,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Tags"`: A list of tags to associate with the Amazon EMR Studio. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters. +- `"TrustedIdentityPropagationEnabled"`: A Boolean indicating whether to enable Trusted + identity propagation for the Studio. The default value is false. - `"UserRole"`: The IAM user role that users and groups assume when logged in to an Amazon EMR Studio. Only specify a UserRole when you use IAM Identity Center authentication. The permissions attached to the UserRole can be scoped down for each user or group using @@ -888,8 +897,8 @@ function get_block_public_access_configuration( end """ - get_cluster_session_credentials(cluster_id, execution_role_arn) - get_cluster_session_credentials(cluster_id, execution_role_arn, params::Dict{String,<:Any}) + get_cluster_session_credentials(cluster_id) + get_cluster_session_credentials(cluster_id, params::Dict{String,<:Any}) Provides temporary, HTTP basic credentials that are associated with a given runtime IAM role and used by a cluster with fine-grained access control activated. You can use these @@ -898,38 +907,33 @@ authentication. # Arguments - `cluster_id`: The unique identifier of the cluster. -- `execution_role_arn`: The Amazon Resource Name (ARN) of the runtime role for interactive + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExecutionRoleArn"`: The Amazon Resource Name (ARN) of the runtime role for interactive workload submission on the cluster. The runtime role can be a cross-account IAM role. The runtime role ARN is a combination of account ID, role name, and role type using the following format: arn:partition:service:region:account:resource. - """ function get_cluster_session_credentials( - ClusterId, ExecutionRoleArn; aws_config::AbstractAWSConfig=global_aws_config() + ClusterId; aws_config::AbstractAWSConfig=global_aws_config() ) return emr( "GetClusterSessionCredentials", - Dict{String,Any}("ClusterId" => ClusterId, "ExecutionRoleArn" => ExecutionRoleArn); + Dict{String,Any}("ClusterId" => ClusterId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_cluster_session_credentials( ClusterId, - ExecutionRoleArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return emr( "GetClusterSessionCredentials", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ClusterId" => ClusterId, "ExecutionRoleArn" => ExecutionRoleArn - ), - params, - ), + mergewith(_merge, Dict{String,Any}("ClusterId" => ClusterId), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1414,6 +1418,49 @@ function list_studios( ) end +""" + list_supported_instance_types(release_label) + list_supported_instance_types(release_label, params::Dict{String,<:Any}) + +A list of the instance types that Amazon EMR supports. You can filter the list by Amazon +Web Services Region and Amazon EMR release. + +# Arguments +- `release_label`: The Amazon EMR release label determines the versions of open-source + application packages that Amazon EMR has installed on the cluster. Release labels are in + the format emr-x.x.x, where x.x.x is an Amazon EMR release number such as emr-6.10.0. 
For + more information about Amazon EMR releases and their included application versions and + features, see the Amazon EMR Release Guide . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: The pagination token that marks the next set of results to retrieve. +""" +function list_supported_instance_types( + ReleaseLabel; aws_config::AbstractAWSConfig=global_aws_config() +) + return emr( + "ListSupportedInstanceTypes", + Dict{String,Any}("ReleaseLabel" => ReleaseLabel); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_supported_instance_types( + ReleaseLabel, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr( + "ListSupportedInstanceTypes", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ReleaseLabel" => ReleaseLabel), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_cluster(cluster_id) modify_cluster(cluster_id, params::Dict{String,<:Any}) @@ -1948,9 +1995,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys AmiVersion instead. For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about finding an AMI ID, see Finding a Linux AMI. +- `"EbsRootVolumeIops"`: The IOPS, of the Amazon EBS root device volume of the Linux AMI + that is used for each Amazon EC2 instance. Available in Amazon EMR releases 6.15.0 and + later. - `"EbsRootVolumeSize"`: The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each Amazon EC2 instance. Available in Amazon EMR releases 4.x and later. +- `"EbsRootVolumeThroughput"`: The throughput, in MiB/s, of the Amazon EBS root device + volume of the Linux AMI that is used for each Amazon EC2 instance. Available in Amazon EMR + releases 6.15.0 and later. - `"JobFlowRole"`: Also called instance profile and Amazon EC2 role. An IAM role for an Amazon EMR cluster. The Amazon EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole. In order to use the default role, you must have already @@ -2056,6 +2109,62 @@ function run_job_flow( ) end +""" + set_keep_job_flow_alive_when_no_steps(job_flow_ids, keep_job_flow_alive_when_no_steps) + set_keep_job_flow_alive_when_no_steps(job_flow_ids, keep_job_flow_alive_when_no_steps, params::Dict{String,<:Any}) + +You can use the SetKeepJobFlowAliveWhenNoSteps to configure a cluster (job flow) to +terminate after the step execution, i.e., all your steps are executed. If you want a +transient cluster that shuts down after the last of the current executing steps are +completed, you can configure SetKeepJobFlowAliveWhenNoSteps to false. If you want a long +running cluster, configure SetKeepJobFlowAliveWhenNoSteps to true. For more information, +see Managing Cluster Termination in the Amazon EMR Management Guide. + +# Arguments +- `job_flow_ids`: A list of strings that uniquely identify the clusters to protect. This + identifier is returned by RunJobFlow and can also be obtained from DescribeJobFlows. +- `keep_job_flow_alive_when_no_steps`: A Boolean that indicates whether to terminate the + cluster after all steps are executed. 
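To make the SetKeepJobFlowAliveWhenNoSteps wrapper documented above concrete, a minimal usage sketch follows. It assumes the package's usual `@service` entry point and an existing cluster; the cluster ID is a placeholder.

    using AWS
    @service EMR

    # Mark the cluster as transient so it terminates once its current steps finish.
    EMR.set_keep_job_flow_alive_when_no_steps(["j-EXAMPLECLUSTERID"], false)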
+ +""" +function set_keep_job_flow_alive_when_no_steps( + JobFlowIds, + KeepJobFlowAliveWhenNoSteps; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr( + "SetKeepJobFlowAliveWhenNoSteps", + Dict{String,Any}( + "JobFlowIds" => JobFlowIds, + "KeepJobFlowAliveWhenNoSteps" => KeepJobFlowAliveWhenNoSteps, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function set_keep_job_flow_alive_when_no_steps( + JobFlowIds, + KeepJobFlowAliveWhenNoSteps, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr( + "SetKeepJobFlowAliveWhenNoSteps", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "JobFlowIds" => JobFlowIds, + "KeepJobFlowAliveWhenNoSteps" => KeepJobFlowAliveWhenNoSteps, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ set_termination_protection(job_flow_ids, termination_protected) set_termination_protection(job_flow_ids, termination_protected, params::Dict{String,<:Any}) @@ -2070,7 +2179,7 @@ ensure that in the event of an error, the instances persist so that you can reco data stored in their ephemeral instance storage. To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false. For more -information, seeManaging Cluster Termination in the Amazon EMR Management Guide. +information, see Managing Cluster Termination in the Amazon EMR Management Guide. # Arguments - `job_flow_ids`: A list of strings that uniquely identify the clusters to protect. This @@ -2115,6 +2224,65 @@ function set_termination_protection( ) end +""" + set_unhealthy_node_replacement(job_flow_ids, unhealthy_node_replacement) + set_unhealthy_node_replacement(job_flow_ids, unhealthy_node_replacement, params::Dict{String,<:Any}) + +Specify whether to enable unhealthy node replacement, which lets Amazon EMR gracefully +replace core nodes on a cluster if any nodes become unhealthy. For example, a node becomes +unhealthy if disk usage is above 90%. If unhealthy node replacement is on and +TerminationProtected are off, Amazon EMR immediately terminates the unhealthy core nodes. +To use unhealthy node replacement and retain unhealthy core nodes, use to turn on +termination protection. In such cases, Amazon EMR adds the unhealthy nodes to a denylist, +reducing job interruptions and failures. If unhealthy node replacement is on, Amazon EMR +notifies YARN and other applications on the cluster to stop scheduling tasks with these +nodes, moves the data, and then terminates the nodes. For more information, see graceful +node replacement in the Amazon EMR Management Guide. + +# Arguments +- `job_flow_ids`: The list of strings that uniquely identify the clusters for which to turn + on unhealthy node replacement. You can get these identifiers by running the RunJobFlow or + the DescribeJobFlows operations. +- `unhealthy_node_replacement`: Indicates whether to turn on or turn off graceful unhealthy + node replacement. 
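A similarly minimal sketch of the SetUnhealthyNodeReplacement wrapper documented above; the cluster ID is again a placeholder.

    using AWS
    @service EMR

    # Turn on graceful replacement of unhealthy core nodes for the given cluster.
    EMR.set_unhealthy_node_replacement(["j-EXAMPLECLUSTERID"], true)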
+ +""" +function set_unhealthy_node_replacement( + JobFlowIds, UnhealthyNodeReplacement; aws_config::AbstractAWSConfig=global_aws_config() +) + return emr( + "SetUnhealthyNodeReplacement", + Dict{String,Any}( + "JobFlowIds" => JobFlowIds, + "UnhealthyNodeReplacement" => UnhealthyNodeReplacement, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function set_unhealthy_node_replacement( + JobFlowIds, + UnhealthyNodeReplacement, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr( + "SetUnhealthyNodeReplacement", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "JobFlowIds" => JobFlowIds, + "UnhealthyNodeReplacement" => UnhealthyNodeReplacement, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ set_visible_to_all_users(job_flow_ids, visible_to_all_users) set_visible_to_all_users(job_flow_ids, visible_to_all_users, params::Dict{String,<:Any}) @@ -2338,6 +2506,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DefaultS3Location"`: The Amazon S3 location to back up Workspaces and notebook files for the Amazon EMR Studio. - `"Description"`: A detailed description to assign to the Amazon EMR Studio. +- `"EncryptionKeyArn"`: The KMS key identifier (ARN) used to encrypt Amazon EMR Studio + workspace and notebook files when backed up to Amazon S3. - `"Name"`: A descriptive name for the Amazon EMR Studio. - `"SubnetIds"`: A list of subnet IDs to associate with the Amazon EMR Studio. The list can include new subnet IDs, but must also include all of the subnet IDs previously associated diff --git a/src/services/emr_containers.jl b/src/services/emr_containers.jl index 459d94e603..ba66ea68c6 100644 --- a/src/services/emr_containers.jl +++ b/src/services/emr_containers.jl @@ -180,6 +180,69 @@ function create_managed_endpoint( ) end +""" + create_security_configuration(client_token, name, security_configuration_data) + create_security_configuration(client_token, name, security_configuration_data, params::Dict{String,<:Any}) + +Creates a security configuration. Security configurations in Amazon EMR on EKS are +templates for different security setups. You can use security configurations to configure +the Lake Formation integration setup. You can also create a security configuration to +re-use a security setup each time you create a virtual cluster. + +# Arguments +- `client_token`: The client idempotency token to use when creating the security + configuration. +- `name`: The name of the security configuration. +- `security_configuration_data`: Security configuration input for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: The tags to add to the security configuration. 
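A rough sketch of calling the CreateSecurityConfiguration wrapper documented above. The configuration name is illustrative, and the securityConfigurationData payload is left as an empty placeholder, since its full shape (Lake Formation and encryption settings) is defined in the EMR on EKS API reference rather than in this file.

    using AWS, UUIDs
    @service EMR_Containers

    EMR_Containers.create_security_configuration(
        string(uuid4()),             # client idempotency token
        "example-security-config",   # illustrative name
        Dict{String,Any}(),          # placeholder; populate per the SecurityConfigurationData shape
    )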
+""" +function create_security_configuration( + clientToken, + name, + securityConfigurationData; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr_containers( + "POST", + "/securityconfigurations", + Dict{String,Any}( + "clientToken" => clientToken, + "name" => name, + "securityConfigurationData" => securityConfigurationData, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_security_configuration( + clientToken, + name, + securityConfigurationData, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr_containers( + "POST", + "/securityconfigurations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "name" => name, + "securityConfigurationData" => securityConfigurationData, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_virtual_cluster(client_token, container_provider, name) create_virtual_cluster(client_token, container_provider, name, params::Dict{String,<:Any}) @@ -197,6 +260,7 @@ namespaces to meet your requirements. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"securityConfigurationId"`: The ID of the security configuration. - `"tags"`: The tags assigned to the virtual cluster. """ function create_virtual_cluster( @@ -462,6 +526,44 @@ function describe_managed_endpoint( ) end +""" + describe_security_configuration(security_configuration_id) + describe_security_configuration(security_configuration_id, params::Dict{String,<:Any}) + +Displays detailed information about a specified security configuration. Security +configurations in Amazon EMR on EKS are templates for different security setups. You can +use security configurations to configure the Lake Formation integration setup. You can also +create a security configuration to re-use a security setup each time you create a virtual +cluster. + +# Arguments +- `security_configuration_id`: The ID of the security configuration. + +""" +function describe_security_configuration( + securityConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return emr_containers( + "GET", + "/securityconfigurations/$(securityConfigurationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_security_configuration( + securityConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr_containers( + "GET", + "/securityconfigurations/$(securityConfigurationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_virtual_cluster(virtual_cluster_id) describe_virtual_cluster(virtual_cluster_id, params::Dict{String,<:Any}) @@ -686,6 +788,42 @@ function list_managed_endpoints( ) end +""" + list_security_configurations() + list_security_configurations(params::Dict{String,<:Any}) + +Lists security configurations based on a set of parameters. Security configurations in +Amazon EMR on EKS are templates for different security setups. You can use security +configurations to configure the Lake Formation integration setup. You can also create a +security configuration to re-use a security setup each time you create a virtual cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"createdAfter"`: The date and time after which the security configuration was created. 
+- `"createdBefore"`: The date and time before which the security configuration was created. +- `"maxResults"`: The maximum number of security configurations the operation can list. +- `"nextToken"`: The token for the next set of security configurations to return. +""" +function list_security_configurations(; aws_config::AbstractAWSConfig=global_aws_config()) + return emr_containers( + "GET", + "/securityconfigurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_security_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return emr_containers( + "GET", + "/securityconfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -737,6 +875,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys is the only supported type as of now. - `"createdAfter"`: The date and time after which the virtual clusters are created. - `"createdBefore"`: The date and time before which the virtual clusters are created. +- `"eksAccessEntryIntegrated"`: Optional Boolean that specifies whether the operation + should return the virtual clusters that have the access entry integration enabled or + disabled. If not specified, the operation returns all applicable virtual clusters. - `"maxResults"`: The maximum number of virtual clusters that can be listed. - `"nextToken"`: The token for the next set of virtual clusters to return. - `"states"`: The states of the requested virtual clusters. diff --git a/src/services/emr_serverless.jl b/src/services/emr_serverless.jl index 43b98f6cb6..5b50fe0746 100644 --- a/src/services/emr_serverless.jl +++ b/src/services/emr_serverless.jl @@ -49,7 +49,7 @@ Creates an application. # Arguments - `client_token`: The client idempotency token of the application to create. Its value must be unique for each request. -- `release_label`: The EMR release associated with the application. +- `release_label`: The Amazon EMR release associated with the application. - `type`: The type of application you want to start, such as Spark or Hive. # Optional Parameters @@ -62,12 +62,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"imageConfiguration"`: The image configuration for all worker types. You can either set this parameter or imageConfiguration for each worker type in workerTypeSpecifications. - `"initialCapacity"`: The capacity to initialize when the application is created. +- `"interactiveConfiguration"`: The interactive configuration object that enables the + interactive use cases to use when running an application. - `"maximumCapacity"`: The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. +- `"monitoringConfiguration"`: The configuration setting for monitoring. - `"name"`: The name of the application. - `"networkConfiguration"`: The network configuration for customer VPC connectivity. +- `"runtimeConfiguration"`: The Configuration specifications to use when creating an + application. Each configuration consists of a classification and properties. This + configuration is applied to all the job runs submitted under the application. - `"tags"`: The tags assigned to the application. 
- `"workerTypeSpecifications"`: The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a @@ -186,13 +192,21 @@ end get_dashboard_for_job_run(application_id, job_run_id) get_dashboard_for_job_run(application_id, job_run_id, params::Dict{String,<:Any}) -Returns a URL to access the job run dashboard. The generated URL is valid for one hour, -after which you must invoke the API again to generate a new URL. +Creates and returns a URL that you can use to access the application UIs for a job run. For +jobs in a running state, the application UI is a live user interface such as the Spark or +Tez web UI. For completed jobs, the application UI is a persistent application user +interface such as the Spark History Server or persistent Tez UI. The URL is valid for one +hour after you generate it. To access the application UI after that hour elapses, you must +invoke the API again to generate a new URL. # Arguments - `application_id`: The ID of the application. - `job_run_id`: The ID of the job run. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attempt"`: An optimal parameter that indicates the amount of attempts for the job. If + not specified, this value defaults to the attempt of the latest job. """ function get_dashboard_for_job_run( applicationId, jobRunId; aws_config::AbstractAWSConfig=global_aws_config() @@ -229,6 +243,10 @@ Displays detailed information about a job run. - `application_id`: The ID of the application on which the job run is submitted. - `job_run_id`: The ID of the job run. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attempt"`: An optimal parameter that indicates the amount of attempts for the job. If + not specified, this value defaults to the attempt of the latest job. """ function get_job_run( applicationId, jobRunId; aws_config::AbstractAWSConfig=global_aws_config() @@ -285,6 +303,46 @@ function list_applications( ) end +""" + list_job_run_attempts(application_id, job_run_id) + list_job_run_attempts(application_id, job_run_id, params::Dict{String,<:Any}) + +Lists all attempt of a job run. + +# Arguments +- `application_id`: The ID of the application for which to list job runs. +- `job_run_id`: The ID of the job run to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of job run attempts to list. +- `"nextToken"`: The token for the next set of job run attempt results. +""" +function list_job_run_attempts( + applicationId, jobRunId; aws_config::AbstractAWSConfig=global_aws_config() +) + return emr_serverless( + "GET", + "/applications/$(applicationId)/jobruns/$(jobRunId)/attempts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_job_run_attempts( + applicationId, + jobRunId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr_serverless( + "GET", + "/applications/$(applicationId)/jobruns/$(jobRunId)/attempts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_job_runs(application_id) list_job_runs(application_id, params::Dict{String,<:Any}) @@ -299,6 +357,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"createdAtAfter"`: The lower bound of the option to filter by creation date and time. 
- `"createdAtBefore"`: The upper bound of the option to filter by creation date and time. - `"maxResults"`: The maximum number of job runs that can be listed. +- `"mode"`: The mode of the job runs to list. - `"nextToken"`: The token for the next set of job run results. - `"states"`: An optional filter for job run states. Note that if this filter contains multiple states, the resulting list will be grouped by the state. @@ -411,7 +470,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"executionTimeoutMinutes"`: The maximum duration for the job run to run. If the job run runs beyond this duration, it will be automatically cancelled. - `"jobDriver"`: The job driver for the job run. +- `"mode"`: The mode of the job run when it starts. - `"name"`: The optional job run name. This doesn't have to be unique. +- `"retryPolicy"`: The retry policy when job run starts. - `"tags"`: The tags assigned to the job run. """ function start_job_run( @@ -590,10 +651,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys either set this parameter or imageConfiguration for each worker type in WorkerTypeSpecificationInput. - `"initialCapacity"`: The capacity to initialize when the application is updated. +- `"interactiveConfiguration"`: The interactive configuration object that contains new + interactive use cases when the application is updated. - `"maximumCapacity"`: The maximum capacity to allocate when the application is updated. This is cumulative across all workers at any given point in time during the lifespan of the application. No new resources will be created once any one of the defined limits is hit. +- `"monitoringConfiguration"`: The configuration setting for monitoring. - `"networkConfiguration"`: +- `"releaseLabel"`: The Amazon EMR release label for the application. You can change the + release label to use a different release of Amazon EMR. +- `"runtimeConfiguration"`: The Configuration specifications to use when updating an + application. Each configuration consists of a classification and properties. This + configuration is applied across all the job runs submitted under the application. - `"workerTypeSpecifications"`: The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark diff --git a/src/services/entityresolution.jl b/src/services/entityresolution.jl new file mode 100644 index 0000000000..6ad0a84154 --- /dev/null +++ b/src/services/entityresolution.jl @@ -0,0 +1,1550 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: entityresolution +using AWS.Compat +using AWS.UUIDs + +""" + add_policy_statement(action, arn, effect, principal, statement_id) + add_policy_statement(action, arn, effect, principal, statement_id, params::Dict{String,<:Any}) + +Adds a policy statement object. To retrieve a list of existing policy statements, use the +GetPolicy API. + +# Arguments +- `action`: The action that the principal can use on the resource. For example, + entityresolution:GetIdMappingJob, entityresolution:GetMatchingJob. +- `arn`: The Amazon Resource Name (ARN) of the resource that will be accessed by the + principal. +- `effect`: Determines whether the permissions specified in the policy are to be allowed + (Allow) or denied (Deny). 
+- `principal`: The Amazon Web Services service or Amazon Web Services account that can + access the resource defined as ARN. +- `statement_id`: A statement identifier that differentiates the statement from others in + the same policy. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"condition"`: A set of condition keys that you can use in key policies. +""" +function add_policy_statement( + action, + arn, + effect, + principal, + statementId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/policies/$(arn)/$(statementId)", + Dict{String,Any}("action" => action, "effect" => effect, "principal" => principal); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function add_policy_statement( + action, + arn, + effect, + principal, + statementId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/policies/$(arn)/$(statementId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "action" => action, "effect" => effect, "principal" => principal + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_delete_unique_id(unique_ids, workflow_name) + batch_delete_unique_id(unique_ids, workflow_name, params::Dict{String,<:Any}) + +Deletes multiple unique IDs in a matching workflow. + +# Arguments +- `unique_ids`: The unique IDs to delete. +- `workflow_name`: The name of the workflow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"inputSource"`: The input source for the batch delete unique ID operation. +""" +function batch_delete_unique_id( + uniqueIds, workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/matchingworkflows/$(workflowName)/uniqueids", + Dict{String,Any}("headers" => Dict{String,Any}("uniqueIds" => uniqueIds)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_delete_unique_id( + uniqueIds, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/matchingworkflows/$(workflowName)/uniqueids", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("headers" => Dict{String,Any}("uniqueIds" => uniqueIds)), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name) + create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any}) + +Creates an IdMappingWorkflow object which stores the configuration of the data processing +job to be run. Each IdMappingWorkflow must have a unique workflow name. To modify an +existing workflow, use the UpdateIdMappingWorkflow API. + +# Arguments +- `id_mapping_techniques`: An object which defines the idMappingType and the + providerProperties. +- `input_source_config`: A list of InputSource objects, which have the fields + InputSourceARN and SchemaName. +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. +- `workflow_name`: The name of the workflow. There can't be multiple IdMappingWorkflows + with the same name. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the workflow. +- `"outputSourceConfig"`: A list of IdMappingWorkflowOutputSource objects, each of which + contains fields OutputS3Path and Output. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_id_mapping_workflow( + idMappingTechniques, + inputSourceConfig, + roleArn, + workflowName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/idmappingworkflows", + Dict{String,Any}( + "idMappingTechniques" => idMappingTechniques, + "inputSourceConfig" => inputSourceConfig, + "roleArn" => roleArn, + "workflowName" => workflowName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_mapping_workflow( + idMappingTechniques, + inputSourceConfig, + roleArn, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/idmappingworkflows", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "idMappingTechniques" => idMappingTechniques, + "inputSourceConfig" => inputSourceConfig, + "roleArn" => roleArn, + "workflowName" => workflowName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_namespace(id_namespace_name, type) + create_id_namespace(id_namespace_name, type, params::Dict{String,<:Any}) + +Creates an ID namespace object which will help customers provide metadata explaining their +dataset and how to use it. Each ID namespace must have a unique name. To modify an existing +ID namespace, use the UpdateIdNamespace API. + +# Arguments +- `id_namespace_name`: The name of the ID namespace. +- `type`: The type of ID namespace. There are two types: SOURCE and TARGET. The SOURCE + contains configurations for sourceId data that will be processed in an ID mapping workflow. + The TARGET contains a configuration of targetId to which all sourceIds will resolve to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the ID namespace. +- `"idMappingWorkflowProperties"`: Determines the properties of IdMappingWorflow where this + IdNamespace can be used as a Source or a Target. +- `"inputSourceConfig"`: A list of InputSource objects, which have the fields + InputSourceARN and SchemaName. +- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to access the resources defined in this IdNamespace on your behalf as part of the + workflow run. +- `"tags"`: The tags used to organize, track, or control access for this resource. 
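A minimal sketch of the CreateIdNamespace wrapper documented above, assuming the `@service` interface. The namespace name and description are illustrative; SOURCE is one of the two types named in the docstring.

    using AWS
    @service EntityResolution

    EntityResolution.create_id_namespace(
        "example-source-namespace",                          # illustrative name
        "SOURCE",                                            # or "TARGET"
        Dict("description" => "Source IDs for ID mapping"),  # optional parameters
    )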
+""" +function create_id_namespace( + idNamespaceName, type; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "POST", + "/idnamespaces", + Dict{String,Any}("idNamespaceName" => idNamespaceName, "type" => type); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_namespace( + idNamespaceName, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/idnamespaces", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("idNamespaceName" => idNamespaceName, "type" => type), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name) + create_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name, params::Dict{String,<:Any}) + +Creates a MatchingWorkflow object which stores the configuration of the data processing job +to be run. It is important to note that there should not be a pre-existing MatchingWorkflow +with the same name. To modify an existing workflow, utilize the UpdateMatchingWorkflow API. + +# Arguments +- `input_source_config`: A list of InputSource objects, which have the fields + InputSourceARN and SchemaName. +- `output_source_config`: A list of OutputSource objects, each of which contains fields + OutputS3Path, ApplyNormalization, and Output. +- `resolution_techniques`: An object which defines the resolutionType and the + ruleBasedProperties. +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. +- `workflow_name`: The name of the workflow. There can't be multiple MatchingWorkflows with + the same name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the workflow. +- `"incrementalRunConfig"`: An object which defines an incremental run type and has only + incrementalRunType as a field. +- `"tags"`: The tags used to organize, track, or control access for this resource. 
+""" +function create_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows", + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + "workflowName" => workflowName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + "workflowName" => workflowName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_schema_mapping(mapped_input_fields, schema_name) + create_schema_mapping(mapped_input_fields, schema_name, params::Dict{String,<:Any}) + +Creates a schema mapping, which defines the schema of the input customer records table. The +SchemaMapping also provides Entity Resolution with some metadata about the table, such as +the attribute types of the columns and which columns to match on. + +# Arguments +- `mapped_input_fields`: A list of MappedInputFields. Each MappedInputField corresponds to + a column the source data table, and contains column name plus additional information that + Entity Resolution uses for matching. +- `schema_name`: The name of the schema. There can't be multiple SchemaMappings with the + same name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the schema. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_schema_mapping( + mappedInputFields, schemaName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "POST", + "/schemas", + Dict{String,Any}( + "mappedInputFields" => mappedInputFields, "schemaName" => schemaName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_schema_mapping( + mappedInputFields, + schemaName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/schemas", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "mappedInputFields" => mappedInputFields, "schemaName" => schemaName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_mapping_workflow(workflow_name) + delete_id_mapping_workflow(workflow_name, params::Dict{String,<:Any}) + +Deletes the IdMappingWorkflow with a given name. This operation will succeed even if a +workflow with the given name does not exist. + +# Arguments +- `workflow_name`: The name of the workflow to be deleted. 
+ +""" +function delete_id_mapping_workflow( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/idmappingworkflows/$(workflowName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_mapping_workflow( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/idmappingworkflows/$(workflowName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_namespace(id_namespace_name) + delete_id_namespace(id_namespace_name, params::Dict{String,<:Any}) + +Deletes the IdNamespace with a given name. + +# Arguments +- `id_namespace_name`: The name of the ID namespace. + +""" +function delete_id_namespace( + idNamespaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/idnamespaces/$(idNamespaceName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_namespace( + idNamespaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/idnamespaces/$(idNamespaceName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_matching_workflow(workflow_name) + delete_matching_workflow(workflow_name, params::Dict{String,<:Any}) + +Deletes the MatchingWorkflow with a given name. This operation will succeed even if a +workflow with the given name does not exist. + +# Arguments +- `workflow_name`: The name of the workflow to be retrieved. + +""" +function delete_matching_workflow( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/matchingworkflows/$(workflowName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_matching_workflow( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/matchingworkflows/$(workflowName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_policy_statement(arn, statement_id) + delete_policy_statement(arn, statement_id, params::Dict{String,<:Any}) + +Deletes the policy statement. + +# Arguments +- `arn`: The ARN of the resource for which the policy need to be deleted. +- `statement_id`: A statement identifier that differentiates the statement from others in + the same policy. + +""" +function delete_policy_statement( + arn, statementId; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/policies/$(arn)/$(statementId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_policy_statement( + arn, + statementId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/policies/$(arn)/$(statementId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_schema_mapping(schema_name) + delete_schema_mapping(schema_name, params::Dict{String,<:Any}) + +Deletes the SchemaMapping with a given name. This operation will succeed even if a schema +with the given name does not exist. This operation will fail if there is a MatchingWorkflow +object that references the SchemaMapping in the workflow's InputSourceConfig. 
+ +# Arguments +- `schema_name`: The name of the schema to delete. + +""" +function delete_schema_mapping( + schemaName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/schemas/$(schemaName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_schema_mapping( + schemaName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/schemas/$(schemaName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_id_mapping_job(job_id, workflow_name) + get_id_mapping_job(job_id, workflow_name, params::Dict{String,<:Any}) + +Gets the status, metrics, and errors (if there are any) that are associated with a job. + +# Arguments +- `job_id`: The ID of the job. +- `workflow_name`: The name of the workflow. + +""" +function get_id_mapping_job( + jobId, workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/idmappingworkflows/$(workflowName)/jobs/$(jobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_id_mapping_job( + jobId, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/idmappingworkflows/$(workflowName)/jobs/$(jobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_id_mapping_workflow(workflow_name) + get_id_mapping_workflow(workflow_name, params::Dict{String,<:Any}) + +Returns the IdMappingWorkflow with a given name, if it exists. + +# Arguments +- `workflow_name`: The name of the workflow. + +""" +function get_id_mapping_workflow( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/idmappingworkflows/$(workflowName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_id_mapping_workflow( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/idmappingworkflows/$(workflowName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_id_namespace(id_namespace_name) + get_id_namespace(id_namespace_name, params::Dict{String,<:Any}) + +Returns the IdNamespace with a given name, if it exists. + +# Arguments +- `id_namespace_name`: The name of the ID namespace. + +""" +function get_id_namespace( + idNamespaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/idnamespaces/$(idNamespaceName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_id_namespace( + idNamespaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/idnamespaces/$(idNamespaceName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_match_id(record, workflow_name) + get_match_id(record, workflow_name, params::Dict{String,<:Any}) + +Returns the corresponding Match ID of a customer record if the record has been processed. + +# Arguments +- `record`: The record to fetch the Match ID for. +- `workflow_name`: The name of the workflow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"applyNormalization"`: Normalizes the attributes defined in the schema in the input + data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in + the input table is in a format of 1234567890, Entity Resolution will normalize this field + in the output to (123)-456-7890. +""" +function get_match_id( + record, workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/matches", + Dict{String,Any}("record" => record); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_match_id( + record, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/matches", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("record" => record), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_matching_job(job_id, workflow_name) + get_matching_job(job_id, workflow_name, params::Dict{String,<:Any}) + +Gets the status, metrics, and errors (if there are any) that are associated with a job. + +# Arguments +- `job_id`: The ID of the job. +- `workflow_name`: The name of the workflow. + +""" +function get_matching_job( + jobId, workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs/$(jobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_matching_job( + jobId, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs/$(jobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_matching_workflow(workflow_name) + get_matching_workflow(workflow_name, params::Dict{String,<:Any}) + +Returns the MatchingWorkflow with a given name, if it exists. + +# Arguments +- `workflow_name`: The name of the workflow. + +""" +function get_matching_workflow( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_matching_workflow( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_policy(arn) + get_policy(arn, params::Dict{String,<:Any}) + +Returns the resource-based policy. + +# Arguments +- `arn`: The Amazon Resource Name (ARN) of the resource for which the policy need to be + returned. + +""" +function get_policy(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/policies/$(arn)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_policy( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/policies/$(arn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_provider_service(provider_name, provider_service_name) + get_provider_service(provider_name, provider_service_name, params::Dict{String,<:Any}) + +Returns the ProviderService of a given name. 
+ +# Arguments +- `provider_name`: The name of the provider. This name is typically the company name. +- `provider_service_name`: The ARN (Amazon Resource Name) of the product that the provider + service provides. + +""" +function get_provider_service( + providerName, providerServiceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/providerservices/$(providerName)/$(providerServiceName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_provider_service( + providerName, + providerServiceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/providerservices/$(providerName)/$(providerServiceName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_schema_mapping(schema_name) + get_schema_mapping(schema_name, params::Dict{String,<:Any}) + +Returns the SchemaMapping of a given name. + +# Arguments +- `schema_name`: The name of the schema to be retrieved. + +""" +function get_schema_mapping(schemaName; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", + "/schemas/$(schemaName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_schema_mapping( + schemaName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/schemas/$(schemaName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_id_mapping_jobs(workflow_name) + list_id_mapping_jobs(workflow_name, params::Dict{String,<:Any}) + +Lists all ID mapping jobs for a given workflow. + +# Arguments +- `workflow_name`: The name of the workflow to be retrieved. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous API call. +""" +function list_id_mapping_jobs( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/idmappingworkflows/$(workflowName)/jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_id_mapping_jobs( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/idmappingworkflows/$(workflowName)/jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_id_mapping_workflows() + list_id_mapping_workflows(params::Dict{String,<:Any}) + +Returns a list of all the IdMappingWorkflows that have been created for an Amazon Web +Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous API call. 
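+
+# Examples
+A minimal pagination sketch. It assumes the parsed response behaves like a dictionary and
+echoes a `"nextToken"` key when more results are available; the page size is illustrative.
+
+    pages = Any[]
+    params = Dict{String,Any}("maxResults" => 25)
+    while true
+        resp = list_id_mapping_workflows(params)
+        push!(pages, resp)
+        token = get(resp, "nextToken", nothing)
+        isnothing(token) && break
+        params["nextToken"] = token
+    end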
+""" +function list_id_mapping_workflows(; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/idmappingworkflows"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_id_mapping_workflows( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/idmappingworkflows", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_id_namespaces() + list_id_namespaces(params::Dict{String,<:Any}) + +Returns a list of all ID namespaces. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of IdNamespace objects returned per page. +- `"nextToken"`: The pagination token from the previous API call. +""" +function list_id_namespaces(; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/idnamespaces"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_id_namespaces( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/idnamespaces", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_matching_jobs(workflow_name) + list_matching_jobs(workflow_name, params::Dict{String,<:Any}) + +Lists all jobs for a given workflow. + +# Arguments +- `workflow_name`: The name of the workflow to be retrieved. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous API call. +""" +function list_matching_jobs(workflowName; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_matching_jobs( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_matching_workflows() + list_matching_workflows(params::Dict{String,<:Any}) + +Returns a list of all the MatchingWorkflows that have been created for an Amazon Web +Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous API call. +""" +function list_matching_workflows(; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/matchingworkflows"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_matching_workflows( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/matchingworkflows", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_provider_services() + list_provider_services(params::Dict{String,<:Any}) + +Returns a list of all the ProviderServices that are available in this Amazon Web Services +Region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous API call. +- `"providerName"`: The name of the provider. This name is typically the company name. +""" +function list_provider_services(; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/providerservices"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_provider_services( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/providerservices", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_schema_mappings() + list_schema_mappings(params::Dict{String,<:Any}) + +Returns a list of all the SchemaMappings that have been created for an Amazon Web Services +account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous API call. +""" +function list_schema_mappings(; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/schemas"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_schema_mappings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", "/schemas", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Displays the tags associated with an Entity Resolution resource. In Entity Resolution, +SchemaMapping, and MatchingWorkflow can be tagged. + +# Arguments +- `resource_arn`: The ARN of the resource for which you want to view tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_policy(arn, policy) + put_policy(arn, policy, params::Dict{String,<:Any}) + +Updates the resource-based policy. + +# Arguments +- `arn`: The Amazon Resource Name (ARN) of the resource for which the policy needs to be + updated. +- `policy`: The resource-based policy. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"token"`: A unique identifier for the current revision of the policy. 
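+
+# Examples
+A minimal sketch of replacing a resource-based policy. The ARN and the file name are
+illustrative placeholders; the policy document is passed as a JSON string.
+
+    resource_arn = "arn:aws:entityresolution:us-east-1:111122223333:matchingworkflow/MyWorkflow"
+    policy_doc = read("my-policy.json", String)
+    put_policy(resource_arn, policy_doc)
+    # To guard against concurrent updates, pass the current revision token (see get_policy):
+    put_policy(resource_arn, policy_doc, Dict("token" => "policy-revision-token"))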
+""" +function put_policy(arn, policy; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "PUT", + "/policies/$(arn)", + Dict{String,Any}("policy" => policy); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_policy( + arn, + policy, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/policies/$(arn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("policy" => policy), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_id_mapping_job(workflow_name) + start_id_mapping_job(workflow_name, params::Dict{String,<:Any}) + +Starts the IdMappingJob of a workflow. The workflow must have previously been created using +the CreateIdMappingWorkflow endpoint. + +# Arguments +- `workflow_name`: The name of the ID mapping job to be retrieved. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"outputSourceConfig"`: A list of OutputSource objects. +""" +function start_id_mapping_job( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "POST", + "/idmappingworkflows/$(workflowName)/jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_id_mapping_job( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/idmappingworkflows/$(workflowName)/jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_matching_job(workflow_name) + start_matching_job(workflow_name, params::Dict{String,<:Any}) + +Starts the MatchingJob of a workflow. The workflow must have previously been created using +the CreateMatchingWorkflow endpoint. + +# Arguments +- `workflow_name`: The name of the matching job to be retrieved. + +""" +function start_matching_job(workflowName; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_matching_job( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns one or more tags (key-value pairs) to the specified Entity Resolution resource. +Tags can help you organize and categorize your resources. You can also use them to scope +user permissions by granting a user permission to access or change only resources with +certain tag values. In Entity Resolution, SchemaMapping and MatchingWorkflow can be tagged. +Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as +strings of characters. You can use the TagResource action with a resource that already has +tags. If you specify a new tag key, this tag is appended to the list of tags associated +with the resource. If you specify a tag key that is already associated with the resource, +the new tag value that you specify replaces the previous value for that tag. + +# Arguments +- `resource_arn`: The ARN of the resource for which you want to view tags. 
+- `tags`: The tags used to organize, track, or control access for this resource.
+
+"""
+function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config())
+    return entityresolution(
+        "POST",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}("tags" => tags);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function tag_resource(
+    resourceArn,
+    tags,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return entityresolution(
+        "POST",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    untag_resource(resource_arn, tag_keys)
+    untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any})
+
+Removes one or more tags from the specified Entity Resolution resource. In Entity
+Resolution, SchemaMapping and MatchingWorkflow can be tagged.
+
+# Arguments
+- `resource_arn`: The ARN of the resource that you want to untag.
+- `tag_keys`: The list of tag keys to remove from the resource.
+
+"""
+function untag_resource(
+    resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return entityresolution(
+        "DELETE",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}("tagKeys" => tagKeys);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function untag_resource(
+    resourceArn,
+    tagKeys,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return entityresolution(
+        "DELETE",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name)
+    update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any})
+
+Updates an existing IdMappingWorkflow. This method is identical to CreateIdMappingWorkflow,
+except it uses an HTTP PUT request instead of a POST request, and the IdMappingWorkflow
+must already exist for the method to succeed.
+
+# Arguments
+- `id_mapping_techniques`: An object which defines the idMappingType and the
+  providerProperties.
+- `input_source_config`: A list of InputSource objects, which have the fields
+  InputSourceARN and SchemaName.
+- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes
+  this role to access Amazon Web Services resources on your behalf.
+- `workflow_name`: The name of the workflow.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"description"`: A description of the workflow.
+- `"outputSourceConfig"`: A list of OutputSource objects, each of which contains fields
+  OutputS3Path and KMSArn.
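+
+# Examples
+A minimal sketch of updating a provider-based workflow. The key casing, the PROVIDER
+mapping type, and all ARNs and names below are assumptions or placeholders; adapt them to
+your configuration.
+
+    techniques = Dict(
+        "idMappingType" => "PROVIDER",
+        "providerProperties" => Dict(
+            "providerServiceArn" => "arn:aws:entityresolution:us-east-1::providerservice/example/example",
+        ),
+    )
+    sources = [Dict(
+        "inputSourceARN" => "arn:aws:glue:us-east-1:111122223333:table/mydb/mytable",
+        "schemaName" => "MySchemaMapping",
+    )]
+    update_id_mapping_workflow(
+        techniques,
+        sources,
+        "arn:aws:iam::111122223333:role/EntityResolutionRole",
+        "MyIdMappingWorkflow",
+    )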
+""" +function update_id_mapping_workflow( + idMappingTechniques, + inputSourceConfig, + roleArn, + workflowName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/idmappingworkflows/$(workflowName)", + Dict{String,Any}( + "idMappingTechniques" => idMappingTechniques, + "inputSourceConfig" => inputSourceConfig, + "roleArn" => roleArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_mapping_workflow( + idMappingTechniques, + inputSourceConfig, + roleArn, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/idmappingworkflows/$(workflowName)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "idMappingTechniques" => idMappingTechniques, + "inputSourceConfig" => inputSourceConfig, + "roleArn" => roleArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_id_namespace(id_namespace_name) + update_id_namespace(id_namespace_name, params::Dict{String,<:Any}) + +Updates an existing ID namespace. + +# Arguments +- `id_namespace_name`: The name of the ID namespace. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the ID namespace. +- `"idMappingWorkflowProperties"`: Determines the properties of IdMappingWorkflow where + this IdNamespace can be used as a Source or a Target. +- `"inputSourceConfig"`: A list of InputSource objects, which have the fields + InputSourceARN and SchemaName. +- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to access the resources defined in this IdNamespace on your behalf as part of a + workflow run. +""" +function update_id_namespace( + idNamespaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "PUT", + "/idnamespaces/$(idNamespaceName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_namespace( + idNamespaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/idnamespaces/$(idNamespaceName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name) + update_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name, params::Dict{String,<:Any}) + +Updates an existing MatchingWorkflow. This method is identical to CreateMatchingWorkflow, +except it uses an HTTP PUT request instead of a POST request, and the MatchingWorkflow must +already exist for the method to succeed. + +# Arguments +- `input_source_config`: A list of InputSource objects, which have the fields + InputSourceARN and SchemaName. +- `output_source_config`: A list of OutputSource objects, each of which contains fields + OutputS3Path, ApplyNormalization, and Output. +- `resolution_techniques`: An object which defines the resolutionType and the + ruleBasedProperties. +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. +- `workflow_name`: The name of the workflow to be retrieved. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the workflow. +- `"incrementalRunConfig"`: An object which defines an incremental run type and has only + incrementalRunType as a field. +""" +function update_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/matchingworkflows/$(workflowName)", + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/matchingworkflows/$(workflowName)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_schema_mapping(mapped_input_fields, schema_name) + update_schema_mapping(mapped_input_fields, schema_name, params::Dict{String,<:Any}) + +Updates a schema mapping. A schema is immutable if it is being used by a workflow. +Therefore, you can't update a schema mapping if it's associated with a workflow. + +# Arguments +- `mapped_input_fields`: A list of MappedInputFields. Each MappedInputField corresponds to + a column the source data table, and contains column name plus additional information that + Entity Resolution uses for matching. +- `schema_name`: The name of the schema. There can't be multiple SchemaMappings with the + same name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the schema. +""" +function update_schema_mapping( + mappedInputFields, schemaName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "PUT", + "/schemas/$(schemaName)", + Dict{String,Any}("mappedInputFields" => mappedInputFields); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_schema_mapping( + mappedInputFields, + schemaName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/schemas/$(schemaName)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("mappedInputFields" => mappedInputFields), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/eventbridge.jl b/src/services/eventbridge.jl index a1af643c51..55732c82b0 100644 --- a/src/services/eventbridge.jl +++ b/src/services/eventbridge.jl @@ -72,7 +72,8 @@ end create_api_destination(connection_arn, http_method, invocation_endpoint, name, params::Dict{String,<:Any}) Creates an API destination, which is an HTTP invocation endpoint configured as a target for -events. +events. API destinations do not support private destinations, such as interface VPC +endpoints. For more information, see API destinations in the EventBridge User Guide. 
# Arguments - `connection_arn`: The ARN of the connection to use for the API destination. The @@ -141,7 +142,14 @@ Creates an archive of events with the specified settings. When you create an arc incoming events might not immediately start being sent to the archive. Allow a short period of time for changes to take effect. If you do not specify a pattern to filter events sent to the archive, all events are sent to the archive except replayed events. Replayed events -are not sent to an archive. +are not sent to an archive. Archives and schema discovery are not supported for event +buses encrypted using a customer managed key. EventBridge returns an error if: You call +CreateArchive on an event bus set to use a customer managed key for encryption. You call + CreateDiscoverer on an event bus set to use a customer managed key for encryption. You +call UpdatedEventBus to set a customer managed key on an event bus with an archives or +schema discovery enabled. To enable archives or schema discovery on an event bus, choose +to use an Amazon Web Services owned key. For more information, see Data encryption in +EventBridge in the Amazon EventBridge User Guide. # Arguments - `archive_name`: The name for the archive to create. @@ -327,8 +335,23 @@ event bus which can be matched to a partner event source. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeadLetterConfig"`: +- `"Description"`: The event bus description. - `"EventSourceName"`: If you are creating a partner event bus, this specifies the partner event source that the new event bus will be matched with. +- `"KmsKeyIdentifier"`: The identifier of the KMS customer managed key for EventBridge to + use, if you choose to use a customer managed key to encrypt events on this event bus. The + identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. + If you do not specify a customer managed key identifier, EventBridge uses an Amazon Web + Services owned key to encrypt events on the event bus. For more information, see Managing + keys in the Key Management Service Developer Guide. Archives and schema discovery are not + supported for event buses encrypted using a customer managed key. EventBridge returns an + error if: You call CreateArchive on an event bus set to use a customer managed key for + encryption. You call CreateDiscoverer on an event bus set to use a customer managed key + for encryption. You call UpdatedEventBus to set a customer managed key on an event bus + with an archives or schema discovery enabled. To enable archives or schema discovery on + an event bus, choose to use an Amazon Web Services owned key. For more information, see + Data encryption in EventBridge in the Amazon EventBridge User Guide. - `"Tags"`: Tags to associate with the event bus. """ function create_event_bus(Name; aws_config::AbstractAWSConfig=global_aws_config()) @@ -363,13 +386,15 @@ based on resources within the SaaS partner's service or application. An Amazon W account that creates a partner event bus that matches the partner event source can use that event bus to receive events from the partner, and then process them using Amazon Web Services Events rules and targets. Partner event source names follow this format: -partner_name/event_namespace/event_name partner_name is determined during partner -registration and identifies the partner to Amazon Web Services customers. 
event_namespace -is determined by the partner and is a way for the partner to categorize their events. -event_name is determined by the partner, and should uniquely identify an event-generating -resource within the partner system. The combination of event_namespace and event_name -should help Amazon Web Services customers decide whether to create an event bus to receive -these events. +partner_name/event_namespace/event_name partner_name is determined during partner +registration, and identifies the partner to Amazon Web Services customers. +event_namespace is determined by the partner, and is a way for the partner to categorize +their events. event_name is determined by the partner, and should uniquely identify an +event-generating resource within the partner system. The event_name must be unique across +all Amazon Web Services customers. This is because the event source is a shared resource +between the partner and customer accounts, and each partner event source unique in the +partner account. The combination of event_namespace and event_name should help Amazon Web +Services customers decide whether to create an event bus to receive these events. # Arguments - `account`: The Amazon Web Services account ID that is permitted to create a matching @@ -568,7 +593,7 @@ end Delete an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the -Amazon EventBridge User Guide. +Amazon EventBridge User Guide . # Arguments - `name`: The name of the endpoint you want to delete. For example, @@ -809,7 +834,7 @@ end Get the information about an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event -replication in the Amazon EventBridge User Guide.. +replication in the Amazon EventBridge User Guide . # Arguments - `name`: The name of the endpoint you want to get information about. For example, @@ -1167,7 +1192,7 @@ end List the global endpoints associated with this account. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event -replication in the Amazon EventBridge User Guide.. +replication in the Amazon EventBridge User Guide . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1380,7 +1405,8 @@ end list_rule_names_by_target(target_arn, params::Dict{String,<:Any}) Lists the rules for the specified target. You can see which of the rules in Amazon -EventBridge can invoke a specific target in your account. +EventBridge can invoke a specific target in your account. The maximum number of results per +page for requests is 100. # Arguments - `target_arn`: The Amazon Resource Name (ARN) of the target resource. @@ -1422,8 +1448,9 @@ end list_rules(params::Dict{String,<:Any}) Lists your Amazon EventBridge rules. You can either list all the rules or you can provide a -prefix to match to the rule names. ListRules does not list the targets of a rule. To see -the targets associated with a rule, use ListTargetsByRule. +prefix to match to the rule names. The maximum number of results per page for requests is +100. ListRules does not list the targets of a rule. To see the targets associated with a +rule, use ListTargetsByRule. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1484,7 +1511,8 @@ end list_targets_by_rule(rule) list_targets_by_rule(rule, params::Dict{String,<:Any}) -Lists the targets assigned to the specified rule. +Lists the targets assigned to the specified rule. The maximum number of results per page +for requests is 100. # Arguments - `rule`: The name of the rule. @@ -1519,8 +1547,13 @@ end put_events(entries) put_events(entries, params::Dict{String,<:Any}) -Sends custom events to Amazon EventBridge so that they can be matched to rules. PutEvents -will only process nested JSON up to 1100 levels deep. +Sends custom events to Amazon EventBridge so that they can be matched to rules. The maximum +size for a PutEvents event entry is 256 KB. Entry size is calculated including the event +and any necessary characters and keys of the JSON representation of the event. To learn +more, see Calculating PutEvents event entry size in the Amazon EventBridge User Guide +PutEvents accepts the data in JSON format. For the JSON number (integer) data type, the +constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of +9,223,372,036,854,775,807. PutEvents will only process nested JSON up to 1100 levels deep. # Arguments - `entries`: The entry that defines an event in your system. You can specify several @@ -1557,7 +1590,8 @@ end put_partner_events(entries, params::Dict{String,<:Any}) This is used by SaaS partners to write events to a customer's partner event bus. Amazon Web -Services customers do not use this operation. +Services customers do not use this operation. For information on calculating event batch +size, see Calculating EventBridge PutEvents event entry size in the EventBridge User Guide. # Arguments - `entries`: The list of events to write to the event bus. @@ -1689,7 +1723,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"EventBusName"`: The name or ARN of the event bus to associate with this rule. If you omit this, the default event bus is used. - `"EventPattern"`: The event pattern. For more information, see Amazon EventBridge event - patterns in the Amazon EventBridge User Guide. + patterns in the Amazon EventBridge User Guide . - `"RoleArn"`: The Amazon Resource Name (ARN) of the IAM role associated with the rule. If you're setting an event bus in another account as the target and that account granted permission to your account through an organization instead of directly by the account ID, @@ -1697,7 +1731,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys in this parameter. - `"ScheduleExpression"`: The scheduling expression. For example, \"cron(0 20 * * ? *)\" or \"rate(5 minutes)\". -- `"State"`: Indicates whether the rule is enabled or disabled. +- `"State"`: The state of the rule. Valid values include: DISABLED: The rule is + disabled. EventBridge does not match any events against the rule. ENABLED: The rule is + enabled. EventBridge matches events against the rule, except for Amazon Web Services + management events delivered through CloudTrail. + ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS: The rule is enabled for all events, + including Amazon Web Services management events delivered through CloudTrail. Management + events provide visibility into management operations that are performed on resources in + your Amazon Web Services account. These are also known as control plane operations. 
For + more information, see Logging management events in the CloudTrail User Guide, and Filtering + management events from Amazon Web Services services in the Amazon EventBridge User Guide . + This value is only valid for rules on the default event bus or custom event buses. It does + not apply to partner event buses. - `"Tags"`: The list of key-value pairs to associate with the rule. """ function put_rule(Name; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1725,28 +1770,22 @@ end Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule. Targets are the resources that are invoked when a rule is -triggered. Each rule can have up to five (5) targets associated with it at one time. You -can configure the following as targets for Events: API destination API Gateway -Batch job queue CloudWatch group CodeBuild project CodePipeline EC2 CreateSnapshot -API call EC2 Image Builder EC2 RebootInstances API call EC2 StopInstances API call -EC2 TerminateInstances API call ECS task Event bus in a different account or Region - Event bus in the same account and Region Firehose delivery stream Glue workflow -Incident Manager response plan Inspector assessment template Kinesis stream Lambda -function Redshift cluster Redshift Serverless workgroup SageMaker Pipeline SNS -topic SQS queue Step Functions state machine Systems Manager Automation Systems -Manager OpsItem Systems Manager Run Command Creating rules with built-in targets is -supported only in the Amazon Web Services Management Console. The built-in targets are EC2 -CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 -TerminateInstances API call. For some target types, PutTargets provides target-specific +triggered. The maximum number of entries per request is 10. Each rule can have up to five +(5) targets associated with it at one time. For a list of services you can configure as +targets for events, see EventBridge targets in the Amazon EventBridge User Guide . +Creating rules with built-in targets is supported only in the Amazon Web Services +Management Console. The built-in targets are: Amazon EBS CreateSnapshot API call +Amazon EC2 RebootInstances API call Amazon EC2 StopInstances API call Amazon EC2 +TerminateInstances API call For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field. To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate -permissions. For Lambda and Amazon SNS resources, EventBridge relies on resource-based -policies. For EC2 instances, Kinesis Data Streams, Step Functions state machines and API +permissions: For Lambda and Amazon SNS resources, EventBridge relies on resource-based +policies. For EC2 instances, Kinesis Data Streams, Step Functions state machines and API Gateway APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in -PutTargets. For more information, see Authentication and Access Control in the Amazon -EventBridge User Guide. If another Amazon Web Services account is in the same region and +PutTargets. For more information, see Authentication and Access Control in the Amazon +EventBridge User Guide . 
If another Amazon Web Services account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run @@ -1759,24 +1798,25 @@ event bus of another account as the target, and that account granted permission account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon -EventBridge User Guide. For more information about enabling cross-account events, see -PutPermission. Input, InputPath, and InputTransformer are mutually exclusive and optional -parameters of a target. When a rule is triggered due to a matched event: If none of the -following arguments are specified for a target, then the entire event is passed to the -target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in -which case nothing from the event is passed to the target). If Input is specified in the -form of valid JSON, then the matched event is overridden with this constant. If InputPath -is specified in the form of JSONPath (for example, .detail), then only the part of the -event specified in the path is passed to the target (for example, only the detail part of -the event is passed). If InputTransformer is specified, then one or more specified -JSONPaths are extracted from the event and used as values in a template that you specify as -the input to the target. When you specify InputPath or InputTransformer, you must use -JSON dot notation, not bracket notation. When you add targets to a rule and the associated -rule triggers soon after, new or updated targets might not be immediately invoked. Allow a -short period of time for changes to take effect. This action can partially fail if too many -requests are made at the same time. If that happens, FailedEntryCount is non-zero in the -response and each entry in FailedEntries provides the ID of the failed target and the error -code. +EventBridge User Guide. If you have an IAM role on a cross-account event bus target, a +PutTargets call without a role on the same target (same Id and Arn) will not remove the +role. For more information about enabling cross-account events, see PutPermission. Input, +InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. +When a rule is triggered due to a matched event: If none of the following arguments are +specified for a target, then the entire event is passed to the target in JSON format +(unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from +the event is passed to the target). If Input is specified in the form of valid JSON, then +the matched event is overridden with this constant. If InputPath is specified in the form +of JSONPath (for example, .detail), then only the part of the event specified in the path +is passed to the target (for example, only the detail part of the event is passed). If +InputTransformer is specified, then one or more specified JSONPaths are extracted from the +event and used as values in a template that you specify as the input to the target. When +you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket +notation. 
When you add targets to a rule and the associated rule triggers soon after, new +or updated targets might not be immediately invoked. Allow a short period of time for +changes to take effect. This action can partially fail if too many requests are made at the +same time. If that happens, FailedEntryCount is non-zero in the response and each entry in +FailedEntries provides the ID of the failed target and the error code. # Arguments - `rule`: The name of the rule. @@ -1854,7 +1894,8 @@ request are removed. When you remove a target, when the associated rule trigger targets might continue to be invoked. Allow a short period of time for changes to take effect. This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries -provides the ID of the failed target and the error code. +provides the ID of the failed target and the error code. The maximum number of entries per +request is 10. # Arguments - `ids`: The IDs of the targets to remove from the rule. @@ -2033,7 +2074,7 @@ the event you want to match. follow the format specified in Amazon Web Services Events, and the following fields are mandatory: id account source time region resources detail-type - `event_pattern`: The event pattern. For more information, see Events and Event Patterns - in the Amazon EventBridge User Guide. + in the Amazon EventBridge User Guide . """ function test_event_pattern( @@ -2223,7 +2264,7 @@ end Update an existing endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the -Amazon EventBridge User Guide.. +Amazon EventBridge User Guide . # Arguments - `name`: The name of the endpoint you want to update. @@ -2255,3 +2296,41 @@ function update_endpoint( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_event_bus() + update_event_bus(params::Dict{String,<:Any}) + +Updates the specified event bus. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeadLetterConfig"`: +- `"Description"`: The event bus description. +- `"KmsKeyIdentifier"`: The identifier of the KMS customer managed key for EventBridge to + use, if you choose to use a customer managed key to encrypt events on this event bus. The + identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. + If you do not specify a customer managed key identifier, EventBridge uses an Amazon Web + Services owned key to encrypt events on the event bus. For more information, see Managing + keys in the Key Management Service Developer Guide. Archives and schema discovery are not + supported for event buses encrypted using a customer managed key. EventBridge returns an + error if: You call CreateArchive on an event bus set to use a customer managed key for + encryption. You call CreateDiscoverer on an event bus set to use a customer managed key + for encryption. You call UpdatedEventBus to set a customer managed key on an event bus + with an archives or schema discovery enabled. To enable archives or schema discovery on + an event bus, choose to use an Amazon Web Services owned key. For more information, see + Data encryption in EventBridge in the Amazon EventBridge User Guide. +- `"Name"`: The name of the event bus. 
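+
+# Examples
+A minimal sketch of enabling a customer managed key on an event bus. The bus name and key
+ARN are illustrative placeholders.
+
+    # The bus must not have archives or schema discovery enabled when a customer
+    # managed key is set (see the note above).
+    update_event_bus(Dict(
+        "Name" => "my-event-bus",
+        "Description" => "Orders bus encrypted with a customer managed key",
+        "KmsKeyIdentifier" => "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
+    ))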
+""" +function update_event_bus(; aws_config::AbstractAWSConfig=global_aws_config()) + return eventbridge( + "UpdateEventBus"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_event_bus( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return eventbridge( + "UpdateEventBus", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end diff --git a/src/services/finspace.jl b/src/services/finspace.jl index 52f3ac0447..996107228b 100644 --- a/src/services/finspace.jl +++ b/src/services/finspace.jl @@ -60,19 +60,26 @@ files by using an ordered list of change requests. # Arguments - `change_requests`: A list of change request objects that are run in order. A change - request object consists of changeType , s3Path, and a dbPath. A changeType can has the + request object consists of changeType , s3Path, and dbPath. A changeType can have the following values: PUT – Adds or updates files in a database. DELETE – Deletes files in a database. All the change requests require a mandatory dbPath attribute that - defines the path within the database directory. The s3Path attribute defines the s3 source - file path and is required for a PUT change type. Here is an example of how you can use the - change request object: [ { \"changeType\": \"PUT\", - \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}, { \"changeType\": - \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}, { \"changeType\": - \"DELETE\", \"dbPath\": \"/2020.01.01/\"} ] In this example, the first request with PUT - change type allows you to add files in the given s3Path under the 2020.01.02 partition of - the database. The second request with PUT change type allows you to add a single sym file - at database root location. The last request with DELETE change type allows you to delete - the files under the 2020.01.01 partition of the database. + defines the path within the database directory. All database paths must start with a + leading / and end with a trailing /. The s3Path attribute defines the s3 source file path + and is required for a PUT change type. The s3path must end with a trailing / if it is a + directory and must end without a trailing / if it is a file. Here are few examples of how + you can use the change request object: This request adds a single sym file at database + root location. { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", + \"dbPath\":\"/\"} This request adds files in the given s3Path under the 2020.01.02 + partition of the database. { \"changeType\": \"PUT\", + \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"} This request + adds files in the given s3Path under the taq table partition of the database. [ { + \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", + \"dbPath\":\"/2020.01.02/taq/\"}] This request deletes the 2020.01.02 partition of the + database. [{ \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.02/\"} ] The DELETE + request allows you to delete the existing files under the 2020.01.02 partition of the + database, and the PUT request adds a new taq table under it. [ {\"changeType\": + \"DELETE\", \"dbPath\":\"/2020.01.02/\"}, {\"changeType\": \"PUT\", + \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}] - `client_token`: A token that ensures idempotency. This token expires in 10 minutes. - `database_name`: The name of the kdb database. - `environment_id`: A unique identifier of the kdb environment. 
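
A minimal sketch of the change-request format described above, expressed with this package's Julia interface. The positional argument order (changeRequests, clientToken, databaseName, environmentId) and the database, environment, and bucket names are assumptions and placeholders.

    using UUIDs: uuid4

    # Each change request mirrors the JSON objects shown in the docstring above.
    change_requests = [
        Dict("changeType" => "PUT", "s3Path" => "s3://bucket/db/2020.01.02/", "dbPath" => "/2020.01.02/"),
        Dict("changeType" => "PUT", "s3Path" => "s3://bucket/db/sym", "dbPath" => "/"),
        Dict("changeType" => "DELETE", "dbPath" => "/2020.01.01/"),
    ]

    # The client token provides idempotency and expires in 10 minutes.
    create_kx_changeset(change_requests, string(uuid4()), "mydb", "my-kx-environment-id")
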
@@ -119,8 +126,8 @@ function create_kx_changeset( end """ - create_kx_cluster(az_mode, capacity_configuration, cluster_name, cluster_type, environment_id, release_label) - create_kx_cluster(az_mode, capacity_configuration, cluster_name, cluster_type, environment_id, release_label, params::Dict{String,<:Any}) + create_kx_cluster(az_mode, cluster_name, cluster_type, environment_id, release_label, vpc_configuration) + create_kx_cluster(az_mode, cluster_name, cluster_type, environment_id, release_label, vpc_configuration, params::Dict{String,<:Any}) Creates a new kdb cluster. @@ -128,9 +135,6 @@ Creates a new kdb cluster. - `az_mode`: The number of availability zones you want to assign per cluster. This can be one of the following SINGLE – Assigns one availability zone per cluster. MULTI – Assigns all the availability zones per cluster. -- `capacity_configuration`: A structure for the metadata of a cluster. It includes - information about like the CPUs needed, memory of instances, number of instances, and the - port used while establishing a connection. - `cluster_name`: A unique name for the cluster that you want to create. - `cluster_type`: Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with @@ -142,9 +146,19 @@ Creates a new kdb cluster. must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does - not require a writable local storage. + not require a writable local storage. GP – A general purpose cluster allows you to + quickly iterate on code during development by granting greater access to system commands + and enabling a fast reload of custom code. This cluster type can optionally mount databases + including cache and savedown storage. For this cluster type, the node count is fixed at 1. + It does not support autoscaling and supports only SINGLE AZ mode. Tickerplant – A + tickerplant cluster allows you to subscribe to feed handlers based on IAM permissions. It + can publish to RDBs, other Tickerplants, and real-time subscribers (RTS). Tickerplants can + persist messages to log, which is readable by any RDB environment. It supports only + single-node that is only one kdb process. - `environment_id`: A unique identifier for the kdb environment. - `release_label`: The version of FinSpace managed kdb to run. +- `vpc_configuration`: Configuration details about the network where the Privatelink + endpoint of the cluster resides. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -154,6 +168,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"cacheStorageConfigurations"`: The configurations for a read only cache storage associated with a cluster. This cache will be stored as an FSx Lustre that reads from the S3 store. +- `"capacityConfiguration"`: A structure for the metadata of a cluster. It includes + information like the CPUs needed, memory of instances, and number of instances. - `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. - `"clusterDescription"`: A description of the cluster. - `"code"`: The details of the custom code that you want to use inside a cluster when @@ -171,18 +187,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys to hold data during the savedown process. This parameter is required when you choose clusterType as RDB. All the data written to this storage space is lost when the cluster node is restarted. +- `"scalingGroupConfiguration"`: The structure that stores the configuration details of a + scaling group. - `"tags"`: A list of key-value pairs to label the cluster. You can add up to 50 tags to a cluster. -- `"vpcConfiguration"`: Configuration details about the network where the Privatelink - endpoint of the cluster resides. +- `"tickerplantLogConfiguration"`: A configuration to store Tickerplant logs. It consists + of a list of volumes that will be mounted to your cluster. For the cluster type + Tickerplant, the location of the TP volume on the cluster will be available by using the + global variable .aws.tp_log_path. """ function create_kx_cluster( azMode, - capacityConfiguration, clusterName, clusterType, environmentId, - releaseLabel; + releaseLabel, + vpcConfiguration; aws_config::AbstractAWSConfig=global_aws_config(), ) return finspace( @@ -190,10 +210,10 @@ function create_kx_cluster( "/kx/environments/$(environmentId)/clusters", Dict{String,Any}( "azMode" => azMode, - "capacityConfiguration" => capacityConfiguration, "clusterName" => clusterName, "clusterType" => clusterType, "releaseLabel" => releaseLabel, + "vpcConfiguration" => vpcConfiguration, "clientToken" => string(uuid4()), ); aws_config=aws_config, @@ -202,11 +222,11 @@ function create_kx_cluster( end function create_kx_cluster( azMode, - capacityConfiguration, clusterName, clusterType, environmentId, releaseLabel, + vpcConfiguration, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -218,10 +238,10 @@ function create_kx_cluster( _merge, Dict{String,Any}( "azMode" => azMode, - "capacityConfiguration" => capacityConfiguration, "clusterName" => clusterName, "clusterType" => clusterType, "releaseLabel" => releaseLabel, + "vpcConfiguration" => vpcConfiguration, "clientToken" => string(uuid4()), ), params, @@ -287,6 +307,94 @@ function create_kx_database( ) end +""" + create_kx_dataview(az_mode, client_token, database_name, dataview_name, environment_id) + create_kx_dataview(az_mode, client_token, database_name, dataview_name, environment_id, params::Dict{String,<:Any}) + + Creates a snapshot of kdb database with tiered storage capabilities and a pre-warmed +cache, ready for mounting on kdb clusters. Dataviews are only available for clusters +running on a scaling group. They are not supported on dedicated clusters. + +# Arguments +- `az_mode`: The number of availability zones you want to assign per volume. Currently, + FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. +- `client_token`: A token that ensures idempotency. This token expires in 10 minutes. +- `database_name`: The name of the database where you want to create a dataview. +- `dataview_name`: A unique identifier for the dataview. +- `environment_id`: A unique identifier for the kdb environment, where you want to create + the dataview. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoUpdate"`: The option to specify whether you want to apply all the future additions + and corrections automatically to the dataview, when you ingest new changesets. The default + value is false. +- `"availabilityZoneId"`: The identifier of the availability zones. 
+- `"changesetId"`: A unique identifier of the changeset that you want to use to ingest + data. +- `"description"`: A description of the dataview. +- `"readWrite"`: The option to specify whether you want to make the dataview writable to + perform database maintenance. The following are some considerations related to writable + dataviews.

 You cannot create partial writable dataviews. When you create + writeable dataviews you must provide the entire database path. You cannot perform updates + on a writeable dataview. Hence, autoUpdate must be set as False if readWrite is True for a + dataview. You must also use a unique volume for creating a writeable dataview. So, if you + choose a volume that is already in use by another dataview, the dataview creation fails. + Once you create a dataview as writeable, you cannot change it to read-only. So, you cannot + update the readWrite parameter later. +- `"segmentConfigurations"`: The configuration that contains the database path of the data + that you want to place on each selected volume. Each segment must have a unique database + path for each volume. If you do not explicitly specify any database path for a volume, they + are accessible from the cluster through the default S3/object store segment. +- `"tags"`: A list of key-value pairs to label the dataview. You can add up to 50 tags to + a dataview. +""" +function create_kx_dataview( + azMode, + clientToken, + databaseName, + dataviewName, + environmentId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "POST", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews", + Dict{String,Any}( + "azMode" => azMode, "clientToken" => clientToken, "dataviewName" => dataviewName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_kx_dataview( + azMode, + clientToken, + databaseName, + dataviewName, + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "POST", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "azMode" => azMode, + "clientToken" => clientToken, + "dataviewName" => dataviewName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_kx_environment(kms_key_id, name) create_kx_environment(kms_key_id, name, params::Dict{String,<:Any}) @@ -310,7 +418,9 @@ function create_kx_environment( return finspace( "POST", "/kx/environments", - Dict{String,Any}("kmsKeyId" => kmsKeyId, "name" => name); + Dict{String,Any}( + "kmsKeyId" => kmsKeyId, "name" => name, "clientToken" => string(uuid4()) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -326,7 +436,87 @@ function create_kx_environment( "/kx/environments", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("kmsKeyId" => kmsKeyId, "name" => name), params + _merge, + Dict{String,Any}( + "kmsKeyId" => kmsKeyId, "name" => name, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_kx_scaling_group(availability_zone_id, client_token, environment_id, host_type, scaling_group_name) + create_kx_scaling_group(availability_zone_id, client_token, environment_id, host_type, scaling_group_name, params::Dict{String,<:Any}) + +Creates a new scaling group. + +# Arguments +- `availability_zone_id`: The identifier of the availability zones. +- `client_token`: A token that ensures idempotency. This token expires in 10 minutes. +- `environment_id`: A unique identifier for the kdb environment, where you want to create + the scaling group. +- `host_type`: The memory and CPU capabilities of the scaling group host on which FinSpace + Managed kdb clusters will be placed. 
You can add one of the following values: + kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs. + kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs. + kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs. + kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and 128 vCPUs. + kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and 64 vCPUs. + kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and 96 vCPUs. +- `scaling_group_name`: A unique identifier for the kdb scaling group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: A list of key-value pairs to label the scaling group. You can add up to 50 + tags to a scaling group. +""" +function create_kx_scaling_group( + availabilityZoneId, + clientToken, + environmentId, + hostType, + scalingGroupName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "POST", + "/kx/environments/$(environmentId)/scalingGroups", + Dict{String,Any}( + "availabilityZoneId" => availabilityZoneId, + "clientToken" => clientToken, + "hostType" => hostType, + "scalingGroupName" => scalingGroupName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_kx_scaling_group( + availabilityZoneId, + clientToken, + environmentId, + hostType, + scalingGroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "POST", + "/kx/environments/$(environmentId)/scalingGroups", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "availabilityZoneId" => availabilityZoneId, + "clientToken" => clientToken, + "hostType" => hostType, + "scalingGroupName" => scalingGroupName, + ), + params, ), ); aws_config=aws_config, @@ -358,7 +548,9 @@ function create_kx_user( return finspace( "POST", "/kx/environments/$(environmentId)/users", - Dict{String,Any}("iamRole" => iamRole, "userName" => userName); + Dict{String,Any}( + "iamRole" => iamRole, "userName" => userName, "clientToken" => string(uuid4()) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -376,7 +568,88 @@ function create_kx_user( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("iamRole" => iamRole, "userName" => userName), + Dict{String,Any}( + "iamRole" => iamRole, + "userName" => userName, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_kx_volume(availability_zone_ids, az_mode, environment_id, volume_name, volume_type) + create_kx_volume(availability_zone_ids, az_mode, environment_id, volume_name, volume_type, params::Dict{String,<:Any}) + + Creates a new volume with a specific amount of throughput and storage capacity. + +# Arguments +- `availability_zone_ids`: The identifier of the availability zones. +- `az_mode`: The number of availability zones you want to assign per volume. Currently, + FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. +- `environment_id`: A unique identifier for the kdb environment, whose clusters can attach + to the volume. +- `volume_name`: A unique identifier for the volume. +- `volume_type`: The type of file system volume. Currently, FinSpace only supports NAS_1 + volume type. When you select NAS_1 volume type, you must also provide nas1Configuration. 
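# Illustrative usage sketch for the dataview API described above, via the
# high-level AWS.jl `@service` interface (assumed pattern). The environment and
# database identifiers are placeholders, not values taken from this patch.
using AWS, UUIDs
@service Finspace

env_id  = "my-kx-environment-id"   # assumed, existing kdb environment
db_name = "welcomedb"              # assumed, existing kdb database

# Positional arguments follow the generated signature:
# (azMode, clientToken, databaseName, dataviewName, environmentId[, params])
Finspace.create_kx_dataview(
    "SINGLE",                      # azMode: only SINGLE is supported for dataviews
    string(uuid4()),               # clientToken for idempotency
    db_name,
    "welcomedb-view",
    env_id,
    Dict{String,Any}(
        "autoUpdate"  => true,     # apply future changesets automatically
        "description" => "Read-only cached view of welcomedb",
    ),
)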
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. +- `"description"`: A description of the volume. +- `"nas1Configuration"`: Specifies the configuration for the Network attached storage + (NAS_1) file system volume. This parameter is required when you choose volumeType as NAS_1. +- `"tags"`: A list of key-value pairs to label the volume. You can add up to 50 tags to a + volume. +""" +function create_kx_volume( + availabilityZoneIds, + azMode, + environmentId, + volumeName, + volumeType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "POST", + "/kx/environments/$(environmentId)/kxvolumes", + Dict{String,Any}( + "availabilityZoneIds" => availabilityZoneIds, + "azMode" => azMode, + "volumeName" => volumeName, + "volumeType" => volumeType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_kx_volume( + availabilityZoneIds, + azMode, + environmentId, + volumeName, + volumeType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "POST", + "/kx/environments/$(environmentId)/kxvolumes", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "availabilityZoneIds" => availabilityZoneIds, + "azMode" => azMode, + "volumeName" => volumeName, + "volumeType" => volumeType, + "clientToken" => string(uuid4()), + ), params, ), ); @@ -461,6 +734,44 @@ function delete_kx_cluster( ) end +""" + delete_kx_cluster_node(cluster_name, environment_id, node_id) + delete_kx_cluster_node(cluster_name, environment_id, node_id, params::Dict{String,<:Any}) + +Deletes the specified nodes from a cluster. + +# Arguments +- `cluster_name`: The name of the cluster, for which you want to delete the nodes. +- `environment_id`: A unique identifier for the kdb environment. +- `node_id`: A unique identifier for the node that you want to delete. + +""" +function delete_kx_cluster_node( + clusterName, environmentId, nodeId; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/clusters/$(clusterName)/nodes/$(nodeId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_kx_cluster_node( + clusterName, + environmentId, + nodeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/clusters/$(clusterName)/nodes/$(nodeId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_kx_database(client_token, database_name, environment_id) delete_kx_database(client_token, database_name, environment_id, params::Dict{String,<:Any}) @@ -506,6 +817,55 @@ function delete_kx_database( ) end +""" + delete_kx_dataview(client_token, database_name, dataview_name, environment_id) + delete_kx_dataview(client_token, database_name, dataview_name, environment_id, params::Dict{String,<:Any}) + + Deletes the specified dataview. Before deleting a dataview, make sure that it is not in +use by any cluster. + +# Arguments +- `client_token`: A token that ensures idempotency. This token expires in 10 minutes. +- `database_name`: The name of the database whose dataview you want to delete. +- `dataview_name`: The name of the dataview that you want to delete. 
+- `environment_id`: A unique identifier for the kdb environment, from where you want to + delete the dataview. + +""" +function delete_kx_dataview( + clientToken, + databaseName, + dataviewName, + environmentId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews/$(dataviewName)", + Dict{String,Any}("clientToken" => clientToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_kx_dataview( + clientToken, + databaseName, + dataviewName, + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews/$(dataviewName)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => clientToken), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_kx_environment(environment_id) delete_kx_environment(environment_id, params::Dict{String,<:Any}) @@ -516,13 +876,17 @@ remove all the associated data and any services running in it. # Arguments - `environment_id`: A unique identifier for the kdb environment. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. """ function delete_kx_environment( environmentId; aws_config::AbstractAWSConfig=global_aws_config() ) return finspace( "DELETE", - "/kx/environments/$(environmentId)"; + "/kx/environments/$(environmentId)", + Dict{String,Any}("clientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -535,7 +899,53 @@ function delete_kx_environment( return finspace( "DELETE", "/kx/environments/$(environmentId)", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_kx_scaling_group(environment_id, scaling_group_name) + delete_kx_scaling_group(environment_id, scaling_group_name, params::Dict{String,<:Any}) + + Deletes the specified scaling group. This action is irreversible. You cannot delete a +scaling group until all the clusters running on it have been deleted. + +# Arguments +- `environment_id`: A unique identifier for the kdb environment, from where you want to + delete the dataview. +- `scaling_group_name`: A unique identifier for the kdb scaling group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. 
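# Illustrative sketch: deleting a dataview before removing its database, using the
# generated delete_kx_dataview shown earlier (assumes the `@service` pattern; all
# identifiers are placeholders).
using AWS, UUIDs
@service Finspace

Finspace.delete_kx_dataview(
    string(uuid4()),          # clientToken for idempotency
    "welcomedb",              # databaseName
    "welcomedb-view",         # dataviewName
    "my-kx-environment-id",   # environmentId
)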
+""" +function delete_kx_scaling_group( + environmentId, scalingGroupName; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/scalingGroups/$(scalingGroupName)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_kx_scaling_group( + environmentId, + scalingGroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/scalingGroups/$(scalingGroupName)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -551,13 +961,17 @@ Deletes a user in the specified kdb environment. - `environment_id`: A unique identifier for the kdb environment. - `user_name`: A unique identifier for the user that you want to delete. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. """ function delete_kx_user( environmentId, userName; aws_config::AbstractAWSConfig=global_aws_config() ) return finspace( "DELETE", - "/kx/environments/$(environmentId)/users/$(userName)"; + "/kx/environments/$(environmentId)/users/$(userName)", + Dict{String,Any}("clientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -571,7 +985,54 @@ function delete_kx_user( return finspace( "DELETE", "/kx/environments/$(environmentId)/users/$(userName)", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_kx_volume(environment_id, volume_name) + delete_kx_volume(environment_id, volume_name, params::Dict{String,<:Any}) + + Deletes a volume. You can only delete a volume if it's not attached to a cluster or a +dataview. When a volume is deleted, any data on the volume is lost. This action is +irreversible. + +# Arguments +- `environment_id`: A unique identifier for the kdb environment, whose clusters can attach + to the volume. +- `volume_name`: The name of the volume that you want to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. 
+""" +function delete_kx_volume( + environmentId, volumeName; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/kxvolumes/$(volumeName)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_kx_volume( + environmentId, + volumeName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "DELETE", + "/kx/environments/$(environmentId)/kxvolumes/$(volumeName)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -735,35 +1196,147 @@ function get_kx_connection_string( end """ - get_kx_database(database_name, environment_id) - get_kx_database(database_name, environment_id, params::Dict{String,<:Any}) + get_kx_database(database_name, environment_id) + get_kx_database(database_name, environment_id, params::Dict{String,<:Any}) + +Returns database information for the specified environment ID. + +# Arguments +- `database_name`: The name of the kdb database. +- `environment_id`: A unique identifier for the kdb environment. + +""" +function get_kx_database( + databaseName, environmentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/databases/$(databaseName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_kx_database( + databaseName, + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/databases/$(databaseName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_kx_dataview(database_name, dataview_name, environment_id) + get_kx_dataview(database_name, dataview_name, environment_id, params::Dict{String,<:Any}) + + Retrieves details of the dataview. + +# Arguments +- `database_name`: The name of the database where you created the dataview. +- `dataview_name`: A unique identifier for the dataview. +- `environment_id`: A unique identifier for the kdb environment, from where you want to + retrieve the dataview details. + +""" +function get_kx_dataview( + databaseName, + dataviewName, + environmentId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews/$(dataviewName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_kx_dataview( + databaseName, + dataviewName, + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews/$(dataviewName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_kx_environment(environment_id) + get_kx_environment(environment_id, params::Dict{String,<:Any}) + +Retrieves all the information for the specified kdb environment. + +# Arguments +- `environment_id`: A unique identifier for the kdb environment. 
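# Illustrative sketch: polling get_kx_dataview until the view is ready to mount.
# The "status" response field and its values are assumptions about the service
# model, not something defined in this file; adjust if they differ.
using AWS
@service Finspace

function wait_for_dataview(env_id, db_name, dv_name; delay=30)
    while true
        resp = Finspace.get_kx_dataview(db_name, dv_name, env_id)
        status = get(resp, "status", "")
        status == "ACTIVE" && return resp
        status == "FAILED" && error("dataview $(dv_name) failed to provision")
        sleep(delay)   # wait before the next poll
    end
end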
+ +""" +function get_kx_environment( + environmentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "GET", + "/kx/environments/$(environmentId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_kx_environment( + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "GET", + "/kx/environments/$(environmentId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_kx_scaling_group(environment_id, scaling_group_name) + get_kx_scaling_group(environment_id, scaling_group_name, params::Dict{String,<:Any}) -Returns database information for the specified environment ID. + Retrieves details of a scaling group. # Arguments -- `database_name`: The name of the kdb database. - `environment_id`: A unique identifier for the kdb environment. +- `scaling_group_name`: A unique identifier for the kdb scaling group. """ -function get_kx_database( - databaseName, environmentId; aws_config::AbstractAWSConfig=global_aws_config() +function get_kx_scaling_group( + environmentId, scalingGroupName; aws_config::AbstractAWSConfig=global_aws_config() ) return finspace( "GET", - "/kx/environments/$(environmentId)/databases/$(databaseName)"; + "/kx/environments/$(environmentId)/scalingGroups/$(scalingGroupName)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_kx_database( - databaseName, +function get_kx_scaling_group( environmentId, + scalingGroupName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return finspace( "GET", - "/kx/environments/$(environmentId)/databases/$(databaseName)", + "/kx/environments/$(environmentId)/scalingGroups/$(scalingGroupName)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -771,33 +1344,35 @@ function get_kx_database( end """ - get_kx_environment(environment_id) - get_kx_environment(environment_id, params::Dict{String,<:Any}) + get_kx_user(environment_id, user_name) + get_kx_user(environment_id, user_name, params::Dict{String,<:Any}) -Retrieves all the information for the specified kdb environment. +Retrieves information about the specified kdb user. # Arguments - `environment_id`: A unique identifier for the kdb environment. +- `user_name`: A unique identifier for the user. """ -function get_kx_environment( - environmentId; aws_config::AbstractAWSConfig=global_aws_config() +function get_kx_user( + environmentId, userName; aws_config::AbstractAWSConfig=global_aws_config() ) return finspace( "GET", - "/kx/environments/$(environmentId)"; + "/kx/environments/$(environmentId)/users/$(userName)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_kx_environment( +function get_kx_user( environmentId, + userName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return finspace( "GET", - "/kx/environments/$(environmentId)", + "/kx/environments/$(environmentId)/users/$(userName)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -805,35 +1380,36 @@ function get_kx_environment( end """ - get_kx_user(environment_id, user_name) - get_kx_user(environment_id, user_name, params::Dict{String,<:Any}) + get_kx_volume(environment_id, volume_name) + get_kx_volume(environment_id, volume_name, params::Dict{String,<:Any}) -Retrieves information about the specified kdb user. + Retrieves the information about the volume. 
# Arguments -- `environment_id`: A unique identifier for the kdb environment. -- `user_name`: A unique identifier for the user. +- `environment_id`: A unique identifier for the kdb environment, whose clusters can attach + to the volume. +- `volume_name`: A unique identifier for the volume. """ -function get_kx_user( - environmentId, userName; aws_config::AbstractAWSConfig=global_aws_config() +function get_kx_volume( + environmentId, volumeName; aws_config::AbstractAWSConfig=global_aws_config() ) return finspace( "GET", - "/kx/environments/$(environmentId)/users/$(userName)"; + "/kx/environments/$(environmentId)/kxvolumes/$(volumeName)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_kx_user( +function get_kx_volume( environmentId, - userName, + volumeName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return finspace( "GET", - "/kx/environments/$(environmentId)/users/$(userName)", + "/kx/environments/$(environmentId)/kxvolumes/$(volumeName)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -971,7 +1547,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does - not require a writable local storage. + not require a writable local storage. GP – A general purpose cluster allows you to + quickly iterate on code during development by granting greater access to system commands + and enabling a fast reload of custom code. This cluster type can optionally mount databases + including cache and savedown storage. For this cluster type, the node count is fixed at 1. + It does not support autoscaling and supports only SINGLE AZ mode. Tickerplant – A + tickerplant cluster allows you to subscribe to feed handlers based on IAM permissions. It + can publish to RDBs, other Tickerplants, and real-time subscribers (RTS). Tickerplants can + persist messages to log, which is readable by any RDB environment. It supports only + single-node that is only one kdb process. - `"maxResults"`: The maximum number of results to return in this request. - `"nextToken"`: A token that indicates where a results page should begin. """ @@ -1033,6 +1617,47 @@ function list_kx_databases( ) end +""" + list_kx_dataviews(database_name, environment_id) + list_kx_dataviews(database_name, environment_id, params::Dict{String,<:Any}) + + Returns a list of all the dataviews in the database. + +# Arguments +- `database_name`: The name of the database where the dataviews were created. +- `environment_id`: A unique identifier for the kdb environment, for which you want to + retrieve a list of dataviews. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in this request. +- `"nextToken"`: A token that indicates where a results page should begin. 
+""" +function list_kx_dataviews( + databaseName, environmentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_kx_dataviews( + databaseName, + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_kx_environments() list_kx_environments(params::Dict{String,<:Any}) @@ -1061,6 +1686,45 @@ function list_kx_environments( ) end +""" + list_kx_scaling_groups(environment_id) + list_kx_scaling_groups(environment_id, params::Dict{String,<:Any}) + + Returns a list of scaling groups in a kdb environment. + +# Arguments +- `environment_id`: A unique identifier for the kdb environment, for which you want to + retrieve a list of scaling groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in this request. +- `"nextToken"`: A token that indicates where a results page should begin. +""" +function list_kx_scaling_groups( + environmentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/scalingGroups"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_kx_scaling_groups( + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/scalingGroups", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_kx_users(environment_id) list_kx_users(environment_id, params::Dict{String,<:Any}) @@ -1097,6 +1761,45 @@ function list_kx_users( ) end +""" + list_kx_volumes(environment_id) + list_kx_volumes(environment_id, params::Dict{String,<:Any}) + + Lists all the volumes in a kdb environment. + +# Arguments +- `environment_id`: A unique identifier for the kdb environment, whose clusters can attach + to the volume. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in this request. +- `"nextToken"`: A token that indicates where a results page should begin. +- `"volumeType"`: The type of file system volume. Currently, FinSpace only supports NAS_1 + volume type. 
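# Illustrative sketch: paging through list_kx_volumes with the maxResults and
# nextToken parameters documented above. The "nextToken" response field is an
# assumption about the service model; the environment id is a placeholder.
using AWS
@service Finspace

function collect_kx_volume_pages(env_id)
    pages  = Any[]
    params = Dict{String,Any}("maxResults" => 50)
    while true
        resp = Finspace.list_kx_volumes(env_id, params)
        push!(pages, resp)
        token = get(resp, "nextToken", nothing)
        (token === nothing || isempty(token)) && return pages
        params["nextToken"] = token   # request the next page
    end
end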
+""" +function list_kx_volumes(environmentId; aws_config::AbstractAWSConfig=global_aws_config()) + return finspace( + "GET", + "/kx/environments/$(environmentId)/kxvolumes"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_kx_volumes( + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "GET", + "/kx/environments/$(environmentId)/kxvolumes", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1246,6 +1949,65 @@ function update_environment( ) end +""" + update_kx_cluster_code_configuration(cluster_name, code, environment_id) + update_kx_cluster_code_configuration(cluster_name, code, environment_id, params::Dict{String,<:Any}) + + Allows you to update code configuration on a running cluster. By using this API you can +update the code, the initialization script path, and the command line arguments for a +specific cluster. The configuration that you want to update will override any existing +configurations on the cluster. + +# Arguments +- `cluster_name`: The name of the cluster. +- `code`: +- `environment_id`: A unique identifier of the kdb environment. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. +- `"commandLineArguments"`: Specifies the key-value pairs to make them available inside the + cluster. You cannot update this parameter for a NO_RESTART deployment. +- `"deploymentConfiguration"`: The configuration that allows you to choose how you want to + update the code on a cluster. +- `"initializationScript"`: Specifies a Q program that will be run at launch of a cluster. + It is a relative path within .zip file that contains the custom code, which will be loaded + on the cluster. It must include the file name itself. For example, somedir/init.q. You + cannot update this parameter for a NO_RESTART deployment. +""" +function update_kx_cluster_code_configuration( + clusterName, code, environmentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "PUT", + "/kx/environments/$(environmentId)/clusters/$(clusterName)/configuration/code", + Dict{String,Any}("code" => code, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_kx_cluster_code_configuration( + clusterName, + code, + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "PUT", + "/kx/environments/$(environmentId)/clusters/$(clusterName)/configuration/code", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("code" => code, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_kx_cluster_databases(cluster_name, databases, environment_id) update_kx_cluster_databases(cluster_name, databases, environment_id, params::Dict{String,<:Any}) @@ -1263,6 +2025,8 @@ different changeset and modify a list of partitions being cached. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. 
+- `"deploymentConfiguration"`: The configuration that allows you to choose how you want to + update the databases on a cluster. """ function update_kx_cluster_databases( clusterName, databases, environmentId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1270,7 +2034,7 @@ function update_kx_cluster_databases( return finspace( "PUT", "/kx/environments/$(environmentId)/clusters/$(clusterName)/configuration/databases", - Dict{String,Any}("databases" => databases); + Dict{String,Any}("databases" => databases, "clientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1286,7 +2050,13 @@ function update_kx_cluster_databases( "PUT", "/kx/environments/$(environmentId)/clusters/$(clusterName)/configuration/databases", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("databases" => databases), params) + mergewith( + _merge, + Dict{String,Any}( + "databases" => databases, "clientToken" => string(uuid4()) + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1340,6 +2110,64 @@ function update_kx_database( ) end +""" + update_kx_dataview(client_token, database_name, dataview_name, environment_id) + update_kx_dataview(client_token, database_name, dataview_name, environment_id, params::Dict{String,<:Any}) + + Updates the specified dataview. The dataviews get automatically updated when any new +changesets are ingested. Each update of the dataview creates a new version, including +changeset details and cache configurations + +# Arguments +- `client_token`: A token that ensures idempotency. This token expires in 10 minutes. +- `database_name`: The name of the database. +- `dataview_name`: The name of the dataview that you want to update. +- `environment_id`: A unique identifier for the kdb environment, where you want to update + the dataview. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"changesetId"`: A unique identifier for the changeset. +- `"description"`: The description for a dataview. +- `"segmentConfigurations"`: The configuration that contains the database path of the data + that you want to place on each selected volume. Each segment must have a unique database + path for each volume. If you do not explicitly specify any database path for a volume, they + are accessible from the cluster through the default S3/object store segment. 
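# Illustrative sketch: pinning an existing dataview to a specific changeset with
# the changesetId parameter listed above (assumes the `@service` pattern; all
# identifiers are placeholders).
using AWS, UUIDs
@service Finspace

Finspace.update_kx_dataview(
    string(uuid4()),          # clientToken
    "welcomedb",              # databaseName
    "welcomedb-view",         # dataviewName
    "my-kx-environment-id",   # environmentId
    Dict{String,Any}("changesetId" => "my-changeset-id"),
)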
+""" +function update_kx_dataview( + clientToken, + databaseName, + dataviewName, + environmentId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "PUT", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews/$(dataviewName)", + Dict{String,Any}("clientToken" => clientToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_kx_dataview( + clientToken, + databaseName, + dataviewName, + environmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "PUT", + "/kx/environments/$(environmentId)/databases/$(databaseName)/dataviews/$(dataviewName)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => clientToken), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_kx_environment(environment_id) update_kx_environment(environment_id, params::Dict{String,<:Any}) @@ -1360,7 +2188,8 @@ function update_kx_environment( ) return finspace( "PUT", - "/kx/environments/$(environmentId)"; + "/kx/environments/$(environmentId)", + Dict{String,Any}("clientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1373,7 +2202,9 @@ function update_kx_environment( return finspace( "PUT", "/kx/environments/$(environmentId)", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1405,7 +2236,8 @@ function update_kx_environment_network( ) return finspace( "PUT", - "/kx/environments/$(environmentId)/network"; + "/kx/environments/$(environmentId)/network", + Dict{String,Any}("clientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1418,7 +2250,9 @@ function update_kx_environment_network( return finspace( "PUT", "/kx/environments/$(environmentId)/network", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1445,7 +2279,7 @@ function update_kx_user( return finspace( "PUT", "/kx/environments/$(environmentId)/users/$(userName)", - Dict{String,Any}("iamRole" => iamRole); + Dict{String,Any}("iamRole" => iamRole, "clientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1460,7 +2294,61 @@ function update_kx_user( return finspace( "PUT", "/kx/environments/$(environmentId)/users/$(userName)", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("iamRole" => iamRole), params)); + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("iamRole" => iamRole, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_kx_volume(environment_id, volume_name) + update_kx_volume(environment_id, volume_name, params::Dict{String,<:Any}) + + Updates the throughput or capacity of a volume. During the update process, the filesystem +might be unavailable for a few minutes. You can retry any operations after the update is +complete. + +# Arguments +- `environment_id`: A unique identifier for the kdb environment where you created the + storage volume. +- `volume_name`: A unique identifier for the volume. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that ensures idempotency. This token expires in 10 minutes. 
+- `"description"`: A description of the volume. +- `"nas1Configuration"`: Specifies the configuration for the Network attached storage + (NAS_1) file system volume. +""" +function update_kx_volume( + environmentId, volumeName; aws_config::AbstractAWSConfig=global_aws_config() +) + return finspace( + "PATCH", + "/kx/environments/$(environmentId)/kxvolumes/$(volumeName)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_kx_volume( + environmentId, + volumeName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return finspace( + "PATCH", + "/kx/environments/$(environmentId)/kxvolumes/$(volumeName)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) diff --git a/src/services/finspace_data.jl b/src/services/finspace_data.jl index 6fe63864e6..c57eb72814 100644 --- a/src/services/finspace_data.jl +++ b/src/services/finspace_data.jl @@ -8,8 +8,8 @@ using AWS.UUIDs associate_user_to_permission_group(permission_group_id, user_id) associate_user_to_permission_group(permission_group_id, user_id, params::Dict{String,<:Any}) -Adds a user account to a permission group to grant permissions for actions a user can -perform in FinSpace. +Adds a user to a permission group to grant permissions for actions a user can perform in +FinSpace. # Arguments - `permission_group_id`: The unique identifier for the permission group. @@ -342,7 +342,7 @@ Creates a new user in FinSpace. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ApiAccess"`: The option to indicate whether the user can use the +- `"apiAccess"`: The option to indicate whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations. ENABLED – The user has permissions to use the APIs. DISABLED – The user does not have permissions to use any APIs. @@ -475,7 +475,7 @@ end Denies access to the FinSpace web application and API for the specified user. # Arguments -- `user_id`: The unique identifier for the user account that you want to disable. +- `user_id`: The unique identifier for the user that you want to deactivate. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -508,7 +508,7 @@ end disassociate_user_from_permission_group(permission_group_id, user_id) disassociate_user_from_permission_group(permission_group_id, user_id, params::Dict{String,<:Any}) -Removes a user account from a permission group. +Removes a user from a permission group. # Arguments - `permission_group_id`: The unique identifier for the permission group. @@ -553,7 +553,7 @@ end Allows the specified user to access the FinSpace web application and API. # Arguments -- `user_id`: The unique identifier for the user account that you want to enable. +- `user_id`: The unique identifier for the user that you want to activate. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -763,7 +763,8 @@ end get_programmatic_access_credentials(environment_id) get_programmatic_access_credentials(environment_id, params::Dict{String,<:Any}) -Request programmatic credentials to use with FinSpace SDK. +Request programmatic credentials to use with FinSpace SDK. For more information, see Step +2. 
Access credentials programmatically using IAM access key id and secret access key. # Arguments - `environment_id`: The FinSpace environment identifier. @@ -999,7 +1000,7 @@ end list_permission_groups_by_user(max_results, user_id) list_permission_groups_by_user(max_results, user_id, params::Dict{String,<:Any}) -Lists all the permission groups that are associated with a specific user account. +Lists all the permission groups that are associated with a specific user. # Arguments - `max_results`: The maximum number of results per page. @@ -1041,7 +1042,7 @@ end list_users(max_results) list_users(max_results, params::Dict{String,<:Any}) -Lists all available user accounts in FinSpace. +Lists all available users in FinSpace. # Arguments - `max_results`: The maximum number of results per page. @@ -1354,10 +1355,10 @@ end update_user(user_id) update_user(user_id, params::Dict{String,<:Any}) -Modifies the details of the specified user account. You cannot update the userId for a user. +Modifies the details of the specified user. You cannot update the userId for a user. # Arguments -- `user_id`: The unique identifier for the user account to update. +- `user_id`: The unique identifier for the user that you want to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/firehose.jl b/src/services/firehose.jl index 8880744231..d50f06233d 100644 --- a/src/services/firehose.jl +++ b/src/services/firehose.jl @@ -8,50 +8,51 @@ using AWS.UUIDs create_delivery_stream(delivery_stream_name) create_delivery_stream(delivery_stream_name, params::Dict{String,<:Any}) -Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 -delivery streams per Amazon Web Services Region. This is an asynchronous operation that -immediately returns. The initial status of the delivery stream is CREATING. After the -delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery -stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to -a delivery stream that is not in the ACTIVE state cause an exception. To check the state of -a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is -CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream -again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A -Kinesis Data Firehose delivery stream can be configured to receive records directly from -providers using PutRecord or PutRecordBatch, or it can be configured to use an existing -Kinesis stream as its source. To specify a Kinesis data stream as input, set the -DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream -Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. -To create a delivery stream with server-side encryption (SSE) enabled, include -DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also -invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that -doesn't have SSE enabled. A delivery stream is configured with a single destination: Amazon -S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following -destination configuration parameters: ExtendedS3DestinationConfiguration, -S3DestinationConfiguration, ElasticsearchDestinationConfiguration, -RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. 
When you specify -S3DestinationConfiguration, you can also provide the following optional values: -BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no -BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 -minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are -some cases where the service cannot adhere to these conditions strictly. For example, -record boundaries might be such that the size is a little over or under the configured -buffering size. By default, no encryption is performed. We strongly recommend that you -enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon -Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as -intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses +Creates a Firehose delivery stream. By default, you can create up to 50 delivery streams +per Amazon Web Services Region. This is an asynchronous operation that immediately returns. +The initial status of the delivery stream is CREATING. After the delivery stream is +created, its status is ACTIVE and it now accepts data. If the delivery stream creation +fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery +stream that is not in the ACTIVE state cause an exception. To check the state of a delivery +stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, +this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, +you can invoke the DeleteDeliveryStream operation to delete it. A Firehose delivery stream +can be configured to receive records directly from providers using PutRecord or +PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To +specify a Kinesis data stream as input, set the DeliveryStreamType parameter to +KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role +ARN in the KinesisStreamSourceConfiguration parameter. To create a delivery stream with +server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in +your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn +on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is +configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), +Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any +custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service +providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. +You must specify only one of the following destination configuration parameters: +ExtendedS3DestinationConfiguration, S3DestinationConfiguration, +ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or +SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also +provide the following optional values: BufferingHints, EncryptionConfiguration, and +CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers +data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is +a hint, so there are some cases where the service cannot adhere to these conditions +strictly. 
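# Illustrative sketch of the S3 destination path described above. The nested member
# names (RoleARN, BucketARN, BufferingHints, CompressionFormat) follow the Firehose
# API model; the stream name and ARNs are placeholders.
using AWS
@service Firehose

Firehose.create_delivery_stream(
    "my-delivery-stream",
    Dict{String,Any}(
        "DeliveryStreamType" => "DirectPut",
        "ExtendedS3DestinationConfiguration" => Dict{String,Any}(
            "RoleARN"           => "arn:aws:iam::123456789012:role/firehose-delivery-role",
            "BucketARN"         => "arn:aws:s3:::my-firehose-bucket",
            "BufferingHints"    => Dict("SizeInMBs" => 5, "IntervalInSeconds" => 300),
            "CompressionFormat" => "GZIP",
        ),
    ),
)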
For example, record boundaries might be such that the size is a little over or +under the configured buffering size. By default, no encryption is performed. We strongly +recommend that you enable encryption to ensure secure data storage in Amazon S3. A few +notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an +S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password -you provide exclusively with Kinesis Data Firehose, and that the permissions for the -account are restricted for Amazon Redshift INSERT permissions. Kinesis Data Firehose -assumes the IAM role that is configured as part of the destination. The role should allow -the Kinesis Data Firehose principal to assume the role, and the role should have -permissions that allow the service to deliver the data. For more information, see Grant -Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data -Firehose Developer Guide. +you provide exclusively with Firehose, and that the permissions for the account are +restricted for Amazon Redshift INSERT permissions. Firehose assumes the IAM role that is +configured as part of the destination. The role should allow the Firehose principal to +assume the role, and the role should have permissions that allow the service to deliver the +data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the +Amazon Firehose Developer Guide. # Arguments - `delivery_stream_name`: The name of the delivery stream. This name must be unique per @@ -79,10 +80,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"KinesisStreamSourceConfiguration"`: When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream. +- `"MSKSourceConfiguration"`: - `"RedshiftDestinationConfiguration"`: The destination in Amazon Redshift. You can specify only one destination. - `"S3DestinationConfiguration"`: [Deprecated] The destination in Amazon S3. You can specify only one destination. +- `"SnowflakeDestinationConfiguration"`: Configure Snowflake destination - `"SplunkDestinationConfiguration"`: The destination in Splunk. You can specify only one destination. - `"Tags"`: A set of tags to assign to the delivery stream. A tag is a key-value pair that @@ -90,7 +93,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. You can specify up - to 50 tags when creating a delivery stream. + to 50 tags when creating a delivery stream. If you specify tags in the CreateDeliveryStream + action, Amazon Data Firehose performs an additional authorization on the + firehose:TagDeliveryStream action to verify if users have permissions to create tags. 
If + you do not provide this permission, requests to create new Firehose delivery streams with + IAM resource tags will fail with an AccessDeniedException such as following. + AccessDeniedException User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: + firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with + an explicit deny in an identity-based policy. For an example IAM policy, see Tag example. """ function create_delivery_stream( DeliveryStreamName; aws_config::AbstractAWSConfig=global_aws_config() @@ -123,14 +133,18 @@ end delete_delivery_stream(delivery_stream_name) delete_delivery_stream(delivery_stream_name, params::Dict{String,<:Any}) -Deletes a delivery stream and its data. To check the state of a delivery stream, use -DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the -following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a -delivery stream that is in the CREATING state. While the deletion request is in process, -the delivery stream is in the DELETING state. While the delivery stream is in the DELETING +Deletes a delivery stream and its data. You can delete a delivery stream only if it is in +one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You +can't delete a delivery stream that is in the CREATING state. To check the state of a +delivery stream, use DescribeDeliveryStream. DeleteDeliveryStream is an asynchronous API. +When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for +deletion, and it goes into the DELETING state.While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any -applications that are sending records before you delete a delivery stream. +applications that are sending records before you delete a delivery stream. Removal of a +delivery stream that is in the DELETING state is a low priority operation for the service. +A stream may remain in the DELETING state for several minutes. Therefore, as a best +practice, applications should not wait for streams in the DELETING state to be removed. # Arguments - `delivery_stream_name`: The name of the delivery stream. @@ -138,12 +152,11 @@ applications that are sending records before you delete a delivery stream. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllowForceDelete"`: Set this to true if you want to delete the delivery stream even if - Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose - might be unable to retire the grant due to a customer error, such as when the CMK or the - grant are in an invalid state. If you force deletion, you can then use the RevokeGrant - operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the - grant happens due to an Amazon Web Services KMS issue, Kinesis Data Firehose keeps retrying - the delete operation. The default value is false. + Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the + grant due to a customer error, such as when the CMK or the grant are in an invalid state. + If you force deletion, you can then use the RevokeGrant operation to revoke the grant you + gave to Firehose. 
If a failure to retire the grant happens due to an Amazon Web Services + KMS issue, Firehose keeps retrying the delete operation. The default value is false. """ function delete_delivery_stream( DeliveryStreamName; aws_config::AbstractAWSConfig=global_aws_config() @@ -190,7 +203,7 @@ again but with DeleteDeliveryStreamInputAllowForceDelete set to true. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ExclusiveStartDestinationId"`: The ID of the destination to start returning the - destination information. Kinesis Data Firehose supports one destination per delivery stream. + destination information. Firehose supports one destination per delivery stream. - `"Limit"`: The limit on the number of destinations to return. You can have one destination per delivery stream. """ @@ -311,29 +324,35 @@ end put_record(delivery_stream_name, record) put_record(delivery_stream_name, record, params::Dict{String,<:Any}) -Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write -multiple data records into a delivery stream, use PutRecordBatch. Applications using these -operations are referred to as producers. By default, each delivery stream can take in up to -2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use -PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for -each delivery stream. For more information about limits and how to request an increase, see -Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and -the data record when using PutRecord. The data record consists of a data blob that can be -up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log -file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose -buffers records before delivering them to the destination. To disambiguate the data blobs -at the destination, a common solution is to use delimiters in the data, such as a newline -(n) or some other character unique within the data. This allows the consumer application to -parse individual data items when reading the data from the destination. The PutRecord -operation returns a RecordId, which is a unique string assigned to each record. Producer -applications can use this ID for purposes such as auditability and investigation. If the -PutRecord operation throws a ServiceUnavailableException, back off and retry. If the -exception persists, it is possible that the throughput limits have been exceeded for the -delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from -the time they are added to a delivery stream as it tries to send the records to the -destination. If the destination is unreachable for more than 24 hours, the data is no -longer available. Don't concatenate two or more base64 strings to form the data fields of -your records. Instead, concatenate the raw data, then perform base64 encoding. +Writes a single data record into an Amazon Firehose delivery stream. To write multiple data +records into a delivery stream, use PutRecordBatch. Applications using these operations are +referred to as producers. By default, each delivery stream can take in up to 2,000 +transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord +and PutRecordBatch, the limits are an aggregate across these two operations for each +delivery stream. 
For more information about limits and how to request an increase, see +Amazon Firehose Limits. Firehose accumulates and publishes a particular metric for a +customer account in one minute intervals. It is possible that the bursts of incoming +bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the +actual spikes in the traffic might not be fully visible in the customer's 1 minute +CloudWatch metrics. You must specify the name of the delivery stream and the data record +when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB +in size, and any kind of data. For example, it can be a segment from a log file, geographic +location data, website clickstream data, and so on. Firehose buffers records before +delivering them to the destination. To disambiguate the data blobs at the destination, a +common solution is to use delimiters in the data, such as a newline (n) or some other +character unique within the data. This allows the consumer application to parse individual +data items when reading the data from the destination. The PutRecord operation returns a +RecordId, which is a unique string assigned to each record. Producer applications can use +this ID for purposes such as auditability and investigation. If the PutRecord operation +throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. +If the exception persists, it is possible that the throughput limits have been exceeded for +the delivery stream. Re-invoking the Put API operations (for example, PutRecord and +PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer +time out before retrying Put API operations. Data records sent to Firehose are stored for +24 hours from the time they are added to a delivery stream as it tries to send the records +to the destination. If the destination is unreachable for more than 24 hours, the data is +no longer available. Don't concatenate two or more base64 strings to form the data fields +of your records. Instead, concatenate the raw data, then perform base64 encoding. # Arguments - `delivery_stream_name`: The name of the delivery stream. @@ -379,43 +398,48 @@ end Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are -referred to as producers. For information about service quota, see Amazon Kinesis Data -Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the -request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the -entire request. These limits cannot be changed. You must specify the name of the delivery -stream and the data record when using PutRecord. The data record consists of a data blob -that can be up to 1,000 KB in size, and any kind of data. For example, it could be a -segment from a log file, geographic location data, website clickstream data, and so on. -Kinesis Data Firehose buffers records before delivering them to the destination. To -disambiguate the data blobs at the destination, a common solution is to use delimiters in -the data, such as a newline (n) or some other character unique within the data. This allows -the consumer application to parse individual data items when reading the data from the -destination. 
The PutRecordBatch response includes a count of failed records, -FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch -call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are -records for which the operation didn't succeed. Each entry in the RequestResponses array -provides additional information about the processed record. It directly correlates with a -record in the request array using the same ordering, from the top to the bottom. The -response array always includes the same number of records as the request array. -RequestResponses includes both successfully and unsuccessfully processed records. Kinesis -Data Firehose tries to process all records in each PutRecordBatch request. A single record -failure does not stop the processing of subsequent records. A successfully processed -record includes a RecordId value, which is unique for the record. An unsuccessfully -processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of -error, and is one of the following values: ServiceUnavailableException or InternalFailure. -ErrorMessage provides more detailed information about the error. If there is an internal -server error or a timeout, the write might have completed or it might have failed. If -FailedPutCount is greater than 0, retry the request, resending only those records that -might have failed processing. This minimizes the possible duplicate records and also -reduces the total bytes sent (and corresponding charges). We recommend that you handle any -duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back -off and retry. If the exception persists, it is possible that the throughput limits have -been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are -stored for 24 hours from the time they are added to a delivery stream as it attempts to -send the records to the destination. If the destination is unreachable for more than 24 -hours, the data is no longer available. Don't concatenate two or more base64 strings to -form the data fields of your records. Instead, concatenate the raw data, then perform -base64 encoding. +referred to as producers. Firehose accumulates and publishes a particular metric for a +customer account in one minute intervals. It is possible that the bursts of incoming +bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the +actual spikes in the traffic might not be fully visible in the customer's 1 minute +CloudWatch metrics. For information about service quota, see Amazon Firehose Quota. Each +PutRecordBatch request supports up to 500 records. Each record in the request can be as +large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. +These limits cannot be changed. You must specify the name of the delivery stream and the +data record when using PutRecord. The data record consists of a data blob that can be up to +1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, +geographic location data, website clickstream data, and so on. Firehose buffers records +before delivering them to the destination. To disambiguate the data blobs at the +destination, a common solution is to use delimiters in the data, such as a newline (n) or +some other character unique within the data. This allows the consumer application to parse +individual data items when reading the data from the destination. 
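A short sketch of the batched form (all names are placeholders, the generated two-argument `(delivery_stream_name, records)` form and a plain `Dict`-style response are assumptions here; each record carries base64-encoded raw data):

    using Base64
    records = [Dict("Data" => base64encode("event $(i)\n")) for i in 1:3]
    resp = put_record_batch("example-delivery-stream", records)
    # Retry only the failed entries if resp["FailedPutCount"] > 0, as explained below.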
The PutRecordBatch +response includes a count of failed records, FailedPutCount, and an array of responses, +RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may +be greater than 0, indicating that there are records for which the operation didn't +succeed. Each entry in the RequestResponses array provides additional information about the +processed record. It directly correlates with a record in the request array using the same +ordering, from the top to the bottom. The response array always includes the same number of +records as the request array. RequestResponses includes both successfully and +unsuccessfully processed records. Firehose tries to process all records in each +PutRecordBatch request. A single record failure does not stop the processing of subsequent +records. A successfully processed record includes a RecordId value, which is unique for +the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. +ErrorCode reflects the type of error, and is one of the following values: +ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed +information about the error. If there is an internal server error or a timeout, the write +might have completed or it might have failed. If FailedPutCount is greater than 0, retry +the request, resending only those records that might have failed processing. This minimizes +the possible duplicate records and also reduces the total bytes sent (and corresponding +charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch +throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. +If the exception persists, it is possible that the throughput limits have been exceeded for +the delivery stream. Re-invoking the Put API operations (for example, PutRecord and +PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer +time out before retrying Put API operations. Data records sent to Firehose are stored for +24 hours from the time they are added to a delivery stream as it attempts to send the +records to the destination. If the destination is unreachable for more than 24 hours, the +data is no longer available. Don't concatenate two or more base64 strings to form the data +fields of your records. Instead, concatenate the raw data, then perform base64 encoding. # Arguments - `delivery_stream_name`: The name of the delivery stream. @@ -459,33 +483,35 @@ end start_delivery_stream_encryption(delivery_stream_name, params::Dict{String,<:Any}) Enables server-side encryption (SSE) for the delivery stream. This operation is -asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets -the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status -of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If -the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to -read and write data to your delivery stream while the encryption status is ENABLING, but -the data is not encrypted. It can take up to 5 seconds after the encryption status changes -to ENABLED before all records written to the delivery stream are encrypted. To find out +asynchronous. It returns immediately. When you invoke it, Firehose first sets the +encryption status of the stream to ENABLING, and then to ENABLED. 
The encryption status of +a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the +operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read +and write data to your delivery stream while the encryption status is ENABLING, but the +data is not encrypted. It can take up to 5 seconds after the encryption status changes to +ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutputEncrypted and PutRecordBatchOutputEncrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the -CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the -grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, -Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and -decrypt data and to manage the grant. If a delivery stream already has encryption enabled -and then you invoke this operation to change the ARN of the CMK or both its type and ARN -and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In -this case, encryption remains enabled with the old CMK. If the encryption status of your -delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. -The CMK must be enabled and the key policy mustn't explicitly deny the permission for -Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for -a delivery stream only if it's a delivery stream that uses DirectPut as its source. The -StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined -limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you -call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for -the same delivery stream in a 24-hour period. +CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had +on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose +creates a grant that enables it to use the new CMK to encrypt and decrypt data and to +manage the grant. For the KMS grant creation to be successful, the Firehose API operations +StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session +credentials that are more than 6 hours old. If a delivery stream already has encryption +enabled and then you invoke this operation to change the ARN of the CMK or both its type +and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK +failed. In this case, encryption remains enabled with the old CMK. If the encryption status +of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a +valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the +permission for Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE +for a delivery stream only if it's a delivery stream that uses DirectPut as its source. +The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a +combined limit of 25 calls per delivery stream per 24 hours. 
For example, you reach the +limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption +12 times for the same delivery stream in a 24-hour period. # Arguments - `delivery_stream_name`: The name of the delivery stream for which you want to enable @@ -528,21 +554,20 @@ end stop_delivery_stream_encryption(delivery_stream_name, params::Dict{String,<:Any}) Disables server-side encryption (SSE) for the delivery stream. This operation is -asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets -the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to +asynchronous. It returns immediately. When you invoke it, Firehose first sets the +encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutputEncrypted and PutRecordBatchOutputEncrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. If SSE is enabled using a customer managed CMK and -then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related -KMS grant for retirement and then retires it after it ensures that it is finished -delivering records to the destination. The StartDeliveryStreamEncryption and -StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery -stream per 24 hours. For example, you reach the limit if you call -StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the -same delivery stream in a 24-hour period. +then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for +retirement and then retires it after it ensures that it is finished delivering records to +the destination. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption +operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, +you reach the limit if you call StartDeliveryStreamEncryption 13 times and +StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period. # Arguments - `delivery_stream_name`: The name of the delivery stream for which you want to disable @@ -683,16 +708,16 @@ Redshift) or change the parameters associated with a destination (for example, t the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are -usually effective within a few minutes. Switching between Amazon ES and other services is -not supported. For an Amazon ES destination, you can only update to another Amazon ES -destination. If the destination type is the same, Kinesis Data Firehose merges the -configuration parameters specified with the destination configuration that already exists -on the delivery stream. If any of the parameters are not specified in the call, the -existing values are retained. For example, in the Amazon S3 destination, if -EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is -maintained on the destination. 
If the destination type is not the same, for example, -changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not -merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose +usually effective within a few minutes. Switching between Amazon OpenSearch Service and +other services is not supported. For an Amazon OpenSearch Service destination, you can only +update to another Amazon OpenSearch Service destination. If the destination type is the +same, Firehose merges the configuration parameters specified with the destination +configuration that already exists on the delivery stream. If any of the parameters are not +specified in the call, the existing values are retained. For example, in the Amazon S3 +destination, if EncryptionConfiguration is not specified, then the existing +EncryptionConfiguration is maintained on the destination. If the destination type is not +the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose +does not merge any parameters. In this case, all parameters must be specified. Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the @@ -721,6 +746,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys destination. - `"RedshiftDestinationUpdate"`: Describes an update for a destination in Amazon Redshift. - `"S3DestinationUpdate"`: [Deprecated] Describes an update for a destination in Amazon S3. +- `"SnowflakeDestinationUpdate"`: Update to the Snowflake destination configuration + settings. - `"SplunkDestinationUpdate"`: Describes an update for a destination in Splunk. """ function update_destination( diff --git a/src/services/fis.jl b/src/services/fis.jl index df40953f88..52a6f21e24 100644 --- a/src/services/fis.jl +++ b/src/services/fis.jl @@ -15,8 +15,8 @@ have specific tags. Actions: The actions to carry out on the target. You can multiple actions, the duration of each action, and when to start each action during an experiment. Stop conditions: If a stop condition is triggered while an experiment is running, the experiment is automatically stopped. You can define a stop condition as a -CloudWatch alarm. For more information, see Experiment templates in the Fault Injection -Simulator User Guide. +CloudWatch alarm. For more information, see experiment templates in the Fault Injection +Service User Guide. # Arguments - `actions`: The actions for the experiment. @@ -29,6 +29,7 @@ Simulator User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"experimentOptions"`: The experiment options for the experiment template. - `"logConfiguration"`: The configuration for experiment logging. - `"tags"`: The tags to apply to the experiment template. - `"targets"`: The targets for the experiment. @@ -85,6 +86,59 @@ function create_experiment_template( ) end +""" + create_target_account_configuration(account_id, id, role_arn) + create_target_account_configuration(account_id, id, role_arn, params::Dict{String,<:Any}) + +Creates a target account configuration for the experiment template. A target account +configuration is required when accountTargeting of experimentOptions is set to +multi-account. 
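For instance, registering a hypothetical workload account against a template might look as follows (the account ID, template ID, and role ARN are placeholders; the optional description is passed through the params dictionary):

    create_target_account_configuration(
        "111122223333",                                    # target Amazon Web Services account ID (placeholder)
        "EXT123AbCdEfGhIjK",                               # experiment template ID (placeholder)
        "arn:aws:iam::111122223333:role/fis-target-role",  # IAM role in the target account (placeholder)
        Dict("description" => "workload account"),
    )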
For more information, see experiment options in the Fault Injection Service +User Guide. + +# Arguments +- `account_id`: The Amazon Web Services account ID of the target account. +- `id`: The experiment template ID. +- `role_arn`: The Amazon Resource Name (ARN) of an IAM role for the target account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. +- `"description"`: The description of the target account. +""" +function create_target_account_configuration( + accountId, id, roleArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "POST", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)", + Dict{String,Any}("roleArn" => roleArn, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_target_account_configuration( + accountId, + id, + roleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fis( + "POST", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("roleArn" => roleArn, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_experiment_template(id) delete_experiment_template(id, params::Dict{String,<:Any}) @@ -115,6 +169,42 @@ function delete_experiment_template( ) end +""" + delete_target_account_configuration(account_id, id) + delete_target_account_configuration(account_id, id, params::Dict{String,<:Any}) + +Deletes the specified target account configuration of the experiment template. + +# Arguments +- `account_id`: The Amazon Web Services account ID of the target account. +- `id`: The ID of the experiment template. + +""" +function delete_target_account_configuration( + accountId, id; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "DELETE", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_target_account_configuration( + accountId, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fis( + "DELETE", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_action(id) get_action(id, params::Dict{String,<:Any}) @@ -169,6 +259,42 @@ function get_experiment( ) end +""" + get_experiment_target_account_configuration(account_id, id) + get_experiment_target_account_configuration(account_id, id, params::Dict{String,<:Any}) + +Gets information about the specified target account configuration of the experiment. + +# Arguments +- `account_id`: The Amazon Web Services account ID of the target account. +- `id`: The ID of the experiment. 
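For example (both IDs are placeholders):

    get_experiment_target_account_configuration("111122223333", "EXP123AbCdEfGhIjK")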
+ +""" +function get_experiment_target_account_configuration( + accountId, id; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experiments/$(id)/targetAccountConfigurations/$(accountId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_experiment_target_account_configuration( + accountId, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fis( + "GET", + "/experiments/$(id)/targetAccountConfigurations/$(accountId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_experiment_template(id) get_experiment_template(id, params::Dict{String,<:Any}) @@ -199,6 +325,43 @@ function get_experiment_template( ) end +""" + get_target_account_configuration(account_id, id) + get_target_account_configuration(account_id, id, params::Dict{String,<:Any}) + +Gets information about the specified target account configuration of the experiment +template. + +# Arguments +- `account_id`: The Amazon Web Services account ID of the target account. +- `id`: The ID of the experiment template. + +""" +function get_target_account_configuration( + accountId, id; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_target_account_configuration( + accountId, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fis( + "GET", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_target_resource_type(resource_type) get_target_resource_type(resource_type, params::Dict{String,<:Any}) @@ -256,6 +419,79 @@ function list_actions( ) end +""" + list_experiment_resolved_targets(id) + list_experiment_resolved_targets(id, params::Dict{String,<:Any}) + +Lists the resolved targets information of the specified experiment. + +# Arguments +- `id`: The ID of the experiment. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return with a single call. To retrieve + the remaining results, make another call with the returned nextToken value. +- `"nextToken"`: The token for the next page of results. +- `"targetName"`: The name of the target. +""" +function list_experiment_resolved_targets( + id; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experiments/$(id)/resolvedTargets"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_experiment_resolved_targets( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experiments/$(id)/resolvedTargets", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_experiment_target_account_configurations(id) + list_experiment_target_account_configurations(id, params::Dict{String,<:Any}) + +Lists the target account configurations of the specified experiment. + +# Arguments +- `id`: The ID of the experiment. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"nextToken"`: The token for the next page of results. 
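For example, paging through the configurations of a hypothetical experiment might look like this (the experiment ID is a placeholder, and the response is assumed to echo a nextToken value while more pages remain):

    resp = list_experiment_target_account_configurations("EXP123AbCdEfGhIjK")
    while haskey(resp, "nextToken")
        # ...inspect each page of target account configurations here...
        resp = list_experiment_target_account_configurations(
            "EXP123AbCdEfGhIjK", Dict("nextToken" => resp["nextToken"])
        )
    end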
+""" +function list_experiment_target_account_configurations( + id; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experiments/$(id)/targetAccountConfigurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_experiment_target_account_configurations( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experiments/$(id)/targetAccountConfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_experiment_templates() list_experiment_templates(params::Dict{String,<:Any}) @@ -296,6 +532,7 @@ Lists your experiments. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"experimentTemplateId"`: The ID of the experiment template. - `"maxResults"`: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value. - `"nextToken"`: The token for the next page of results. @@ -351,6 +588,43 @@ function list_tags_for_resource( ) end +""" + list_target_account_configurations(id) + list_target_account_configurations(id, params::Dict{String,<:Any}) + +Lists the target account configurations of the specified experiment template. + +# Arguments +- `id`: The ID of the experiment template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return with a single call. To retrieve + the remaining results, make another call with the returned nextToken value. +- `"nextToken"`: The token for the next page of results. +""" +function list_target_account_configurations( + id; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experimentTemplates/$(id)/targetAccountConfigurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_target_account_configurations( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/experimentTemplates/$(id)/targetAccountConfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_target_resource_types() list_target_resource_types(params::Dict{String,<:Any}) @@ -396,6 +670,7 @@ Starts running an experiment from the specified experiment template. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"experimentOptions"`: The experiment options for running the experiment. - `"tags"`: The tags to apply to the experiment. """ function start_experiment( @@ -548,6 +823,7 @@ Updates the specified experiment template. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"actions"`: The actions for the experiment. - `"description"`: A description for the template. +- `"experimentOptions"`: The experiment options for the experiment template. - `"logConfiguration"`: The configuration for experiment logging. - `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role that grants the FIS service permission to perform service actions on your behalf. 
@@ -573,3 +849,43 @@ function update_experiment_template( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_target_account_configuration(account_id, id) + update_target_account_configuration(account_id, id, params::Dict{String,<:Any}) + +Updates the target account configuration for the specified experiment template. + +# Arguments +- `account_id`: The Amazon Web Services account ID of the target account. +- `id`: The ID of the experiment template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the target account. +- `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role for the target account. +""" +function update_target_account_configuration( + accountId, id; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "PATCH", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_target_account_configuration( + accountId, + id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fis( + "PATCH", + "/experimentTemplates/$(id)/targetAccountConfigurations/$(accountId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/fms.jl b/src/services/fms.jl index 0ebafd96af..c3b62d7a20 100644 --- a/src/services/fms.jl +++ b/src/services/fms.jl @@ -255,12 +255,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cleanup does the following for each security group in the policy: Disassociates the security group from in-scope resources Deletes the security group if it was created through Firewall Manager and if it's no longer associated with any resources through - another policy After the cleanup, in-scope resources are no longer protected by web ACLs - in this policy. Protection of out-of-scope resources remains unchanged. Scope is determined - by tags that you create and accounts that you associate with the policy. When creating the - policy, if you specify that only resources in specific accounts or with specific tags are - in scope of the policy, those accounts and resources are handled by the policy. All others - are out of scope. If you don't specify tags or accounts, all resources are in scope. + another policy For security group common policies, even if set to False, Firewall + Manager deletes all security groups created by Firewall Manager that aren't associated with + any other resources through another policy. After the cleanup, in-scope resources are no + longer protected by web ACLs in this policy. Protection of out-of-scope resources remains + unchanged. Scope is determined by tags that you create and accounts that you associate with + the policy. When creating the policy, if you specify that only resources in specific + accounts or with specific tags are in scope of the policy, those accounts and resources are + handled by the policy. All others are out of scope. If you don't specify tags or accounts, + all resources are in scope. """ function delete_policy(PolicyId; aws_config::AbstractAWSConfig=global_aws_config()) return fms( @@ -440,11 +443,11 @@ end get_admin_scope(admin_account) get_admin_scope(admin_account, params::Dict{String,<:Any}) -Returns information about the specified account's administrative scope. The admistrative +Returns information about the specified account's administrative scope. 
The administrative scope defines the resources that an Firewall Manager administrator can manage. # Arguments -- `admin_account`: The administator account that you want to get the details for. +- `admin_account`: The administrator account that you want to get the details for. """ function get_admin_scope(AdminAccount; aws_config::AbstractAWSConfig=global_aws_config()) @@ -508,17 +511,8 @@ end get_compliance_detail(member_account, policy_id, params::Dict{String,<:Any}) Returns detailed compliance information about the specified member account. Details include -resources that are in and out of compliance with the specified policy. Resources are -considered noncompliant for WAF and Shield Advanced policies if the specified policy has -not been applied to them. Resources are considered noncompliant for security group -policies if they are in scope of the policy, they violate one or more of the policy rules, -and remediation is disabled or not possible. Resources are considered noncompliant for -Network Firewall policies if a firewall is missing in the VPC, if the firewall endpoint -isn't set up in an expected Availability Zone and subnet, if a subnet created by the -Firewall Manager doesn't have the expected route table, and for modifications to a firewall -policy that violate the Firewall Manager policy's rules. Resources are considered -noncompliant for DNS Firewall policies if a DNS Firewall rule group is missing from the -rule group associations for the VPC. +resources that are in and out of compliance with the specified policy. The reasons for +resources being considered compliant depend on the Firewall Manager policy type. # Arguments - `member_account`: The Amazon Web Services account that owns the resources that you want @@ -780,8 +774,10 @@ Amazon Web Services account. # Arguments - `member_account`: The Amazon Web Services account ID that you want the details for. -- `policy_id`: The ID of the Firewall Manager policy that you want the details for. This - currently only supports security group content audit policies. +- `policy_id`: The ID of the Firewall Manager policy that you want the details for. You can + get violation details for the following policy types: DNS Firewall Imported Network + Firewall Network Firewall Security group content audit Network ACL Third-party + firewall - `resource_id`: The ID of the resource that has violations. - `resource_type`: The resource type. This is in the format shown in the Amazon Web Services Resource Types Reference. Supported resource types are: AWS::EC2::Instance, @@ -1467,19 +1463,31 @@ end put_policy(policy) put_policy(policy, params::Dict{String,<:Any}) -Creates an Firewall Manager policy. Firewall Manager provides the following types of -policies: An WAF policy (type WAFV2), which defines rule groups to run first in the -corresponding WAF web ACL and rule groups to run last in the web ACL. An WAF Classic -policy (type WAF), which defines a rule group. A Shield Advanced policy, which applies -Shield Advanced protection to specified accounts and resources. A security group policy, -which manages VPC security groups across your Amazon Web Services organization. An -Network Firewall policy, which provides firewall rules to filter network traffic in -specified Amazon VPCs. A DNS Firewall policy, which provides Route 53 Resolver DNS -Firewall rules to filter DNS queries for specified VPCs. Each policy is specific to one -of the types. If you want to enforce more than one policy type across accounts, create -multiple policies. 
You can create multiple policies for each type. You must be subscribed -to Shield Advanced to create a Shield Advanced policy. For more information about -subscribing to Shield Advanced, see CreateSubscription. +Creates an Firewall Manager policy. A Firewall Manager policy is specific to the individual +policy type. If you want to enforce multiple policy types across accounts, you can create +multiple policies. You can create more than one policy for each type. If you add a new +account to an organization that you created with Organizations, Firewall Manager +automatically applies the policy to the resources in that account that are within scope of +the policy. Firewall Manager provides the following types of policies: WAF policy - +This policy applies WAF web ACL protections to specified accounts and resources. Shield +Advanced policy - This policy applies Shield Advanced protection to specified accounts and +resources. Security Groups policy - This type of policy gives you control over security +groups that are in use throughout your organization in Organizations and lets you enforce a +baseline set of rules across your organization. Network ACL policy - This type of +policy gives you control over the network ACLs that are in use throughout your organization +in Organizations and lets you enforce a baseline set of first and last network ACL rules +across your organization. Network Firewall policy - This policy applies Network +Firewall protection to your organization's VPCs. DNS Firewall policy - This policy +applies Amazon Route 53 Resolver DNS Firewall protections to your organization's VPCs. +Third-party firewall policy - This policy applies third-party firewall protections. +Third-party firewalls are available by subscription through the Amazon Web Services +Marketplace console at Amazon Web Services Marketplace. Palo Alto Networks Cloud NGFW +policy - This policy applies Palo Alto Networks Cloud Next Generation Firewall (NGFW) +protections and Palo Alto Networks Cloud NGFW rulestacks to your organization's VPCs. +Fortigate CNF policy - This policy applies Fortigate Cloud Native Firewall (CNF) +protections. Fortigate CNF is a cloud-centered solution that blocks Zero-Day threats and +secures cloud infrastructures with industry-leading advanced threat prevention, smart web +application firewalls (WAF), and API protection. # Arguments - `policy`: The details of the Firewall Manager policy to be created. diff --git a/src/services/freetier.jl b/src/services/freetier.jl new file mode 100644 index 0000000000..fe4902300a --- /dev/null +++ b/src/services/freetier.jl @@ -0,0 +1,33 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: freetier +using AWS.Compat +using AWS.UUIDs + +""" + get_free_tier_usage() + get_free_tier_usage(params::Dict{String,<:Any}) + +Returns a list of all Free Tier usage objects that match your filters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: An expression that specifies the conditions that you want each FreeTierUsage + object to meet. +- `"maxResults"`: The maximum number of results to return in the response. MaxResults means + that there can be up to the specified number of values, but there might be fewer results + based on your filters. +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. 
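A small sketch using the parameters documented above (no filter is supplied, so all matching usage objects are returned):

    # Return up to 10 Free Tier usage objects; pass the returned nextToken back to fetch more.
    get_free_tier_usage(Dict("maxResults" => 10))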
+""" +function get_free_tier_usage(; aws_config::AbstractAWSConfig=global_aws_config()) + return freetier( + "GetFreeTierUsage"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_free_tier_usage( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return freetier( + "GetFreeTierUsage", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end diff --git a/src/services/fsx.jl b/src/services/fsx.jl index 6a2ee7dd4c..19a3e76fc3 100644 --- a/src/services/fsx.jl +++ b/src/services/fsx.jl @@ -77,10 +77,12 @@ end cancel_data_repository_task(task_id, params::Dict{String,<:Any}) Cancels an existing Amazon FSx for Lustre data repository task if that task is in either -the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following. -Any files that FSx has already exported are not reverted. FSx continues to export any -files that are \"in-flight\" when the cancel operation is received. FSx does not export -any files that have not yet been exported. +the PENDING or EXECUTING state. When you cancel am export task, Amazon FSx does the +following. Any files that FSx has already exported are not reverted. FSx continues to +export any files that are in-flight when the cancel operation is received. FSx does not +export any files that have not yet been exported. For a release task, Amazon FSx will +stop releasing files upon cancellation. Any files that have already been released will +remain in the released state. # Arguments - `task_id`: Specifies the data repository task to cancel. @@ -184,6 +186,74 @@ function copy_backup( ) end +""" + copy_snapshot_and_update_volume(source_snapshot_arn, volume_id) + copy_snapshot_and_update_volume(source_snapshot_arn, volume_id, params::Dict{String,<:Any}) + +Updates an existing volume by using a snapshot from another Amazon FSx for OpenZFS file +system. For more information, see on-demand data replication in the Amazon FSx for OpenZFS +User Guide. + +# Arguments +- `source_snapshot_arn`: +- `volume_id`: Specifies the ID of the volume that you are copying the snapshot to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: +- `"CopyStrategy"`: Specifies the strategy to use when copying data from a snapshot to the + volume. FULL_COPY - Copies all data from the snapshot to the volume. + INCREMENTAL_COPY - Copies only the snapshot data that's changed since the previous + replication. CLONE isn't a valid copy strategy option for the + CopySnapshotAndUpdateVolume operation. +- `"Options"`: Confirms that you want to delete data on the destination volume that + wasn’t there during the previous snapshot replication. Your replication will fail if you + don’t include an option for a specific type of data and that data is on your destination. + For example, if you don’t include DELETE_INTERMEDIATE_SNAPSHOTS and there are + intermediate snapshots on the destination, you can’t copy the snapshot. + DELETE_INTERMEDIATE_SNAPSHOTS - Deletes snapshots on the destination volume that aren’t + on the source volume. DELETE_CLONED_VOLUMES - Deletes snapshot clones on the destination + volume that aren't on the source volume. DELETE_INTERMEDIATE_DATA - Overwrites snapshots + on the destination volume that don’t match the source snapshot that you’re copying. 
+""" +function copy_snapshot_and_update_volume( + SourceSnapshotARN, VolumeId; aws_config::AbstractAWSConfig=global_aws_config() +) + return fsx( + "CopySnapshotAndUpdateVolume", + Dict{String,Any}( + "SourceSnapshotARN" => SourceSnapshotARN, + "VolumeId" => VolumeId, + "ClientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function copy_snapshot_and_update_volume( + SourceSnapshotARN, + VolumeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fsx( + "CopySnapshotAndUpdateVolume", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "SourceSnapshotARN" => SourceSnapshotARN, + "VolumeId" => VolumeId, + "ClientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_backup() create_backup(params::Dict{String,<:Any}) @@ -255,7 +325,7 @@ end Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data -repository associations are supported on all FSx for Lustre 2.12 and newer file systems, +repository associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type. Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export @@ -343,14 +413,18 @@ end create_data_repository_task(file_system_id, report, type) create_data_repository_task(file_system_id, report, type, params::Dict{String,<:Any}) -Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to -perform bulk operations between your Amazon FSx file system and its linked data -repositories. An example of a data repository task is exporting any data and metadata -changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) -from your FSx file system to a linked data repository. A CreateDataRepositoryTask operation -will fail if a data repository is not linked to the FSx file system. To learn more about -data repository tasks, see Data Repository Tasks. To learn more about linking a data -repository to your file system, see Linking your file system to an S3 bucket. +Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation +will fail if a data repository is not linked to the FSx file system. You use import and +export data repository tasks to perform bulk operations between your FSx for Lustre file +system and its linked data repositories. An example of a data repository task is exporting +any data and metadata changes, including POSIX metadata, to files, directories, and +symbolic links (symlinks) from your FSx file system to a linked data repository. You use +release data repository tasks to release data from your file system for files that are +exported to S3. The metadata of released files remains on the file system so users or +applications can still access released files by reading the files again, which will restore +data from Amazon S3 to the FSx for Lustre file system. To learn more about data repository +tasks, see Data Repository Tasks. 
To learn more about linking a data repository to your +file system, see Linking your file system to an S3 bucket. # Arguments - `file_system_id`: @@ -358,7 +432,13 @@ repository to your file system, see Linking your file system to an S3 bucket. completed. A CompletionReport provides a detailed report on the files that Amazon FSx processed that meet the criteria specified by the Scope parameter. For more information, see Working with Task Completion Reports. -- `type`: Specifies the type of data repository task to create. +- `type`: Specifies the type of data repository task to create. EXPORT_TO_REPOSITORY + tasks export from your Amazon FSx for Lustre file system to a linked data repository. + IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to + your Amazon FSx for Lustre file system. RELEASE_DATA_FROM_FILESYSTEM tasks release files + in your Amazon FSx for Lustre file system that have been exported to a linked S3 bucket and + that meet your specified release criteria. AUTO_RELEASE_DATA tasks automatically release + files from an Amazon File Cache resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -366,15 +446,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Cache AUTO_RELEASE_DATA task that automatically releases files from the cache. - `"ClientRequestToken"`: - `"Paths"`: A list of paths for the data repository task to use when the task is - processed. If a path that you provide isn't valid, the task fails. For export tasks, the - list contains paths on the Amazon FSx file system from which the files are exported to the - Amazon S3 bucket. The default path is the file system root directory. The paths you provide - need to be relative to the mount point of the file system. If the mount point is /mnt/fsx - and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the - path to provide is path1. For import tasks, the list contains paths in the Amazon S3 - bucket from which POSIX metadata changes are imported to the Amazon FSx file system. The - path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is - optional). + processed. If a path that you provide isn't valid, the task fails. If you don't provide + paths, the default behavior is to export all files to S3 (for export tasks), import all + files from S3 (for import tasks), or release all exported files that meet the last accessed + time criteria (for release tasks). For export tasks, the list contains paths on the FSx + for Lustre file system from which the files are exported to the Amazon S3 bucket. The + default path is the file system root directory. The paths you provide need to be relative + to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is + a directory or file on the file system you want to export, then the path to provide is + path1. For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX + metadata changes are imported to the FSx for Lustre file system. The path can be an S3 + bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional). For + release tasks, the list contains directory or file paths on the FSx for Lustre file system + from which to release exported files. If a directory is specified, files within the + directory are released. If a file path is specified, only that file is released. 
To release + all exported files in the file system, specify a forward slash (/) as the path. A file + must also meet the last accessed time criteria specified in for the file to be released. +- `"ReleaseConfiguration"`: The configuration that specifies the last accessed time + criteria for files that will be released from an Amazon FSx for Lustre file system. - `"Tags"`: """ function create_data_repository_task( @@ -540,7 +629,7 @@ system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token -doesn't exist, CreateFileSystem does the following: Creates a new, empty Amazon FSx file +doesn't exist, CreateFileSystem does the following: Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING. Returns the description of the file system in JSON format. The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation @@ -553,17 +642,19 @@ along with other information. - `storage_capacity`: Sets the storage capacity of the file system that you're creating, in gibibytes (GiB). FSx for Lustre file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType and the Lustre DeploymentType, - as follows: For SCRATCH_2, PERSISTENT_2 and PERSISTENT_1 deployment types using SSD + as follows: For SCRATCH_2, PERSISTENT_2, and PERSISTENT_1 deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB. For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems. For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB. FSx for ONTAP - file systems - The amount of storage capacity that you can configure is from 1024 GiB up to - 196,608 GiB (192 TiB). FSx for OpenZFS file systems - The amount of storage capacity that - you can configure is from 64 GiB up to 524,288 GiB (512 TiB). FSx for Windows File Server file systems - The amount of storage capacity that you can configure depends on the value - that you set for StorageType as follows: For SSD storage, valid values are 32 GiB-65,536 - GiB (64 TiB). For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB). + of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum + is calculated as 524,288 * HAPairs. FSx for OpenZFS file systems - The amount of storage + capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). FSx for + Windows File Server file systems - The amount of storage capacity that you can configure + depends on the value that you set for StorageType as follows: For SSD storage, valid + values are 32 GiB-65,536 GiB (64 TiB). For HDD storage, valid values are 2000 GiB-65,536 + GiB (64 TiB). - `subnet_ids`: Specifies the IDs of the subnets that the file system will be accessible from. For Windows and ONTAP MULTI_AZ_1 deployment types,provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these @@ -580,13 +671,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"ClientRequestToken"`: A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. -- `"FileSystemTypeVersion"`: (Optional) For FSx for Lustre file systems, sets the Lustre - version for the file system that you're creating. Valid values are 2.10 and 2.12: 2.10 is - supported by the Scratch and Persistent_1 Lustre deployment types. 2.12 is supported by - all Lustre deployment types. 2.12 is required when setting FSx for Lustre DeploymentType to - PERSISTENT_2. Default value = 2.10, except when DeploymentType is set to PERSISTENT_2, - then the default is 2.12. If you set FileSystemTypeVersion to 2.10 for a PERSISTENT_2 - Lustre deployment type, the CreateFileSystem operation fails. +- `"FileSystemTypeVersion"`: For FSx for Lustre file systems, sets the Lustre version for + the file system that you're creating. Valid values are 2.10, 2.12, and 2.15: 2.10 is + supported by the Scratch and Persistent_1 Lustre deployment types. 2.12 is supported by + all Lustre deployment types, except for PERSISTENT_2 with a metadata configuration mode. + 2.15 is supported by all Lustre deployment types and is recommended for all new file + systems. Default value is 2.10, except for the following deployments: Default value is + 2.12 when DeploymentType is set to PERSISTENT_2 without a metadata configuration mode. + Default value is 2.15 when DeploymentType is set to PERSISTENT_2 with a metadata + configuration mode. - `"KmsKeyId"`: - `"LustreConfiguration"`: - `"OntapConfiguration"`: @@ -594,12 +687,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys created. - `"SecurityGroupIds"`: A list of IDs specifying the security groups to apply to all network interfaces created for file system access. This list isn't returned in later - requests to describe the file system. + requests to describe the file system. You must specify a security group if you are + creating a Multi-AZ FSx for ONTAP file system in a VPC subnet that has been shared with + you. - `"StorageType"`: Sets the storage type for the file system that you're creating. Valid values are SSD and HDD. Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types. Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system - deployment types, and on PERSISTENT_1 Lustre file system deployment types. Default value + deployment types, and on PERSISTENT_1 Lustre file system deployment types. Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide and Multiple storage options in the FSx for Lustre User Guide. - `"Tags"`: The tags to apply to the file system that's being created. The key value of the @@ -692,8 +787,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. - `"FileSystemTypeVersion"`: Sets the version for the Amazon FSx for Lustre file system - that you're creating from a backup. Valid values are 2.10 and 2.12. You don't need to - specify FileSystemTypeVersion because it will be applied using the backup's + that you're creating from a backup. Valid values are 2.10, 2.12, and 2.15. 
You don't need
+ to specify FileSystemTypeVersion because it will be applied using the backup's
 FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when
 creating from backup, the value must match the backup's FileSystemTypeVersion setting.
- `"KmsKeyId"`:
@@ -709,7 +804,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 backup's storage capacity value. If you don't use the StorageCapacity parameter, the
 default is the backup's StorageCapacity value. If used to create a file system other than
 OpenZFS, you must provide a value that matches the backup's StorageCapacity value. If you
- provide any other value, Amazon FSx responds with a 400 Bad Request.
+ provide any other value, Amazon FSx responds with an HTTP status code 400 Bad Request.
- `"StorageType"`: Sets the storage type for the Windows or OpenZFS file system that you're
 creating from a backup. Valid values are SSD and HDD. Set to SSD to use solid state drive
 storage. SSD is supported on all Windows and OpenZFS deployment types. Set to HDD to use
@@ -839,16 +934,17 @@
Creates a storage virtual machine (SVM) for an Amazon FSx for ONTAP file system.

Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ActiveDirectoryConfiguration"`: Describes the self-managed Microsoft Active Directory
 to which you want to join the SVM. Joining an Active Directory provides user authentication
-and access control for SMB clients, including Microsoft Windows and macOS client accessing
+and access control for SMB clients, including Microsoft Windows and macOS clients accessing
 the file system.
- `"ClientRequestToken"`:
- `"RootVolumeSecurityStyle"`: The security style of the root volume of the SVM. Specify
 one of the following values: UNIX if the file system is managed by a UNIX administrator,
 the majority of users are NFS clients, and an application accessing the data uses a UNIX
- user as the service account. NTFS if the file system is managed by a Windows
 administrator, the majority of users are SMB clients, and an application accessing the data
- uses a Windows user as the service account. MIXED if the file system is managed by both
- UNIX and Windows administrators and users consist of both NFS and SMB clients.
+ user as the service account. NTFS if the file system is managed by a Microsoft Windows
 administrator, the majority of users are SMB clients, and an application accessing the data
+ uses a Microsoft Windows user as the service account. MIXED This is an advanced setting.
+ For more information, see Volume security style in the Amazon FSx for NetApp ONTAP User
+ Guide.
- `"SvmAdminPassword"`: The password to use when managing the SVM using the NetApp ONTAP
 CLI or REST API. If you do not specify a password, you can still use the file system's
 fsxadmin user to manage the SVM.
@@ -1055,7 +1151,7 @@ Deletes a data repository association on an Amazon FSx for Lustre file system. D
data repository association unlinks the file system from the Amazon S3 bucket. When
deleting a data repository association, you have the option of deleting the data in the
file system that corresponds to the data repository association. Data repository
-associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding
+associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding
 scratch_1 deployment type.

# Arguments
@@ -1157,17 +1253,27 @@ end

Deletes a file system. After deletion, the file system no longer exists, and its data is
gone. Any existing automatic backups and snapshots are also deleted. 
To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines -(SVMs) on the file system. Then provide a FileSystemId value to the DeleFileSystem +(SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation. By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's -retention policy, and must be manually deleted. The DeleteFileSystem operation returns -while the file system has the DELETING status. You can check the file system deletion -status by calling the DescribeFileSystems operation, which returns a list of file systems -in your account. If you pass the file system ID for a deleted file system, the -DescribeFileSystems operation returns a FileSystemNotFound error. If a data repository -task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will -fail with an HTTP status code 400 (Bad Request). The data in a deleted file system is -also deleted and can't be recovered by any means. +retention policy, and must be manually deleted. To delete an Amazon FSx for Lustre file +system, first unmount it from every connected Amazon EC2 instance, then provide a +FileSystemId value to the DeleteFileSystem operation. By default, Amazon FSx will not take +a final backup when the DeleteFileSystem operation is invoked. On file systems not linked +to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file +system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all +of your data is written back to S3 before deleting your file system, you can either monitor +for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can +run an export data repository task. If you have automatic export enabled and want to use an +export data repository task, you have to disable automatic export before executing the +export data repository task. The DeleteFileSystem operation returns while the file system +has the DELETING status. You can check the file system deletion status by calling the +DescribeFileSystems operation, which returns a list of file systems in your account. If you +pass the file system ID for a deleted file system, the DescribeFileSystems operation +returns a FileSystemNotFound error. If a data repository task is in a PENDING or EXECUTING +state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 +(Bad Request). The data in a deleted file system is also deleted and can't be recovered +by any means. # Arguments - `file_system_id`: The ID of the file system that you want to delete. @@ -1408,19 +1514,19 @@ end Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon -File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding -scratch_1 deployment type. You can use filters to narrow the response to include just data -repository associations for specific file systems (use the file-system-id filter with the -ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), -or data repository associations for a specific repository type (use the -data-repository-type filter with a value of S3 or NFS). 
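As a quick illustration of the Lustre deletion guidance above, a hedged sketch using this module's wrapper; the @service loading pattern, the placeholder file system ID, and the nested LustreConfiguration keys are assumptions, not taken from this patch:

using AWS
@service FSx   # assumed AWS.jl loading pattern for the fsx service module

# Request a final backup for a Lustre file system that is not linked to an S3
# bucket, then delete it. Per the guidance above, SkipFinalBackup is set to
# false so that a final backup is taken before deletion.
params = Dict{String,Any}(
    "LustreConfiguration" => Dict{String,Any}("SkipFinalBackup" => false),
)
FSx.delete_file_system("fs-0123456789abcdef0", params)   # placeholder file system ID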
If you don't use filters, the
-response returns all data repository associations owned by your Amazon Web Services account
-in the Amazon Web Services Region of the endpoint that you're calling. When retrieving all
-data repository associations, you can paginate the response by using the optional
-MaxResults parameter to limit the number of data repository associations returned in a
-response. If more data repository associations remain, a NextToken value is returned in the
-response. In this case, send a later request with the NextToken request parameter set to
-the value of NextToken from the last response.
+File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1
+deployment type. You can use filters to narrow the response to include just data repository
+associations for specific file systems (use the file-system-id filter with the ID of the
+file system) or caches (use the file-cache-id filter with the ID of the cache), or data
+repository associations for a specific repository type (use the data-repository-type filter
+with a value of S3 or NFS). If you don't use filters, the response returns all data
+repository associations owned by your Amazon Web Services account in the Amazon Web
+Services Region of the endpoint that you're calling. When retrieving all data repository
+associations, you can paginate the response by using the optional MaxResults parameter to
+limit the number of data repository associations returned in a response. If more data
+repository associations remain, a NextToken value is returned in the response. In this
+case, send a later request with the NextToken request parameter set to the value of
+NextToken from the last response.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -1636,6 +1742,36 @@ function describe_file_systems(
 )
end

+"""
+    describe_shared_vpc_configuration()
+    describe_shared_vpc_configuration(params::Dict{String,<:Any})
+
+Indicates whether participant accounts in your organization can create Amazon FSx for
+NetApp ONTAP Multi-AZ file systems in subnets that are shared by a virtual private cloud
+(VPC) owner. For more information, see Creating FSx for ONTAP file systems in shared
+subnets.
+
+"""
+function describe_shared_vpc_configuration(;
+    aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return fsx(
+        "DescribeSharedVpcConfiguration";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function describe_shared_vpc_configuration(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return fsx(
+        "DescribeSharedVpcConfiguration",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     describe_snapshots()
     describe_snapshots(params::Dict{String,<:Any})
@@ -1659,6 +1795,9 @@ multi-call iteration is unspecified.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"Filters"`: The filters structure. The supported names are file-system-id or volume-id.
+- `"IncludeShared"`: Set to false (default) if you want to only see the snapshots owned by
+ your Amazon Web Services account. Set to true if you want to see the snapshots in your
+ account and the ones shared with you from another account.
- `"MaxResults"`:
- `"NextToken"`:
- `"SnapshotIds"`: The IDs of the snapshots that you want to retrieve. 
This parameter value @@ -1738,11 +1877,11 @@ end Use this action to disassociate, or remove, one or more Domain Name Service (DNS) aliases from an Amazon FSx for Windows File Server file system. If you attempt to disassociate a -DNS alias that is not associated with the file system, Amazon FSx responds with a 400 Bad -Request. For more information, see Working with DNS Aliases. The system generated response -showing the DNS aliases that Amazon FSx is attempting to disassociate from the file system. -Use the API operation to monitor the status of the aliases Amazon FSx is disassociating -with the file system. +DNS alias that is not associated with the file system, Amazon FSx responds with an HTTP +status code 400 (Bad Request). For more information, see Working with DNS Aliases. The +system generated response showing the DNS aliases that Amazon FSx is attempting to +disassociate from the file system. Use the API operation to monitor the status of the +aliases Amazon FSx is disassociating with the file system. # Arguments - `aliases`: An array of one or more DNS alias names to disassociate, or remove, from the @@ -1951,6 +2090,54 @@ function restore_volume_from_snapshot( ) end +""" + start_misconfigured_state_recovery(file_system_id) + start_misconfigured_state_recovery(file_system_id, params::Dict{String,<:Any}) + +After performing steps to repair the Active Directory configuration of an FSx for Windows +File Server file system, use this action to initiate the process of Amazon FSx attempting +to reconnect to the file system. + +# Arguments +- `file_system_id`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: +""" +function start_misconfigured_state_recovery( + FileSystemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return fsx( + "StartMisconfiguredStateRecovery", + Dict{String,Any}( + "FileSystemId" => FileSystemId, "ClientRequestToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_misconfigured_state_recovery( + FileSystemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fsx( + "StartMisconfiguredStateRecovery", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "FileSystemId" => FileSystemId, "ClientRequestToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -2040,7 +2227,7 @@ end Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 -and newer file systems, excluding scratch_1 deployment type. +and 2.15 file systems, excluding scratch_1 deployment type. # Arguments - `association_id`: The ID of the data repository association that you are updating. @@ -2148,17 +2335,19 @@ Use this operation to update the configuration of an existing Amazon FSx file sy can update multiple properties in a single request. 
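For example, one request can raise storage capacity and adjust the Windows throughput setting together. A hedged sketch against this module's wrapper follows; the @service loading pattern, the placeholder file system ID, and the nested ThroughputCapacity key are assumptions:

using AWS
@service FSx   # assumed AWS.jl loading pattern for the fsx service module

# Grow an FSx for Windows file system and bump its throughput in a single call.
FSx.update_file_system(
    "fs-0123456789abcdef0",   # placeholder file system ID
    Dict{String,Any}(
        "StorageCapacity" => 2400,   # GiB; must be at least 10 percent above the current value
        "WindowsConfiguration" => Dict{String,Any}("ThroughputCapacity" => 64),
    ),
)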
For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration AutomaticBackupRetentionDays DailyAutomaticBackupStartTime -SelfManagedActiveDirectoryConfiguration StorageCapacity ThroughputCapacity -WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following -properties: AutoImportPolicy AutomaticBackupRetentionDays -DailyAutomaticBackupStartTime DataCompressionType LustreRootSquashConfiguration -StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can -update the following properties: AddRouteTableIds AutomaticBackupRetentionDays -DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword -RemoveRouteTableIds StorageCapacity ThroughputCapacity -WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the -following properties: AutomaticBackupRetentionDays CopyTagsToBackups -CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration +SelfManagedActiveDirectoryConfiguration StorageCapacity StorageType +ThroughputCapacity DiskIopsConfiguration WeeklyMaintenanceStartTime For FSx for +Lustre file systems, you can update the following properties: AutoImportPolicy +AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DataCompressionType +LogConfiguration LustreRootSquashConfiguration MetadataConfiguration +PerUnitStorageThroughput StorageCapacity WeeklyMaintenanceStartTime For FSx for +ONTAP file systems, you can update the following properties: AddRouteTableIds +AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration + FsxAdminPassword HAPairs RemoveRouteTableIds StorageCapacity +ThroughputCapacity ThroughputCapacityPerHAPair WeeklyMaintenanceStartTime For +FSx for OpenZFS file systems, you can update the following properties: AddRouteTableIds + AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes +DailyAutomaticBackupStartTime DiskIopsConfiguration RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime # Arguments @@ -2194,6 +2383,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide. +- `"StorageType"`: - `"WindowsConfiguration"`: The configuration updates for an Amazon FSx for Windows File Server file system. """ @@ -2228,6 +2418,51 @@ function update_file_system( ) end +""" + update_shared_vpc_configuration() + update_shared_vpc_configuration(params::Dict{String,<:Any}) + +Configures whether participant accounts in your organization can create Amazon FSx for +NetApp ONTAP Multi-AZ file systems in subnets that are shared by a virtual private cloud +(VPC) owner. For more information, see the Amazon FSx for NetApp ONTAP User Guide. We +strongly recommend that participant-created Multi-AZ file systems in the shared VPC are +deleted before you disable this feature. Once the feature is disabled, these file systems +will enter a MISCONFIGURED state and behave like Single-AZ file systems. For more +information, see Important considerations before disabling shared VPC support for Multi-AZ +file systems. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ClientRequestToken"`: +- `"EnableFsxRouteTableUpdatesFromParticipantAccounts"`: Specifies whether participant + accounts can create FSx for ONTAP Multi-AZ file systems in shared subnets. Set to true to + enable or false to disable. +""" +function update_shared_vpc_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return fsx( + "UpdateSharedVpcConfiguration", + Dict{String,Any}("ClientRequestToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_shared_vpc_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return fsx( + "UpdateSharedVpcConfiguration", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ClientRequestToken" => string(uuid4())), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_snapshot(name, snapshot_id) update_snapshot(name, snapshot_id, params::Dict{String,<:Any}) diff --git a/src/services/gamelift.jl b/src/services/gamelift.jl index eeefa6cfb6..cfb70d02b4 100644 --- a/src/services/gamelift.jl +++ b/src/services/gamelift.jl @@ -13,18 +13,18 @@ configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit. When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for -your game to get acceptance from all players in the ticket. Acceptances are only valid for -tickets when they are in this status; all other acceptances result in an error. To register -acceptance, specify the ticket ID, a response, and one or more players. Once all players -have registered acceptance, the matchmaking tickets advance to status PLACING, where a new -game session is created for the match. If any player rejects the match, or if acceptances -are not received before a specified timeout, the proposed match is dropped. The matchmaking -tickets are then handled in one of two ways: For tickets where one or more players rejected -the match or failed to respond, the ticket status is set to CANCELLED, and processing is -terminated. For tickets where players have accepted or not yet responded, the ticket status -is returned to SEARCHING to find a new match. A new matchmaking request for these players -can be submitted as needed. Learn more Add FlexMatch to a game client FlexMatch -events (reference) +your game to get acceptance from all players in each ticket. Calls to this action are only +valid for tickets that are in this status; calls for tickets not in this status result in +an error. To register acceptance, specify the ticket ID, one or more players, and an +acceptance response. When all players have accepted, Amazon GameLift advances the +matchmaking tickets to status PLACING, and attempts to create a new game session for the +match. If any player rejects the match, or if acceptances are not received before a +specified timeout, the proposed match is dropped. Each matchmaking ticket in the failed +match is handled as follows: If the ticket has one or more players who rejected the +match or failed to respond, the ticket status is set CANCELLED and processing is +terminated. If all players in the ticket accepted the match, the ticket status is +returned to SEARCHING to find a new match. 
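A hedged sketch of registering acceptance through this module's wrapper follows. Only the acceptance_type argument is documented above, so the remaining positional arguments (the player IDs and the ticket ID) are assumptions based on the AcceptMatch request fields, and the IDs are placeholders:

using AWS
@service GameLift   # assumed AWS.jl loading pattern for the gamelift service module

# Accept a proposed match for two players on a ticket in REQUIRES_ACCEPTANCE status.
GameLift.accept_match(
    "ACCEPT",
    ["player-1", "player-2"],
    "ticket-11111111-2222-3333-4444-555555555555",
)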
Learn more Add FlexMatch to a game client + FlexMatch events (reference) # Arguments - `acceptance_type`: Player response to the proposed match. @@ -86,9 +86,9 @@ connection information that players can use to connect to the game server. To c server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or -player information. Filter options may be included to further restrict how a game server is -chosen, such as only allowing game servers on ACTIVE instances to be claimed. When a game -server is successfully claimed, connection information is returned. A claimed game server's +player information. Add filter options to further restrict how a game server is chosen, +such as only allowing game servers on ACTIVE instances to be claimed. When a game server is +successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server @@ -96,8 +96,8 @@ reverts to unclaimed status and is available to be claimed by another request. T time period is a fixed value and is not configurable. If you try to claim a specific game server, this request will fail in the following cases: If the game server utilization status is UTILIZED. If the game server claim status is CLAIMED. If the game server is -running on an instance in DRAINING status and provided filter option does not allow placing -on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide +running on an instance in DRAINING status and the provided filter option does not allow +placing on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide # Arguments - `game_server_group_name`: A unique identifier for the game server group where the game @@ -271,32 +271,155 @@ function create_build( ) end +""" + create_container_group_definition(container_definitions, name, operating_system, total_cpu_limit, total_memory_limit) + create_container_group_definition(container_definitions, name, operating_system, total_cpu_limit, total_memory_limit, params::Dict{String,<:Any}) + + This operation is used with the Amazon GameLift containers feature, which is currently in +public preview. Creates a ContainerGroupDefinition resource that describes a set of +containers for hosting your game server with Amazon GameLift managed EC2 hosting. An Amazon +GameLift container group is similar to a container \"task\" and \"pod\". Each container +group can have one or more containers. Use container group definitions when you create a +container fleet. Container group definitions determine how Amazon GameLift deploys your +containers to each instance in a container fleet. You can create two types of container +groups, based on scheduling strategy: A replica container group manages the containers +that run your game server application and supporting software. Replica container groups +might be replicated multiple times on each fleet instance, depending on instance resources. + A daemon container group manages containers that run other software, such as background +services, logging, or test processes. 
You might use a daemon container group for processes +that need to run only once per fleet instance, or processes that need to persist +independently of the replica container group. To create a container group definition, +specify a group name, a list of container definitions, and maximum total CPU and memory +requirements for the container group. Specify an operating system and scheduling strategy +or use the default values. When using the Amazon Web Services CLI tool, you can pass in +your container definitions as a JSON file. This operation requires Identity and Access +Management (IAM) permissions to access container images in Amazon ECR repositories. See +IAM permissions for Amazon GameLift for help setting the appropriate permissions. If +successful, this operation creates a new ContainerGroupDefinition resource with an ARN +value assigned. You can't change the properties of a container group definition. Instead, +create a new one. Learn more Create a container group definition Container fleet +design guide Create a container definition as a JSON file + +# Arguments +- `container_definitions`: Definitions for all containers in this group. Each container + definition identifies the container image and specifies configuration settings for the + container. See the Container fleet design guide for container guidelines. +- `name`: A descriptive identifier for the container group definition. The name value must + be unique in an Amazon Web Services Region. +- `operating_system`: The platform that is used by containers in the container group + definition. All containers in a group must run on the same operating system. +- `total_cpu_limit`: The maximum amount of CPU units to allocate to the container group. + Set this parameter to an integer value in CPU units (1 vCPU is equal to 1024 CPU units). + All containers in the group share this memory. If you specify CPU limits for individual + containers, set this parameter based on the following guidelines. The value must be equal + to or greater than the sum of the CPU limits for all containers in the group. +- `total_memory_limit`: The maximum amount of memory (in MiB) to allocate to the container + group. All containers in the group share this memory. If you specify memory limits for + individual containers, set this parameter based on the following guidelines. The value must + be (1) greater than the sum of the soft memory limits for all containers in the group, and + (2) greater than any individual container's hard memory limit. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"SchedulingStrategy"`: The method for deploying the container group across fleet + instances. A replica container group might have multiple copies on each fleet instance. A + daemon container group has one copy per fleet instance. Default value is REPLICA. +- `"Tags"`: A list of labels to assign to the container group definition resource. Tags are + developer-defined key-value pairs. Tagging Amazon Web Services resources are useful for + resource management, access management and cost allocation. For more information, see + Tagging Amazon Web Services Resources in the Amazon Web Services General Reference. 
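A hedged usage sketch for the wrapper defined below; the positional arguments mirror the required parameters above, while the keys inside each container definition, the operating-system value, and the module-loading pattern are illustrative assumptions:

using AWS
@service GameLift   # assumed AWS.jl loading pattern for the gamelift service module

container_definitions = [
    Dict{String,Any}(
        # Field names inside a container definition are assumptions for illustration.
        "ContainerName" => "game-server",
        "ImageUri" => "111122223333.dkr.ecr.us-west-2.amazonaws.com/mygame:latest",
    ),
]

GameLift.create_container_group_definition(
    container_definitions,      # ContainerDefinitions
    "my-replica-group",         # Name, unique per Region
    "AMAZON_LINUX_2023",        # OperatingSystem (assumed enum value)
    1024,                       # TotalCpuLimit, in CPU units (1 vCPU = 1024 units)
    2048,                       # TotalMemoryLimit, in MiB
    Dict{String,Any}("SchedulingStrategy" => "REPLICA"),
)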
+""" +function create_container_group_definition( + ContainerDefinitions, + Name, + OperatingSystem, + TotalCpuLimit, + TotalMemoryLimit; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return gamelift( + "CreateContainerGroupDefinition", + Dict{String,Any}( + "ContainerDefinitions" => ContainerDefinitions, + "Name" => Name, + "OperatingSystem" => OperatingSystem, + "TotalCpuLimit" => TotalCpuLimit, + "TotalMemoryLimit" => TotalMemoryLimit, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_container_group_definition( + ContainerDefinitions, + Name, + OperatingSystem, + TotalCpuLimit, + TotalMemoryLimit, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return gamelift( + "CreateContainerGroupDefinition", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ContainerDefinitions" => ContainerDefinitions, + "Name" => Name, + "OperatingSystem" => OperatingSystem, + "TotalCpuLimit" => TotalCpuLimit, + "TotalMemoryLimit" => TotalMemoryLimit, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_fleet(name) create_fleet(name, params::Dict{String,<:Any}) -Creates a fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host your custom -game server or Realtime Servers. Use this operation to configure the computing resources -for your fleet and provide instructions for running game servers on each instance. Most -Amazon GameLift fleets can deploy instances to multiple locations, including the home -Region (where the fleet is created) and an optional set of remote locations. Fleets that -are created in the following Amazon Web Services Regions support multiple locations: -us-east-1 (N. Virginia), us-west-2 (Oregon), eu-central-1 (Frankfurt), eu-west-1 (Ireland), -ap-southeast-2 (Sydney), ap-northeast-1 (Tokyo), and ap-northeast-2 (Seoul). Fleets that -are created in other Amazon GameLift Regions can deploy instances in the fleet's home -Region only. All fleet instances use the same configuration regardless of location; -however, you can adjust capacity settings and turn auto-scaling on/off for each location. -To create a fleet, choose the hardware for your instances, specify a game server build or -Realtime script to deploy, and provide a runtime configuration to direct Amazon GameLift -how to start and run game servers on each instance in the fleet. Set permissions for -inbound traffic to your game servers, and enable optional features as needed. When creating -a multi-location fleet, provide a list of additional remote locations. If you need to debug -your fleet, fetch logs, view performance metrics or other actions on the fleet, create the -development fleet with port 22/3389 open. As a best practice, we recommend opening ports -for remote access only when you need them and closing them when you're finished. If -successful, this operation creates a new Fleet resource and places it in NEW status, which -prompts Amazon GameLift to initiate the fleet creation workflow. Learn more Setting up -fleets Debug fleet creation issues Multi-location fleets + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Creates a fleet of compute resources to host your game +servers. Use this operation to set up the following types of fleets based on compute type: + Managed EC2 fleet An EC2 fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) +instances. 
Your game server build is deployed to each fleet instance. Amazon GameLift +manages the fleet's instances and controls the lifecycle of game server processes, which +host game sessions for players. EC2 fleets can have instances in multiple locations. Each +instance in the fleet is designated a Compute. To create an EC2 fleet, provide these +required parameters: Either BuildId or ScriptId ComputeType set to EC2 (the default +value) EC2InboundPermissions EC2InstanceType FleetType Name +RuntimeConfiguration with at least one ServerProcesses configuration If successful, this +operation creates a new fleet resource and places it in NEW status while Amazon GameLift +initiates the fleet creation workflow. To debug your fleet, fetch logs, view performance +metrics or other actions on the fleet, create a development fleet with port 22/3389 open. +As a best practice, we recommend opening ports for remote access only when you need them +and closing them when you're finished. When the fleet status is ACTIVE, you can adjust +capacity settings and turn autoscaling on/off for each location. Managed container fleet +A container fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances. Your +container architecture is deployed to each fleet instance based on the fleet configuration. +Amazon GameLift manages the containers on each fleet instance and controls the lifecycle of +game server processes, which host game sessions for players. Container fleets can have +instances in multiple locations. Each container on an instance that runs game server +processes is registered as a Compute. To create a container fleet, provide these required +parameters: ComputeType set to CONTAINER ContainerGroupsConfiguration +EC2InboundPermissions EC2InstanceType FleetType set to ON_DEMAND Name +RuntimeConfiguration with at least one ServerProcesses configuration If successful, this +operation creates a new fleet resource and places it in NEW status while Amazon GameLift +initiates the fleet creation workflow. When the fleet status is ACTIVE, you can adjust +capacity settings and turn autoscaling on/off for each location. Anywhere fleet An +Anywhere fleet represents compute resources that are not owned or managed by Amazon +GameLift. You might create an Anywhere fleet with your local machine for testing, or use +one to host game servers with on-premises hardware or other game hosting solutions. To +create an Anywhere fleet, provide these required parameters: ComputeType set to ANYWHERE + Locations specifying a custom location Name If successful, this operation creates +a new fleet resource and places it in ACTIVE status. You can register computes with a fleet +in ACTIVE status. Learn more Setting up fleets Setting up a container fleet Debug +fleet creation issues Multi-location fleets # Arguments - `name`: A descriptive label that is associated with a fleet. Fleet names do not need to @@ -305,9 +428,10 @@ fleets Debug fleet creation issues Multi-location fleets # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AnywhereConfiguration"`: Amazon GameLift Anywhere configuration options. -- `"BuildId"`: The unique identifier for a custom game server build to be deployed on fleet - instances. You can use either the build ID or ARN. The build must be uploaded to Amazon - GameLift and in READY status. This fleet property cannot be changed later. +- `"BuildId"`: The unique identifier for a custom game server build to be deployed to a + fleet with compute type EC2. 
You can use either the build ID or ARN. The build must be + uploaded to Amazon GameLift and in READY status. This fleet property can't be changed after + the fleet is created. - `"CertificateConfiguration"`: Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon GameLift uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift. By default, the @@ -319,35 +443,58 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys creation request with certificate generation enabled in an unsupported Region, fails with a 4xx error. For more information about the supported Regions, see Supported Regions in the Certificate Manager User Guide. -- `"ComputeType"`: The type of compute resource used to host your game servers. You can use - your own compute resources with Amazon GameLift Anywhere or use Amazon EC2 instances with - managed Amazon GameLift. By default, this property is set to EC2. +- `"ComputeType"`: The type of compute resource used to host your game servers. EC2 – + The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the + default setting. CONTAINER – Container images with your game server build and + supporting software are deployed to Amazon EC2 instances for cloud hosting. With this + compute type, you must specify the ContainerGroupsConfiguration parameter. ANYWHERE – + Game servers or container images with your game server and supporting software are deployed + to compute resources that are provided and managed by you. With this compute type, you can + also set the AnywhereConfiguration parameter. +- `"ContainerGroupsConfiguration"`: The container groups to deploy to instances in the + container fleet and other fleet-level configuration settings. Use the + CreateContainerGroupDefinition action to create container groups. A container fleet must + have exactly one replica container group, and can optionally have one daemon container + group. You can't change this property after you create the fleet. - `"Description"`: A description for the fleet. -- `"EC2InboundPermissions"`: The allowed IP address ranges and port settings that allow - inbound traffic to access game sessions on this fleet. If the fleet is hosting a custom - game build, this property must be set before players can connect to game sessions. For - Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges. -- `"EC2InstanceType"`: The Amazon GameLift-supported Amazon EC2 instance type to use for - all fleet instances. Instance type determines the computing resources that will be used to - host your game servers, including CPU, memory, storage, and networking capacity. See Amazon - Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types. +- `"EC2InboundPermissions"`: The IP address ranges and port settings that allow inbound + traffic to access game server processes and other processes on this fleet. Set this + parameter for EC2 and container fleets. You can leave this parameter empty when creating + the fleet, but you must call UpdateFleetPortSettings to set it before players can connect + to game sessions. As a best practice, we recommend opening ports for remote access only + when you need them and closing them when you're finished. For Realtime Servers fleets, + Amazon GameLift automatically sets TCP and UDP ranges. 
To manage inbound access for a + container fleet, set this parameter to the same port numbers that you set for the fleet's + connection port range. During the life of the fleet, update this parameter to control which + connection ports are open to inbound traffic. +- `"EC2InstanceType"`: The Amazon GameLift-supported Amazon EC2 instance type to use with + EC2 and container fleets. Instance type determines the computing resources that will be + used to host your game servers, including CPU, memory, storage, and networking capacity. + See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 + instance types. - `"FleetType"`: Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to ON_DEMAND. Learn more about when to use On-Demand versus - Spot Instances. This property cannot be changed after the fleet is created. -- `"InstanceRoleArn"`: A unique identifier for an IAM role that manages access to your - Amazon Web Services services. With an instance role ARN set, any application that runs on - an instance in this fleet can assume the role, including install scripts, server processes, - and daemons (background processes). Create a role or look up a role's ARN by using the IAM - dashboard in the Amazon Web Services Management Console. Learn more about using on-box - credentials for your game servers at Access external resources from a game server. This - property cannot be changed after the fleet is created. + Spot Instances. This fleet property can't be changed after the fleet is created. +- `"InstanceRoleArn"`: A unique identifier for an IAM role with access permissions to other + Amazon Web Services services. Any application that runs on an instance in the + fleet--including install scripts, server processes, and other processes--can use these + permissions to interact with Amazon Web Services resources that you own or have access to. + For more information about using the role with your game server builds, see Communicate + with other Amazon Web Services resources from your fleets. This fleet property can't be + changed after the fleet is created. +- `"InstanceRoleCredentialsProvider"`: Prompts Amazon GameLift to generate a shared + credentials file for the IAM role that's defined in InstanceRoleArn. The shared credentials + file is stored on each fleet instance and refreshed as needed. Use shared credentials for + applications that are deployed along with the game server executable, if the game server is + integrated with server SDK version 5.x. For more information about using shared + credentials, see Communicate with other Amazon Web Services resources from your fleets. - `"Locations"`: A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in Amazon Web Services Regions that support multiple locations. You can add any Amazon GameLift-supported Amazon Web Services Region as a remote location, in the form of an Amazon Web Services - Region code such as us-west-2. To create a fleet with instances in the home Region only, - don't use this parameter. To use this parameter, Amazon GameLift requires you to use your - home location in the request. + Region code, such as us-west-2 or Local Zone code. To create a fleet with instances in the + home Region only, don't set this parameter. When using this parameter, Amazon GameLift + requires you to include your home location in the request. 
- `"LogPaths"`: This parameter is no longer used. To specify where Amazon GameLift should store log files once a server process shuts down, use the Amazon GameLift server API ProcessReady() and specify one or more directory paths in logParameters. For more @@ -371,15 +518,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys peering in VPC Peering with Amazon GameLift Fleets. - `"ResourceCreationLimitPolicy"`: A policy that limits the number of game sessions that an individual player can create on instances in this fleet within a specified span of time. -- `"RuntimeConfiguration"`: Instructions for how to launch and maintain server processes on - instances in the fleet. The runtime configuration defines one or more server process - configurations, each identifying a build executable or Realtime script file and the number - of processes of that type to run concurrently. The RuntimeConfiguration parameter is - required unless the fleet is being configured using the older parameters ServerLaunchPath - and ServerLaunchParameters, which are still supported for backward compatibility. -- `"ScriptId"`: The unique identifier for a Realtime configuration script to be deployed on - fleet instances. You can use either the script ID or ARN. Scripts must be uploaded to - Amazon GameLift prior to creating the fleet. This fleet property cannot be changed later. +- `"RuntimeConfiguration"`: Instructions for how to launch and run server processes on the + fleet. Set runtime configuration for EC2 fleets and container fleets. For an Anywhere + fleets, set this parameter only if the fleet is running the Amazon GameLift Agent. The + runtime configuration defines one or more server process configurations. Each server + process identifies a game executable or Realtime script file and the number of processes to + run concurrently. This parameter replaces the parameters ServerLaunchPath and + ServerLaunchParameters, which are still supported for backward compatibility. +- `"ScriptId"`: The unique identifier for a Realtime configuration script to be deployed to + a fleet with compute type EC2. You can use either the script ID or ARN. Scripts must be + uploaded to Amazon GameLift prior to creating the fleet. This fleet property can't be + changed after the fleet is created. - `"ServerLaunchParameters"`: This parameter is no longer used. Specify server launch parameters using the RuntimeConfiguration parameter. Requests that use this parameter instead continue to be valid. @@ -414,17 +563,18 @@ end create_fleet_locations(fleet_id, locations) create_fleet_locations(fleet_id, locations, params::Dict{String,<:Any}) -Adds remote locations to a fleet and begins populating the new locations with EC2 -instances. The new instances conform to the fleet's instance type, auto-scaling, and other -configuration settings. This operation cannot be used with fleets that don't support -remote locations. Fleets can have multiple locations only if they reside in Amazon Web -Services Regions that support this feature and were created after the feature was released -in March 2021. To add fleet locations, specify the fleet to be updated and provide a list -of one or more locations. If successful, this operation returns the list of added -locations with their status set to NEW. Amazon GameLift initiates the process of starting -an instance in each added location. You can track the status of each new location by -monitoring location creation events using DescribeFleetEvents. 
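Tying the create_fleet parameters above together, a hedged sketch of the simplest case, an Anywhere fleet with one custom location; the module-loading pattern and the shape of each Locations entry are assumptions:

using AWS
@service GameLift   # assumed AWS.jl loading pattern for the gamelift service module

# Create an Anywhere fleet; ComputeType ANYWHERE, a Name, and a custom location
# are the required pieces listed above.
GameLift.create_fleet(
    "my-anywhere-fleet",
    Dict{String,Any}(
        "ComputeType" => "ANYWHERE",
        "Locations" => [Dict{String,Any}("Location" => "custom-home-lab")],
    ),
)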
Learn more Setting up -fleets Multi-location fleets + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Adds remote locations to an EC2 or container fleet and +begins populating the new locations with instances. The new instances conform to the +fleet's instance type, auto-scaling, and other configuration settings. You can't add +remote locations to a fleet that resides in an Amazon Web Services Region that doesn't +support multiple locations. Fleets created prior to March 2021 can't support multiple +locations. To add fleet locations, specify the fleet to be updated and provide a list of +one or more locations. If successful, this operation returns the list of added locations +with their status set to NEW. Amazon GameLift initiates the process of starting an instance +in each added location. You can track the status of each new location by monitoring +location creation events using DescribeFleetEvents. Learn more Setting up fleets +Multi-location fleets # Arguments - `fleet_id`: A unique identifier for the fleet to add locations to. You can use either the @@ -628,23 +778,23 @@ end Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift -game session placement feature with StartGameSessionPlacement , which uses FleetIQ -algorithms and queues to optimize the placement process. When creating a game session, you +game session placement feature with StartGameSessionPlacement , which uses the FleetIQ +algorithm and queues to optimize the placement process. When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration -settings. The fleet must be in ACTIVE status before a game session can be created in it. -This operation can be used in the following ways: To create a game session on an -instance in a fleet's home Region, provide a fleet or alias ID along with your game session -configuration. To create a game session on an instance in a fleet's remote location, -provide a fleet or alias ID and a location name, along with your game session -configuration. If successful, a workflow is initiated to start a new game session. A -GameSession object is returned containing the game session configuration and status. When -the status is ACTIVE, game session connection information is provided and player sessions -can be created for the game session. By default, newly created game sessions are open to -new players. You can restrict new player access by using UpdateGameSession to change the -game session's player session creation policy. Game session logs are retained for all -active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download -the log files. Available in Amazon GameLift Local. Learn more Start a game session -All APIs by task +settings. The target fleet must be in ACTIVE status. You can use this operation in the +following ways: To create a game session on an instance in a fleet's home Region, +provide a fleet or alias ID along with your game session configuration. To create a game +session on an instance in a fleet's remote location, provide a fleet or alias ID and a +location name, along with your game session configuration. 
To create a game session on
+an instance in an Anywhere fleet, specify the fleet's custom location. If successful,
+Amazon GameLift initiates a workflow to start a new game session and returns a GameSession
+object containing the game session configuration and status. When the game session status
+is ACTIVE, it is updated with connection information and you can create player sessions for
+the game session. By default, newly created game sessions are open to new players. You can
+restrict new player access by using UpdateGameSession to change the game session's player
+session creation policy. Amazon GameLift retains logs for active game sessions for 14 days. To access the
+logs, call GetGameSessionLogUrl to download the log files. Available in Amazon GameLift
+Local. Learn more Start a game session All APIs by task

# Arguments
- `maximum_player_session_count`: The maximum number of players that can be connected
@@ -666,9 +816,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"FleetId"`: A unique identifier for the fleet to create a game session in. You can use
 either the fleet ID or ARN value. Each request must reference either a fleet ID or alias
 ID, but not both.
-- `"GameProperties"`: A set of custom properties for a game session, formatted as key:value
- pairs. These properties are passed to a game server process with a request to start a new
- game session (see Start a Game Session).
+- `"GameProperties"`: A set of key-value pairs that can store custom data in a game
+ session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. For an example, see
+ Create a game session with custom properties.
- `"GameSessionData"`: A set of custom game session properties, formatted as a single
 string value. This data is passed to a game server process with a request to start a new
 game session (see Start a Game Session).
@@ -686,7 +836,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 deleted.
- `"Location"`: A fleet's remote location to place the new game session in. If this
 parameter is not set, the new game session is placed in the fleet's home Region. Specify a
- remote location with an Amazon Web Services Region code such as us-west-2.
+ remote location with an Amazon Web Services Region code such as us-west-2. When using an
+ Anywhere fleet, this parameter is required and must be set to the Anywhere fleet's custom
+ location.
- `"Name"`: A descriptive label that is associated with a game session. Session names do
 not need to be unique.
"""
@@ -878,8 +1030,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 accept a proposed match, if acceptance is required.
- `"AdditionalPlayerCount"`: The number of player slots in a match to keep open for future
 players. For example, if the configuration's rule set specifies a match for a single
- 12-person team, and the additional player count is set to 2, only 10 players are selected
- for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.
+ 10-person team, and the additional player count is set to 2, 10 players will be selected
+ for the match and 2 more player slots will be open for future players. This parameter is
+ not used if FlexMatchMode is set to STANDALONE.
- `"BackfillMode"`: The method used to backfill game sessions that are created with this
 matchmaking configuration. Specify MANUAL when your game manages backfill requests manually
 or does not use the match backfill feature. 
Specify AUTOMATIC to have Amazon GameLift @@ -894,11 +1047,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event. WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift queue to start a game session for the match. -- `"GameProperties"`: A set of custom properties for a game session, formatted as key:value - pairs. These properties are passed to a game server process with a request to start a new - game session (see Start a Game Session). This information is added to the new GameSession - object that is created for a successful match. This parameter is not used if FlexMatchMode - is set to STANDALONE. +- `"GameProperties"`: A set of key-value pairs that can store custom data in a game + session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. This information is + added to the new GameSession object that is created for a successful match. This parameter + is not used if FlexMatchMode is set to STANDALONE. - `"GameSessionData"`: A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession @@ -1403,19 +1555,54 @@ function delete_build( ) end +""" + delete_container_group_definition(name) + delete_container_group_definition(name, params::Dict{String,<:Any}) + + This operation is used with the Amazon GameLift containers feature, which is currently in +public preview. Deletes a container group definition resource. You can delete a container +group definition if there are no fleets using the definition. To delete a container group +definition, identify the resource to delete. Learn more Manage a container group +definition + +# Arguments +- `name`: The unique identifier for the container group definition to delete. You can use + either the Name or ARN value. + +""" +function delete_container_group_definition( + Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return gamelift( + "DeleteContainerGroupDefinition", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_container_group_definition( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return gamelift( + "DeleteContainerGroupDefinition", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_fleet(fleet_id) delete_fleet(fleet_id, params::Dict{String,<:Any}) -Deletes all resources and information related a fleet. Any current fleet instances, -including those in remote locations, are shut down. You don't need to call -DeleteFleetLocations separately. If the fleet being deleted has a VPC peering connection, -you first need to get a valid authorization (good for 24 hours) by calling -CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering -connection. To delete a fleet, specify the fleet ID to be terminated. During the deletion -process the fleet status is changed to DELETING. When completed, the status switches to -TERMINATED and the fleet event FLEET_DELETED is sent. 
Learn more Setting up Amazon -GameLift Fleets +Deletes all resources and information related to a fleet and shuts down any currently +running fleet instances, including those in remote locations. If the fleet being deleted +has a VPC peering connection, you first need to get a valid authorization (good for 24 +hours) by calling CreateVpcPeeringAuthorization. You don't need to explicitly delete the +VPC peering connection. To delete a fleet, specify the fleet ID to be terminated. During +the deletion process, the fleet status is changed to DELETING. When completed, the status +switches to TERMINATED and the fleet event FLEET_DELETED is emitted. Learn more Setting +up Amazon GameLift Fleets # Arguments - `fleet_id`: A unique identifier for the fleet to be deleted. You can use either the fleet @@ -1588,8 +1775,8 @@ end delete_location(location_name, params::Dict{String,<:Any}) Deletes a custom location. Before deleting a custom location, review any fleets currently -using the custom location and deregister the location if it is in use. For more information -see, DeregisterCompute. +using the custom location and deregister the location if it is in use. For more +information, see DeregisterCompute. # Arguments - `location_name`: The location name of the custom location to be deleted. @@ -1879,12 +2066,22 @@ end deregister_compute(compute_name, fleet_id) deregister_compute(compute_name, fleet_id, params::Dict{String,<:Any}) -Removes a compute resource from the specified fleet. Deregister your compute resources -before you delete the compute. + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Removes a compute resource from an Amazon GameLift +Anywhere fleet or container fleet. Deregistered computes can no longer host game sessions +through Amazon GameLift. For an Anywhere fleet or a container fleet that's running the +Amazon GameLift Agent, the Agent handles all compute registry tasks for you. For an +Anywhere fleet that doesn't use the Agent, call this operation to deregister fleet +computes. To deregister a compute, call this operation from the compute that's being +deregistered and specify the compute name and the fleet ID. # Arguments -- `compute_name`: The name of the compute resource you want to delete. -- `fleet_id`: >A unique identifier for the fleet the compute resource is registered to. +- `compute_name`: The unique identifier of the compute resource to deregister. For an + Anywhere fleet compute, use the registered compute name. For a container fleet, use the + compute name (for example, + a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the compute ARN. +- `fleet_id`: A unique identifier for the fleet the compute resource is currently + registered to. """ function deregister_compute( @@ -2038,14 +2235,24 @@ end describe_compute(compute_name, fleet_id) describe_compute(compute_name, fleet_id, params::Dict{String,<:Any}) -Retrieves properties for a compute resource. To request a compute resource specify the -fleet ID and compute name. If successful, Amazon GameLift returns an object containing the -build properties. + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Retrieves properties for a compute resource in an Amazon +GameLift fleet. To get a list of all computes in a fleet, call ListCompute. To request +information on a specific compute, provide the fleet ID and compute name. 
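For example, with the wrapper above, looking up a single compute might look like the following hedged sketch; the module-loading pattern and both identifiers are placeholders:

using AWS
@service GameLift   # assumed AWS.jl loading pattern for the gamelift service module

# Retrieve one compute from an Anywhere fleet by its registered compute name;
# for an EC2 fleet the first argument would be the instance ID instead.
GameLift.describe_compute("my-dev-laptop", "fleet-2222bbbb-33cc-44dd-55ee-6666ffff7777")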
If successful, +this operation returns details for the requested compute resource. Depending on the fleet's +compute type, the result includes the following information: For EC2 fleets, this +operation returns information about the EC2 instance. For ANYWHERE fleets, this operation +returns information about the registered compute. For CONTAINER fleets, this operation +returns information about the container that's registered as a compute, and the instance +it's running on. The compute name is the container name. # Arguments -- `compute_name`: A descriptive label that is associated with the compute resource - registered to your fleet. -- `fleet_id`: A unique identifier for the fleet the compute is registered to. +- `compute_name`: The unique identifier of the compute resource to retrieve properties for. + For an Anywhere fleet compute, use the registered compute name. For an EC2 fleet instance, + use the instance ID. For a container fleet, use the compute name (for example, + a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the compute ARN. +- `fleet_id`: A unique identifier for the fleet that the compute belongs to. You can use + either the fleet ID or ARN value. """ function describe_compute( @@ -2078,6 +2285,42 @@ function describe_compute( ) end +""" + describe_container_group_definition(name) + describe_container_group_definition(name, params::Dict{String,<:Any}) + + This operation is used with the Amazon GameLift containers feature, which is currently in +public preview. Retrieves the properties of a container group definition, including all +container definitions in the group. To retrieve a container group definition, provide a +resource identifier. If successful, this operation returns the complete properties of the +container group definition. Learn more Manage a container group definition + +# Arguments +- `name`: The unique identifier for the container group definition to retrieve properties + for. You can use either the Name or ARN value. + +""" +function describe_container_group_definition( + Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return gamelift( + "DescribeContainerGroupDefinition", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_container_group_definition( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return gamelift( + "DescribeContainerGroupDefinition", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_ec2_instance_limits() describe_ec2_instance_limits(params::Dict{String,<:Any}) @@ -2141,16 +2384,18 @@ end describe_fleet_attributes() describe_fleet_attributes(params::Dict{String,<:Any}) -Retrieves core fleet-wide properties, including the computing hardware and deployment -configuration for all instances in the fleet. This operation can be used in the following -ways: To get attributes for one or more specific fleets, provide a list of fleet IDs or -fleet ARNs. To get attributes for all fleets, do not provide a fleet identifier. When -requesting attributes for multiple fleets, use the pagination parameters to retrieve -results as a set of sequential pages. If successful, a FleetAttributes object is returned -for each fleet requested, unless the fleet identifier is not found. Some API operations -limit the number of fleet IDs that allowed in one request. 
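# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# the DescribeCompute and DescribeContainerGroupDefinition wrappers above. The
# module name `Game_Lift`, fleet IDs, and compute identifiers are placeholders;
# the identifier format depends on the fleet's compute type (EC2 instance ID,
# registered Anywhere compute name, or container compute name/ARN).
using AWS
@service Game_Lift

ec2_compute = Game_Lift.describe_compute("i-0123456789abcdef0", "fleet-ec2-placeholder")
anywhere_compute = Game_Lift.describe_compute("MyLaptopCompute", "fleet-anywhere-placeholder")

# Container group definitions are looked up by Name or ARN.
group_def = Game_Lift.describe_container_group_definition("MyReplicaContainerGroup")
# ------------------------------------------------------------------------------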
If a request exceeds this limit, -the request fails and the error message contains the maximum allowed number. Learn more -Setting up Amazon GameLift fleets + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Retrieves core fleet-wide properties for fleets in an +Amazon Web Services Region. Properties include the computing hardware and deployment +configuration for instances in the fleet. You can use this operation in the following ways: + To get attributes for specific fleets, provide a list of fleet IDs or fleet ARNs. To +get attributes for all fleets, do not provide a fleet identifier. When requesting +attributes for multiple fleets, use the pagination parameters to retrieve results as a set +of sequential pages. If successful, a FleetAttributes object is returned for each fleet +requested, unless the fleet identifier is not found. Some API operations limit the number +of fleet IDs that are allowed in one request. If a request exceeds this limit, the request +fails and the error message contains the maximum allowed number. Learn more Setting up +Amazon GameLift fleets # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2185,20 +2430,21 @@ end describe_fleet_capacity() describe_fleet_capacity(params::Dict{String,<:Any}) -Retrieves the resource capacity settings for one or more fleets. The data returned includes -the current fleet capacity (number of EC2 instances), and settings that can control how -capacity scaling. For fleets with remote locations, this operation retrieves data for the -fleet's home Region only. This operation can be used in the following ways: To get -capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs. - To get capacity data for all fleets, do not provide a fleet identifier. When requesting -multiple fleets, use the pagination parameters to retrieve results as a set of sequential -pages. If successful, a FleetCapacity object is returned for each requested fleet ID. Each -FleetCapacity object includes a Location property, which is set to the fleet's home Region. -When a list of fleet IDs is provided, attribute objects are returned only for fleets that -currently exist. Some API operations may limit the number of fleet IDs that are allowed in -one request. If a request exceeds this limit, the request fails and the error message -includes the maximum allowed. Learn more Setting up Amazon GameLift fleets GameLift -metrics for fleets + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Retrieves the resource capacity settings for one or more +fleets. For a container fleet, this operation also returns counts for replica container +groups. With multi-location fleets, this operation retrieves data for the fleet's home +Region only. To retrieve capacity for remote locations, see DescribeFleetLocationCapacity. +This operation can be used in the following ways: To get capacity data for one or more +specific fleets, provide a list of fleet IDs or fleet ARNs. To get capacity data for all +fleets, do not provide a fleet identifier. When requesting multiple fleets, use the +pagination parameters to retrieve results as a set of sequential pages. If successful, a +FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object +includes a Location property, which is set to the fleet's home Region.
Capacity values are +returned only for fleets that currently exist. Some API operations may limit the number of +fleet IDs that are allowed in one request. If a request exceeds this limit, the request +fails and the error message includes the maximum allowed. Learn more Setting up Amazon +GameLift fleets GameLift metrics for fleets # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2335,7 +2581,8 @@ end Retrieves the resource capacity settings for a fleet location. The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested -fleet location. Use this operation to retrieve capacity information for a fleet's remote +fleet location. For a container fleet, this operation also returns counts for replica +container groups. Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity). To retrieve capacity data, identify a fleet and location. If successful, a FleetCapacity object is returned for the requested fleet location. Learn @@ -2431,17 +2678,18 @@ end describe_fleet_port_settings(fleet_id) describe_fleet_port_settings(fleet_id, params::Dict{String,<:Any}) -Retrieves a fleet's inbound connection permissions. Connection permissions specify the -range of IP addresses and port settings that incoming traffic can use to access server -processes in the fleet. Game sessions that are running on instances in the fleet must use -connections that fall in this range. This operation can be used in the following ways: -To retrieve the inbound connection permissions for a fleet, identify the fleet's unique -identifier. To check the status of recent updates to a fleet remote location, specify -the fleet ID and a location. Port setting updates can take time to propagate across all -locations. If successful, a set of IpPermission objects is returned for the requested -fleet ID. When a location is specified, a pending status is included. If the requested -fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift -fleets +Retrieves a fleet's inbound connection permissions. Connection permissions specify IP +addresses and port settings that incoming traffic can use to access server processes in the +fleet. Game server processes that are running in the fleet must use a port that falls +within this range. To connect to game server processes on a container fleet, the port +settings should include one or more of the fleet's connection ports. Use this operation in +the following ways: To retrieve the port settings for a fleet, identify the fleet's +unique identifier. To check the status of recent updates to a fleet remote location, +specify the fleet ID and a location. Port setting updates can take time to propagate across +all locations. If successful, a set of IpPermission objects is returned for the +requested fleet ID. When specifying a location, this operation returns a pending status. If +the requested fleet has been deleted, the result set is empty. Learn more Setting up +Amazon GameLift fleets # Arguments - `fleet_id`: A unique identifier for the fleet to retrieve port settings for. You can use @@ -2871,15 +3119,19 @@ end describe_instances(fleet_id) describe_instances(fleet_id, params::Dict{String,<:Any}) -Retrieves information about a fleet's instances, including instance IDs, connection data, -and status. 
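# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# the capacity-related operations updated above: DescribeFleetCapacity for the
# home Region and DescribeFleetLocationCapacity for a remote location. The
# module name, fleet ID, location code, and the "FleetIds" request key are
# assumptions based on the docstrings; adjust them to your account.
using AWS
@service Game_Lift

home_capacity = Game_Lift.describe_fleet_capacity(
    Dict{String,Any}("FleetIds" => ["fleet-1234abcd-placeholder"])
)
# Argument order (fleet ID, then location name) is assumed here.
remote_capacity = Game_Lift.describe_fleet_location_capacity(
    "fleet-1234abcd-placeholder", "us-west-2"
)
# ------------------------------------------------------------------------------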
This operation can be used in the following ways: To get information on all -instances that are deployed to a fleet's home Region, provide the fleet ID. To get -information on all instances that are deployed to a fleet's remote location, provide the -fleet ID and location name. To get information on a specific instance in a fleet, provide -the fleet ID and instance ID. Use the pagination parameters to retrieve results as a set -of sequential pages. If successful, an Instance object is returned for each requested -instance. Instances are not returned in any particular order. Learn more Remotely -Access Fleet Instances Debug Fleet Issues Related actions All APIs by task +Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, +including instance ID, connection data, and status. You can use this operation with a +multi-location fleet to get location-specific instance information. As an alternative, use +the operations ListCompute and DescribeCompute to retrieve information for compute +resources, including EC2 and Anywhere fleets. You can call this operation in the following +ways: To get information on all instances in a fleet's home Region, specify the fleet ID. + To get information on all instances in a fleet's remote location, specify the fleet ID +and location name. To get information on a specific instance in a fleet, specify the +fleet ID and instance ID. Use the pagination parameters to retrieve results as a set of +sequential pages. If successful, this operation returns Instance objects for each +requested instance, listed in no particular order. If you call this operation for an +Anywhere fleet, you receive an InvalidRequestException. Learn more Remotely connect to +fleet instances Debug fleet issues Related actions All APIs by task # Arguments - `fleet_id`: A unique identifier for the fleet to retrieve instance information for. You @@ -3053,9 +3305,11 @@ following ways: To retrieve a specific player session, provide the player ses only. To retrieve all player sessions in a game session, provide the game session ID only. To retrieve all player sessions for a specific player, provide a player ID only. To request player sessions, specify either a player session ID, game session ID, or player -ID. You can filter this request by player session status. Use the pagination parameters to -retrieve results as a set of sequential pages. If successful, a PlayerSession object is -returned for each session that matches the request. Related actions All APIs by task +ID. You can filter this request by player session status. If you provide a specific +PlayerSessionId or PlayerId, Amazon GameLift ignores the filter criteria. Use the +pagination parameters to retrieve results as a set of sequential pages. If successful, a +PlayerSession object is returned for each session that matches the request. Related +actions All APIs by task # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3100,12 +3354,15 @@ end describe_runtime_configuration(fleet_id) describe_runtime_configuration(fleet_id, params::Dict{String,<:Any}) -Retrieves a fleet's runtime configuration settings. The runtime configuration tells Amazon -GameLift which server processes to run (and how) on each instance in the fleet. To get the -runtime configuration that is currently in forces for a fleet, provide the fleet ID. If -successful, a RuntimeConfiguration object is returned for the requested fleet. 
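# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# the DescribeRuntimeConfiguration operation being updated here. The module
# name and fleet ID are placeholders, and the "RuntimeConfiguration" /
# "ServerProcesses" response keys reflect the AWS API shape rather than
# anything spelled out in this diff.
using AWS
@service Game_Lift

cfg = Game_Lift.describe_runtime_configuration("fleet-1234abcd-placeholder")
for proc in get(cfg["RuntimeConfiguration"], "ServerProcesses", [])
    println(proc["LaunchPath"], " => ", proc["ConcurrentExecutions"], " concurrent processes")
end
# ------------------------------------------------------------------------------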
If the -requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon -GameLift fleets Running multiple processes on a fleet +Retrieves a fleet's runtime configuration settings. The runtime configuration determines +which server processes run, and how, on computes in the fleet. For managed EC2 fleets, the +runtime configuration describes server processes that run on each fleet instance. For +container fleets, the runtime configuration describes server processes that run in each +replica container group. You can update a fleet's runtime configuration at any time using +UpdateRuntimeConfiguration. To get the current runtime configuration for a fleet, provide +the fleet ID. If successful, a RuntimeConfiguration object is returned for the requested +fleet. If the requested fleet has been deleted, the result set is empty. Learn more +Setting up Amazon GameLift fleets Running multiple processes on a fleet # Arguments - `fleet_id`: A unique identifier for the fleet to get the runtime configuration for. You @@ -3293,21 +3550,27 @@ end get_compute_access(compute_name, fleet_id) get_compute_access(compute_name, fleet_id, params::Dict{String,<:Any}) -Requests remote access to a fleet instance. Remote access is useful for debugging, -gathering benchmarking data, or observing activity in real time. To remotely access an -instance, you need credentials that match the operating system of the instance. For a -Windows instance, Amazon GameLift returns a user name and password as strings for use with -a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name -and RSA private key, also as strings, for use with an SSH client. The private key must be -saved in the proper format to a .pem file before using. If you're making this request using -the CLI, saving the secret can be handled as part of the GetInstanceAccess request, as -shown in one of the examples for this operation. To request access to a specific instance, -specify the IDs of both the instance and the fleet it belongs to. Learn more Remotely -Access Fleet Instances Debug Fleet Issues + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Requests authorization to remotely connect to a hosting +resource in a Amazon GameLift managed fleet. This operation is not used with Amazon +GameLift Anywhere fleets To request access, specify the compute name and the fleet ID. If +successful, this operation returns a set of temporary Amazon Web Services credentials, +including a two-part access key and a session token. EC2 fleets With an EC2 fleet (where +compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start +a session with the compute. For more details, see Starting a session (CLI) in the Amazon +EC2 Systems Manager User Guide. Container fleets With a container fleet (where compute +type is CONTAINER), use these credentials and the target value with SSM to connect to the +fleet instance where the container is running. After you're connected to the instance, use +Docker commands to interact with the container. Learn more Remotely connect to fleet +instances Debug fleet issues Remotely connect to a container fleet # Arguments -- `compute_name`: The name of the compute resource you are requesting credentials for. -- `fleet_id`: A unique identifier for the fleet that the compute resource is registered to. +- `compute_name`: A unique identifier for the compute resource that you want to connect to. 
+ For an EC2 fleet compute, use the instance ID. For a container fleet, use the compute name + (for example, a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the + compute ARN. +- `fleet_id`: A unique identifier for the fleet that holds the compute resource that you + want to connect to. You can use either the fleet ID or ARN value. """ function get_compute_access( @@ -3344,14 +3607,25 @@ end get_compute_auth_token(compute_name, fleet_id) get_compute_auth_token(compute_name, fleet_id, params::Dict{String,<:Any}) -Requests an authentication token from Amazon GameLift. The authentication token is used by -your game server to authenticate with Amazon GameLift. Each authentication token has an -expiration time. To continue using the compute resource to host your game server, regularly -retrieve a new authorization token. +Requests an authentication token from Amazon GameLift for a compute resource in an Amazon +GameLift Anywhere fleet or container fleet. Game servers that are running on the compute +use this token to communicate with the Amazon GameLift service, such as when calling the +Amazon GameLift server SDK action InitSDK(). Authentication tokens are valid for a limited +time span, so you need to request a fresh token before the current token expires. Use this +operation based on the fleet compute type: For EC2 fleets, auth token retrieval and +refresh is handled automatically. All game servers that are running on all fleet instances +have access to a valid auth token. For ANYWHERE and CONTAINER fleets, if you're using the +Amazon GameLift Agent, auth token retrieval and refresh is handled automatically for any +container or Anywhere compute where the Agent is running. If you're not using the Agent, +create a mechanism to retrieve and refresh auth tokens for computes that are running game +server processes. Learn more Create an Anywhere fleet Test your integration +Server SDK reference guides (for version 5.x) # Arguments - `compute_name`: The name of the compute resource you are requesting the authentication - token for. + token for. For an Anywhere fleet compute, use the registered compute name. For an EC2 fleet + instance, use the instance ID. For a container fleet, use the compute name (for example, + a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the compute ARN. - `fleet_id`: A unique identifier for the fleet that the compute is registered to. """ @@ -3428,26 +3702,27 @@ end get_instance_access(fleet_id, instance_id) get_instance_access(fleet_id, instance_id, params::Dict{String,<:Any}) -Requests remote access to a fleet instance. Remote access is useful for debugging, -gathering benchmarking data, or observing activity in real time. To remotely access an -instance, you need credentials that match the operating system of the instance. For a -Windows instance, Amazon GameLift returns a user name and password as strings for use with -a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name -and RSA private key, also as strings, for use with an SSH client. The private key must be -saved in the proper format to a .pem file before using. If you're making this request using -the CLI, saving the secret can be handled as part of the GetInstanceAccess request, as -shown in one of the examples for this operation. To request access to a specific instance, -specify the IDs of both the instance and the fleet it belongs to. You can retrieve a -fleet's instance IDs by calling DescribeInstances. 
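# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# GetComputeAccess on a managed EC2 fleet, as described above. The module name
# and identifiers are placeholders, and the "Credentials" response key follows
# the AWS API shape; it is an assumption here.
using AWS
@service Game_Lift

access = Game_Lift.get_compute_access(
    "i-0123456789abcdef0",           # EC2 fleet: use the instance ID
    "fleet-1234abcd-placeholder",
)
creds = access["Credentials"]        # temporary AccessKeyId/SecretAccessKey/SessionToken
# ------------------------------------------------------------------------------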
Learn more Remotely Access Fleet -Instances Debug Fleet Issues Related actions All APIs by task +Requests authorization to remotely connect to an instance in an Amazon GameLift managed +fleet. Use this operation to connect to instances with game servers that use Amazon +GameLift server SDK 4.x or earlier. To connect to instances with game servers that use +server SDK 5.x or later, call GetComputeAccess. To request access to an instance, specify +IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet +by calling DescribeInstances with the fleet ID. If successful, this operation returns an +IP address and credentials. The returned credentials match the operating system of the +instance, as follows: For a Windows instance: returns a user name and secret (password) +for use with a Windows Remote Desktop client. For a Linux instance: returns a user name +and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem +file. If you're using the CLI, see the example Get credentials for a Linux instance for +tips on automatically saving the secret to a .pem file. Learn more Remotely connect +to fleet instances Debug fleet issues Related actions All APIs by task # Arguments -- `fleet_id`: A unique identifier for the fleet that contains the instance you want access - to. You can use either the fleet ID or ARN value. The fleet can be in any of the following - statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a - short time before they are deleted. -- `instance_id`: A unique identifier for the instance you want to get access to. You can - access an instance in any status. +- `fleet_id`: A unique identifier for the fleet that contains the instance you want to + access. You can request access to instances in EC2 fleets with the following statuses: + ACTIVATING, ACTIVE, or ERROR. Use either a fleet ID or an ARN value. You can access + fleets in ERROR status for a short period of time before Amazon GameLift deletes them. +- `instance_id`: A unique identifier for the instance you want to access. You can access an + instance in any status. """ function get_instance_access( @@ -3557,17 +3832,30 @@ end list_compute(fleet_id) list_compute(fleet_id, params::Dict{String,<:Any}) -Retrieves all compute resources registered to a fleet in your Amazon Web Services account. -You can filter the result set by location. + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Retrieves information on the compute resources in an +Amazon GameLift fleet. To request a list of computes, specify the fleet ID. Use the +pagination parameters to retrieve results in a set of sequential pages. You can filter the +result set by location. If successful, this operation returns information on all computes +in the requested fleet. Depending on the fleet's compute type, the result includes the +following information: For EC2 fleets, this operation returns information about the EC2 +instance. Compute names are instance IDs. For ANYWHERE fleets, this operation returns the +compute names and details provided when the compute was registered with RegisterCompute. +The GameLiftServiceSdkEndpoint or GameLiftAgentEndpoint is included. For CONTAINER +fleets, this operation returns information about containers that are registered as +computes, and the instances they're running on. Compute names are container names. 
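# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# ListCompute with the Location filter and NextToken pagination described in
# the docstring above. The module name, fleet ID, and location are
# placeholders, and the "ComputeList"/"ComputeName"/"NextToken" response keys
# follow the AWS API shape; they are assumptions here.
using AWS
@service Game_Lift

params = Dict{String,Any}("Location" => "us-west-2", "Limit" => 20)
compute_names = String[]
while true
    page = Game_Lift.list_compute("fleet-1234abcd-placeholder", params)
    append!(compute_names, [c["ComputeName"] for c in get(page, "ComputeList", [])])
    token = get(page, "NextToken", nothing)
    token === nothing && break
    params["NextToken"] = token
end
# ------------------------------------------------------------------------------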
# Arguments -- `fleet_id`: A unique identifier for the fleet the compute resources are registered to. +- `fleet_id`: A unique identifier for the fleet to retrieve compute resources for. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Limit"`: The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. -- `"Location"`: The name of the custom location that the compute resources are assigned to. +- `"Location"`: The name of a location to retrieve compute resources for. For an Amazon + GameLift Anywhere fleet, use a custom location. For a multi-location EC2 or container + fleet, provide a Amazon Web Services Region or Local Zone code (for example: us-west-2 or + us-west-2-lax-1). - `"NextToken"`: A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. @@ -3591,29 +3879,74 @@ function list_compute( ) end +""" + list_container_group_definitions() + list_container_group_definitions(params::Dict{String,<:Any}) + + This operation is used with the Amazon GameLift containers feature, which is currently in +public preview. Retrieves all container group definitions for the Amazon Web Services +account and Amazon Web Services Region that are currently in use. You can filter the result +set by the container groups' scheduling strategy. Use the pagination parameters to retrieve +results in a set of sequential pages. This operation returns the list of container group +definitions in no particular order. Learn more Manage a container group definition + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: The maximum number of results to return. Use this parameter with NextToken to + get results as a set of sequential pages. +- `"NextToken"`: A token that indicates the start of the next sequential page of results. + Use the token that is returned with a previous call to this operation. To start at the + beginning of the result set, do not specify a value. +- `"SchedulingStrategy"`: The type of container group definitions to retrieve. DAEMON -- + Daemon container groups run background processes and are deployed once per fleet instance. + REPLICA -- Replica container groups run your game server application and supporting + software. Replica groups might be deployed multiple times per fleet instance. +""" +function list_container_group_definitions(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return gamelift( + "ListContainerGroupDefinitions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_container_group_definitions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return gamelift( + "ListContainerGroupDefinitions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_fleets() list_fleets(params::Dict{String,<:Any}) -Retrieves a collection of fleet resources in an Amazon Web Services Region. You can call -this operation to get fleets in a previously selected default Region (see -https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-region.htmlor specify a -Region in your request. You can filter the result set to find only those fleets that are -deployed with a specific build or script. 
For fleets that have multiple locations, this -operation retrieves fleets based on their home Region only. This operation can be used in -the following ways: To get a list of all fleets in a Region, don't provide a build or -script identifier. To get a list of all fleets where a specific custom game build is -deployed, provide the build ID. To get a list of all Realtime Servers fleets with a -specific configuration script, provide the script ID. Use the pagination parameters to -retrieve results as a set of sequential pages. If successful, a list of fleet IDs that -match the request parameters is returned. A NextToken value is also returned if there are -more result pages to retrieve. Fleet resources are not listed in a particular order. -Learn more Setting up Amazon GameLift fleets + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Retrieves a collection of fleet resources in an Amazon Web +Services Region. You can filter the result set to find only those fleets that are deployed +with a specific build or script. For fleets that have multiple locations, this operation +retrieves fleets based on their home Region only. You can use this operation in the following +ways: To get a list of all fleets in a Region, don't provide a build or script +identifier. To get a list of all fleets where a specific game build is deployed, provide +the build ID. To get a list of all Realtime Servers fleets with a specific configuration +script, provide the script ID. To get a list of all fleets with a specific container +group definition, provide the ContainerGroupDefinition ID. Use the pagination parameters +to retrieve results as a set of sequential pages. If successful, this operation returns a +list of fleet IDs that match the request parameters. A NextToken value is also returned if +there are more result pages to retrieve. Fleet IDs are returned in no particular order. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BuildId"`: A unique identifier for the build to request fleets for. Use this parameter to return only fleets using a specified build. Use either the build ID or ARN value. +- `"ContainerGroupDefinitionName"`: The container group definition name to request fleets + for. Use this parameter to return only fleets that are deployed with the specified + container group definition. - `"Limit"`: The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. - `"NextToken"`: A token that indicates the start of the next sequential page of results. @@ -3945,28 +4278,38 @@ end register_compute(compute_name, fleet_id) register_compute(compute_name, fleet_id, params::Dict{String,<:Any}) -Registers your compute resources in a fleet you previously created. After you register a -compute to your fleet, you can monitor and manage your compute using Amazon GameLift. The -operation returns the compute resource containing SDK endpoint you can use to connect your -game server to Amazon GameLift. Learn more Create an Anywhere fleet Test your -integration + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Registers a compute resource in an Amazon GameLift fleet. +Register computes with an Amazon GameLift Anywhere fleet or a container fleet.
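# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# the ListFleets filters documented above (BuildId and
# ContainerGroupDefinitionName). The module name and identifiers are
# placeholders.
using AWS
@service Game_Lift

by_build = Game_Lift.list_fleets(
    Dict{String,Any}("BuildId" => "build-1234abcd-placeholder")
)
by_container_group = Game_Lift.list_fleets(
    Dict{String,Any}("ContainerGroupDefinitionName" => "MyReplicaContainerGroup")
)
# ------------------------------------------------------------------------------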
For an +Anywhere fleet or a container fleet that's running the Amazon GameLift Agent, the Agent +handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the +Agent, call this operation to register fleet computes. To register a compute, give the +compute a name (must be unique within the fleet) and specify the compute resource's DNS +name or IP address. Provide a fleet ID and a fleet location to associate with the compute +being registered. You can optionally include the path to a TLS certificate on the compute +resource. If successful, this operation returns compute details, including an Amazon +GameLift SDK endpoint or Agent endpoint. Game server processes running on the compute can +use this endpoint to communicate with the Amazon GameLift service. Each server process +includes the SDK endpoint in its call to the Amazon GameLift server SDK action InitSDK(). +To view compute details, call DescribeCompute with the compute name. Learn more +Create an Anywhere fleet Test your integration Server SDK reference guides (for +version 5.x) # Arguments -- `compute_name`: A descriptive label that is associated with the compute resource - registered to your fleet. +- `compute_name`: A descriptive label for the compute resource. - `fleet_id`: A unique identifier for the fleet to register the compute to. You can use either the fleet ID or ARN value. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CertificatePath"`: The path to the TLS certificate on your compute resource. The path - and certificate are not validated by Amazon GameLift. -- `"DnsName"`: The DNS name of the compute resource. Amazon GameLift requires the DNS name - or IP address to manage your compute resource. -- `"IpAddress"`: The IP address of the compute resource. Amazon GameLift requires the DNS - name or IP address to manage your compute resource. -- `"Location"`: The name of the custom location you added to the fleet you are registering - this compute resource to. +- `"CertificatePath"`: The path to a TLS certificate on your compute resource. Amazon + GameLift doesn't validate the path and certificate. +- `"DnsName"`: The DNS name of the compute resource. Amazon GameLift requires either a DNS + name or IP address. +- `"IpAddress"`: The IP address of the compute resource. Amazon GameLift requires either a + DNS name or IP address. +- `"Location"`: The name of a custom location to associate with the compute resource being + registered. """ function register_compute( ComputeName, FleetId; aws_config::AbstractAWSConfig=global_aws_config() @@ -4081,7 +4424,7 @@ end Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift's Amazon S3. This is done as part of the build creation process; see -GameSession. To request new credentials, specify the build ID as returned with an initial +CreateBuild. To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials are returned, along with the S3 storage location associated with the build ID. Learn more Create a Build with Files in S3 All APIs by task @@ -4116,8 +4459,10 @@ end resolve_alias(alias_id) resolve_alias(alias_id, params::Dict{String,<:Any}) -Retrieves the fleet ID that an alias is currently pointing to. Related actions All APIs -by task +Attempts to retrieve a fleet ID that is associated with an alias. Specify a unique alias +identifier. 
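# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# registering an Anywhere compute and then requesting an auth token for it, as
# described in the RegisterCompute and GetComputeAuthToken docstrings. The
# module name, fleet ID, custom location, and IP address are placeholders.
using AWS
@service Game_Lift

Game_Lift.register_compute(
    "MyLaptopCompute",
    "fleet-anywhere-placeholder",
    Dict{String,Any}("IpAddress" => "10.0.0.42", "Location" => "custom-home-office"),
)

auth = Game_Lift.get_compute_auth_token("MyLaptopCompute", "fleet-anywhere-placeholder")
# ------------------------------------------------------------------------------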
If the alias has a SIMPLE routing strategy, Amazon GameLift returns a fleet ID. +If the alias has a TERMINAL routing strategy, the result is a +TerminalRoutingStrategyException. Related actions All APIs by task # Arguments - `alias_id`: The unique identifier of the alias that you want to retrieve a fleet ID for. @@ -4204,43 +4549,44 @@ end search_game_sessions(params::Dict{String,<:Any}) Retrieves all active game sessions that match a set of search criteria and sorts them into -a specified order. This operation is not designed to be continually called to track game -session status. This practice can cause you to exceed your API limit, which results in -errors. Instead, you must configure configure an Amazon Simple Notification Service (SNS) -topic to receive notifications from FlexMatch or queues. Continuously polling game session -status with DescribeGameSessions should only be used for games in development with low game -session usage. When searching for game sessions, you specify exactly where you want to -search and provide a search filter expression, a sort expression, or both. A search request -can search only one fleet, but it can search all of a fleet's locations. This operation -can be used in the following ways: To search all game sessions that are currently -running on all locations in a fleet, provide a fleet or alias ID. This approach returns -game sessions in the fleet's home Region and all remote locations that fit the search -criteria. To search all game sessions that are currently running on a specific fleet -location, provide a fleet or alias ID and a location name. For location, you can specify a -fleet's home Region or any remote location. Use the pagination parameters to retrieve -results as a set of sequential pages. If successful, a GameSession object is returned for -each game session that matches the request. Search finds game sessions that are in ACTIVE -status only. To retrieve information on game sessions in other statuses, use -DescribeGameSessions . You can search or sort by the following game session attributes: -gameSessionId -- A unique identifier for the game session. You can use either a -GameSessionId or GameSessionArn value. gameSessionName -- Name assigned to a game -session. Game session names do not need to be unique to a game session. -gameSessionProperties -- Custom data defined in a game session's GameProperty parameter. -GameProperty values are stored as key:value pairs; the filter expression must indicate the -key and a string to search the data values for. For example, to search for game sessions -with custom data containing the key:value pair \"gameMode:brawl\", specify the following: -gameSessionProperties.gameMode = \"brawl\". All custom data values are searched as strings. - maximumSessions -- Maximum number of player sessions allowed for a game session. -creationTimeMillis -- Value indicating when a game session was created. It is expressed in -Unix time as milliseconds. playerSessionCount -- Number of players currently connected -to a game session. This value changes rapidly as players join the session or drop out. -hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached -its maximum number of players. It is highly recommended that all search requests include -this filter attribute to optimize search performance and return only sessions that players -can join. Returned values for playerSessionCount and hasAvailablePlayerSessions change -quickly as players join sessions and others drop out. 
Results should be considered a -snapshot in time. Be sure to refresh search results often, and handle sessions that fill up -before a player can join. All APIs by task +a specified order. This operation is not designed to continually track game session status +because that practice can cause you to exceed your API limit and generate errors. Instead, +configure an Amazon Simple Notification Service (Amazon SNS) topic to receive notifications +from a matchmaker or a game session placement queue. When searching for game sessions, you +specify exactly where you want to search and provide a search filter expression, a sort +expression, or both. A search request can search only one fleet, but it can search all of a +fleet's locations. This operation can be used in the following ways: To search all game +sessions that are currently running on all locations in a fleet, provide a fleet or alias +ID. This approach returns game sessions in the fleet's home Region and all remote locations +that fit the search criteria. To search all game sessions that are currently running on a +specific fleet location, provide a fleet or alias ID and a location name. For location, you +can specify a fleet's home Region or any remote location. Use the pagination parameters +to retrieve results as a set of sequential pages. If successful, a GameSession object is +returned for each game session that matches the request. Search finds game sessions that +are in ACTIVE status only. To retrieve information on game sessions in other statuses, use +DescribeGameSessions . To set search and sort criteria, create a filter expression using +the following game session attributes. For game session search examples, see the Examples +section of this topic. gameSessionId -- A unique identifier for the game session. You +can use either a GameSessionId or GameSessionArn value. gameSessionName -- Name +assigned to a game session. Game session names do not need to be unique to a game session. + gameSessionProperties -- A set of key-value pairs that can store custom data in a game +session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. The filter +expression must specify the GameProperty -- a Key and a string Value to search for the game +sessions. For example, to search for the above key-value pair, specify the following search +filter: gameSessionProperties.difficulty = \"novice\". All game property values are +searched as strings. For examples of searching game sessions, see the ones below, and also +see Search game sessions by game property. maximumSessions -- Maximum number of player +sessions allowed for a game session. creationTimeMillis -- Value indicating when a game +session was created. It is expressed in Unix time as milliseconds. playerSessionCount -- +Number of players currently connected to a game session. This value changes rapidly as +players join the session or drop out. hasAvailablePlayerSessions -- Boolean value +indicating whether a game session has reached its maximum number of players. It is highly +recommended that all search requests include this filter attribute to optimize search +performance and return only sessions that players can join. Returned values for +playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions +and others drop out. Results should be considered a snapshot in time. Be sure to refresh +search results often, and handle sessions that fill up before a player can join. 
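# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# a game-property search like the gameSessionProperties.difficulty = "novice"
# example in the docstring above. The module name and fleet ID are
# placeholders, and the "FleetId"/"FilterExpression"/"SortExpression" request
# keys follow the AWS API shape; they are assumptions here.
using AWS
@service Game_Lift

sessions = Game_Lift.search_game_sessions(Dict{String,Any}(
    "FleetId" => "fleet-1234abcd-placeholder",
    "FilterExpression" => "gameSessionProperties.difficulty = \"novice\" AND hasAvailablePlayerSessions = true",
    "SortExpression" => "creationTimeMillis ASC",
))
# ------------------------------------------------------------------------------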
All +APIs by task # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4394,9 +4740,8 @@ it with a different queue. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DesiredPlayerSessions"`: Set of information on each player to create a player session for. -- `"GameProperties"`: A set of custom properties for a game session, formatted as key:value - pairs. These properties are passed to a game server process with a request to start a new - game session (see Start a Game Session). +- `"GameProperties"`: A set of key-value pairs that can store custom data in a game + session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. - `"GameSessionData"`: A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). @@ -4613,7 +4958,8 @@ ways: To stop actions on instances in the fleet's home Region, provide a flee the type of actions to suspend. To stop actions on instances in one of the fleet's remote locations, provide a fleet ID, a location name, and the type of actions to suspend. If successful, Amazon GameLift no longer initiates scaling events except in response to -manual changes using UpdateFleetCapacity. Learn more Setting up Amazon GameLift Fleets +manual changes using UpdateFleetCapacity. To restart fleet actions again, call +StartFleetActions. Learn more Setting up Amazon GameLift Fleets # Arguments - `actions`: List of actions to suspend on the fleet. @@ -4897,10 +5243,10 @@ end update_alias(alias_id) update_alias(alias_id, params::Dict{String,<:Any}) -Updates properties for an alias. To update properties, specify the alias ID to be updated -and provide the information to be changed. To reassign an alias to another fleet, provide -an updated routing strategy. If successful, the updated alias record is returned. Related -actions All APIs by task +Updates properties for an alias. Specify the unique identifier of the alias to be updated +and the new property values. When reassigning an alias to a new fleet, provide an updated +routing strategy. If successful, the updated alias record is returned. Related actions +All APIs by task # Arguments - `alias_id`: A unique identifier for the alias that you want to update. You can use either @@ -4976,10 +5322,10 @@ end update_fleet_attributes(fleet_id) update_fleet_attributes(fleet_id, params::Dict{String,<:Any}) -Updates a fleet's mutable attributes, including game session protection and resource -creation limits. To update fleet attributes, specify the fleet ID and the property values -that you want to change. If successful, an updated FleetAttributes object is returned. -Learn more Setting up Amazon GameLift fleets +Updates a fleet's mutable attributes, such as game session protection and resource creation +limits. To update fleet attributes, specify the fleet ID and the property values that you +want to change. If successful, Amazon GameLift returns the identifiers for the updated +fleet. Learn more Setting up Amazon GameLift fleets # Arguments - `fleet_id`: A unique identifier for the fleet to update attribute metadata for. You can @@ -4996,10 +5342,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Name"`: A descriptive label that is associated with a fleet. Fleet names do not need to be unique. 
- `"NewGameSessionProtectionPolicy"`: The game session protection policy to apply to all - new instances created in this fleet. Instances that already exist are not affected. You can - set protection for individual instances using UpdateGameSession . NoProtection -- The - game session can be terminated during a scale-down event. FullProtection -- If the game - session is in an ACTIVE status, it cannot be terminated during a scale-down event. + new game sessions created in this fleet. Game sessions that already exist are not affected. + You can set protection for individual game sessions using UpdateGameSession . + NoProtection -- The game session can be terminated during a scale-down event. + FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated + during a scale-down event. - `"ResourceCreationLimitPolicy"`: Policy settings that limit the number of game sessions an individual player can create over a span of time. """ @@ -5026,28 +5373,30 @@ end update_fleet_capacity(fleet_id) update_fleet_capacity(fleet_id, params::Dict{String,<:Any}) -Updates capacity settings for a fleet. For fleets with multiple locations, use this -operation to manage capacity settings in each location individually. Fleet capacity -determines the number of game sessions and players that can be hosted based on the fleet -configuration. Use this operation to set the following fleet capacity properties: -Minimum/maximum size: Set hard limits on fleet capacity. Amazon GameLift cannot set the -fleet's capacity to a value outside of this range, whether the capacity is changed manually -or through automatic scaling. Desired capacity: Manually set the number of Amazon EC2 -instances to be maintained in a fleet location. Before changing a fleet's desired capacity, -you may want to call DescribeEC2InstanceLimits to get the maximum capacity of the fleet's -Amazon EC2 instance type. Alternatively, consider using automatic scaling to adjust -capacity based on player demand. This operation can be used in the following ways: To -update capacity for a fleet's home Region, or if the fleet has no remote locations, omit -the Location parameter. The fleet must be in ACTIVE status. To update capacity for a -fleet's remote location, include the Location parameter set to the location to be updated. -The location must be in ACTIVE status. If successful, capacity settings are updated -immediately. In response a change in desired capacity, Amazon GameLift initiates steps to -start new instances or terminate existing instances in the requested fleet location. This -continues until the location's active instance count matches the new desired instance -count. You can track a fleet's current capacity by calling DescribeFleetCapacity or -DescribeFleetLocationCapacity. If the requested desired instance count is higher than the -instance type's limit, the LimitExceeded exception occurs. Learn more Scaling fleet -capacity + This operation has been expanded to use with the Amazon GameLift containers feature, which +is currently in public preview. Updates capacity settings for a managed EC2 fleet or +container fleet. For these fleets, you adjust capacity by changing the number of instances +in the fleet. Fleet capacity determines the number of game sessions and players that the +fleet can host based on its configuration. For fleets with multiple locations, use this +operation to manage capacity settings in each location individually. 
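# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated service code) for
# UpdateFleetCapacity on a single fleet location, using the Location,
# DesiredInstances, and MaxSize keys from this function's optional parameters
# (MinSize belongs to the same AWS API but is not shown in this hunk). The
# module name, fleet ID, and values are placeholders.
using AWS
@service Game_Lift

Game_Lift.update_fleet_capacity(
    "fleet-1234abcd-placeholder",
    Dict{String,Any}(
        "Location" => "us-west-2",
        "MinSize" => 1,
        "DesiredInstances" => 3,
        "MaxSize" => 10,
    ),
)
# ------------------------------------------------------------------------------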
Use this operation to +set these fleet capacity properties: Minimum/maximum size: Set hard limits on the number +of Amazon EC2 instances allowed. If Amazon GameLift receives a request--either through +manual update or automatic scaling--it won't change the capacity to a value outside of this +range. Desired capacity: As an alternative to automatic scaling, manually set the number +of Amazon EC2 instances to be maintained. Before changing a fleet's desired capacity, check +the maximum capacity of the fleet's Amazon EC2 instance type by calling +DescribeEC2InstanceLimits. To update capacity for a fleet's home Region, or if the fleet +has no remote locations, omit the Location parameter. The fleet must be in ACTIVE status. +To update capacity for a fleet's remote location, set the Location parameter to the +location to update. The location must be in ACTIVE status. If successful, Amazon GameLift +updates the capacity settings and returns the identifiers for the updated fleet and/or +location. If a requested change to desired capacity exceeds the instance type's limit, the +LimitExceeded exception occurs. Updates often prompt an immediate change in fleet +capacity, such as when current capacity is different than the new desired capacity or +outside the new limits. In this scenario, Amazon GameLift automatically initiates steps to +add or remove instances in the fleet location. You can track a fleet's current capacity by +calling DescribeFleetCapacity or DescribeFleetLocationCapacity. Learn more Scaling +fleet capacity # Arguments - `fleet_id`: A unique identifier for the fleet to update capacity settings for. You can @@ -5057,6 +5406,8 @@ capacity Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DesiredInstances"`: The number of Amazon EC2 instances you want to maintain in the specified fleet location. This value must fall between the minimum and maximum size limits. + Changes in desired instance value can take up to 1 minute to be reflected when viewing the + fleet's capacity settings. - `"Location"`: The name of a remote location to update fleet capacity settings for, in the form of an Amazon Web Services Region code such as us-west-2. - `"MaxSize"`: The maximum number of instances that are allowed in the specified fleet @@ -5087,16 +5438,16 @@ end update_fleet_port_settings(fleet_id) update_fleet_port_settings(fleet_id, params::Dict{String,<:Any}) -Updates permissions that allow inbound traffic to connect to game sessions that are being -hosted on instances in the fleet. To update settings, specify the fleet ID to be updated -and specify the changes to be made. List the permissions you want to add in -InboundPermissionAuthorizations, and permissions you want to remove in -InboundPermissionRevocations. Permissions to be removed must match existing fleet -permissions. If successful, the fleet ID for the updated fleet is returned. For fleets -with remote locations, port setting updates can take time to propagate across all -locations. You can check the status of updates in each location by calling -DescribeFleetPortSettings with a location name. Learn more Setting up Amazon GameLift -fleets +Updates permissions that allow inbound traffic to connect to game sessions in the fleet. +To update settings, specify the fleet ID to be updated and specify the changes to be made. +List the permissions you want to add in InboundPermissionAuthorizations, and permissions +you want to remove in InboundPermissionRevocations. 
Permissions to be removed must match +existing fleet permissions. For a container fleet, inbound permissions must specify port +numbers that are defined in the fleet's connection port settings. If successful, the fleet +ID for the updated fleet is returned. For fleets with remote locations, port setting +updates can take time to propagate across all locations. You can check the status of +updates in each location by calling DescribeFleetPortSettings with a location name. Learn +more Setting up Amazon GameLift fleets # Arguments - `fleet_id`: A unique identifier for the fleet to update port settings for. You can use @@ -5135,20 +5486,21 @@ end update_game_server(game_server_group_name, game_server_id, params::Dict{String,<:Any}) This operation is used with the Amazon GameLift FleetIQ solution and game server groups. -Updates information about a registered game server to help Amazon GameLift FleetIQ to track +Updates information about a registered game server to help Amazon GameLift FleetIQ track game server availability. This operation is called by a game server process that is running on an instance in a game server group. Use this operation to update the following types of game server information. You can make all three types of updates in the same request: To -update the game server's utilization status, identify the game server and game server group -and specify the current utilization status. Use this status to identify when game servers -are currently hosting games and when they are available to be claimed. To report health -status, identify the game server and game server group and set health check to HEALTHY. If -a game server does not report health status for a certain length of time, the game server -is no longer considered healthy. As a result, it will be eventually deregistered from the -game server group to avoid affecting utilization metrics. The best practice is to report -health every 60 seconds. To change game server metadata, provide updated game server -data. Once a game server is successfully updated, the relevant statuses and timestamps -are updated. Learn more Amazon GameLift FleetIQ Guide +update the game server's utilization status from AVAILABLE (when the game server is +available to be claimed) to UTILIZED (when the game server is currently hosting games). +Identify the game server and game server group and specify the new utilization status. You +can't change the status from UTILIZED to AVAILABLE. To report health status, identify +the game server and game server group and set health check to HEALTHY. If a game server +does not report health status for a certain length of time, the game server is no longer +considered healthy. As a result, it will be eventually deregistered from the game server +group to avoid affecting utilization metrics. The best practice is to report health every +60 seconds. To change game server metadata, provide updated game server data. Once a +game server is successfully updated, the relevant statuses and timestamps are updated. +Learn more Amazon GameLift FleetIQ Guide # Arguments - `game_server_group_name`: A unique identifier for the game server group where the game @@ -5162,8 +5514,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys servers. - `"HealthCheck"`: Indicates health status of the game server. A request that includes this parameter updates the game server's LastHealthCheckTime timestamp.
-- `"UtilizationStatus"`: Indicates whether the game server is available or is currently - hosting gameplay. +- `"UtilizationStatus"`: Indicates if the game server is available or is currently hosting + gameplay. You can update a game server status from AVAILABLE to UTILIZED, but you can't + change the status from UTILIZED to AVAILABLE. """ function update_game_server( GameServerGroupName, GameServerId; aws_config::AbstractAWSConfig=global_aws_config() ) @@ -5293,6 +5646,11 @@ object is returned. All APIs by task # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"GameProperties"`: A set of key-value pairs that can store custom data in a game + session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. You can use this + parameter to modify game properties in an active game session. This action adds new + properties and modifies existing properties. There is no way to delete properties. For an + example, see Update the value of a game property. - `"MaximumPlayerSessionCount"`: The maximum number of players that can be connected simultaneously to the game session. - `"Name"`: A descriptive label that is associated with a game session. Session names do not @@ -5413,8 +5771,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys accept a proposed match, if acceptance is required. - `"AdditionalPlayerCount"`: The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single - 12-person team, and the additional player count is set to 2, only 10 players are selected - for the match. This parameter is not used if FlexMatchMode is set to STANDALONE. + 10-person team, and the additional player count is set to 2, 10 players will be selected + for the match and 2 more player slots will be open for future players. This parameter is + not used if FlexMatchMode is set to STANDALONE. - `"BackfillMode"`: The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a @@ -5429,11 +5788,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event. WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift queue to start a game session for the match. -- `"GameProperties"`: A set of custom properties for a game session, formatted as key:value - pairs. These properties are passed to a game server process with a request to start a new - game session (see Start a Game Session). This information is added to the new GameSession - object that is created for a successful match. This parameter is not used if FlexMatchMode - is set to STANDALONE. +- `"GameProperties"`: A set of key-value pairs that can store custom data in a game + session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. This information is + added to the new GameSession object that is created for a successful match. This parameter + is not used if FlexMatchMode is set to STANDALONE. - `"GameSessionData"`: A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session (see Start a Game Session).
This information is added to the game session that @@ -5479,24 +5837,27 @@ end update_runtime_configuration(fleet_id, runtime_configuration) update_runtime_configuration(fleet_id, runtime_configuration, params::Dict{String,<:Any}) -Updates the current runtime configuration for the specified fleet, which tells Amazon -GameLift how to launch server processes on all instances in the fleet. You can update a -fleet's runtime configuration at any time after the fleet is created; it does not need to -be in ACTIVE status. To update runtime configuration, specify the fleet ID and provide a -RuntimeConfiguration with an updated set of server process configurations. If successful, -the fleet's runtime configuration settings are updated. Each instance in the fleet -regularly checks for and retrieves updated runtime configurations. Instances immediately -begin complying with the new configuration by launching new server processes or not -replacing existing processes when they shut down. Updating a fleet's runtime configuration -never affects existing server processes. Learn more Setting up Amazon GameLift fleets +Updates the runtime configuration for the specified fleet. The runtime configuration tells +Amazon GameLift how to launch server processes on computes in the fleet. For managed EC2 +fleets, it determines what server processes to run on each fleet instance. For container +fleets, it describes what server processes to run in each replica container group. You can +update a fleet's runtime configuration at any time after the fleet is created; it does not +need to be in ACTIVE status. To update runtime configuration, specify the fleet ID and +provide a RuntimeConfiguration with an updated set of server process configurations. If +successful, the fleet's runtime configuration settings are updated. Fleet computes that run +game server processes regularly check for and receive updated runtime configurations. The +computes immediately take action to comply with the new configuration by launching new +server processes or by not replacing existing processes when they shut down. Updating a +fleet's runtime configuration never affects existing server processes. Learn more +Setting up Amazon GameLift fleets # Arguments - `fleet_id`: A unique identifier for the fleet to update runtime configuration for. You can use either the fleet ID or ARN value. -- `runtime_configuration`: Instructions for launching server processes on each instance in - the fleet. Server processes run either a custom game build executable or a Realtime Servers - script. The runtime configuration lists the types of server processes to run on an - instance, how to launch them, and the number of processes to run concurrently. +- `runtime_configuration`: Instructions for launching server processes on fleet computes. + Server processes run either a custom game build executable or a Realtime Servers script. + The runtime configuration lists the types of server processes to run, how to launch them, + and the number of processes to run concurrently. """ function update_runtime_configuration( diff --git a/src/services/gamesparks.jl b/src/services/gamesparks.jl deleted file mode 100644 index 99d0cf6e0f..0000000000 --- a/src/services/gamesparks.jl +++ /dev/null @@ -1,1257 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: gamesparks -using AWS.Compat -using AWS.UUIDs - -""" - create_game(game_name) - create_game(game_name, params::Dict{String,<:Any}) - - Creates a new game with an empty configuration. 
After creating your game, you can update -the configuration using UpdateGameConfiguration or ImportGameConfiguration. - -# Arguments -- `game_name`: The name of the game. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: A client-defined token. With an active client token in the request, - this action is idempotent. -- `"Description"`: The description of the game. -- `"Tags"`: The list of tags to apply to the game. -""" -function create_game(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "POST", - "/game", - Dict{String,Any}("GameName" => GameName); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_game( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "POST", - "/game", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("GameName" => GameName), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_snapshot(game_name) - create_snapshot(game_name, params::Dict{String,<:Any}) - -Creates a snapshot of the game configuration. - -# Arguments -- `game_name`: The name of the game. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the snapshot. -""" -function create_snapshot(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "POST", - "/game/$(GameName)/snapshot"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_snapshot( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "POST", - "/game/$(GameName)/snapshot", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_stage(game_name, role, stage_name) - create_stage(game_name, role, stage_name, params::Dict{String,<:Any}) - -Creates a new stage for stage-by-stage game development and deployment. - -# Arguments -- `game_name`: The name of the game. -- `role`: The Amazon Resource Name (ARN) of the role to run the game with. This role can - be a game-defined role or the default role that GameSparks created. -- `stage_name`: The name of the stage. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: A client-defined token. With an active client token in the request, - this action is idempotent. -- `"Description"`: The description of the stage. -- `"Tags"`: The list of tags to apply to the stage. -""" -function create_stage( - GameName, Role, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "POST", - "/game/$(GameName)/stage", - Dict{String,Any}("Role" => Role, "StageName" => StageName); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_stage( - GameName, - Role, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "POST", - "/game/$(GameName)/stage", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("Role" => Role, "StageName" => StageName), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_game(game_name) - delete_game(game_name, params::Dict{String,<:Any}) - -Deletes a game. 
- -# Arguments -- `game_name`: The name of the game to delete. - -""" -function delete_game(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "DELETE", - "/game/$(GameName)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_game( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "DELETE", - "/game/$(GameName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_stage(game_name, stage_name) - delete_stage(game_name, stage_name, params::Dict{String,<:Any}) - -Deletes a stage from a game, along with the associated game runtime. - -# Arguments -- `game_name`: The name of the game. -- `stage_name`: The name of the stage to delete. - -""" -function delete_stage( - GameName, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "DELETE", - "/game/$(GameName)/stage/$(StageName)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_stage( - GameName, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "DELETE", - "/game/$(GameName)/stage/$(StageName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disconnect_player(game_name, player_id, stage_name) - disconnect_player(game_name, player_id, stage_name, params::Dict{String,<:Any}) - -Disconnects a player from the game runtime. If a player has multiple connections, this -operation attempts to close all of them. - -# Arguments -- `game_name`: The name of the game. -- `player_id`: The unique identifier representing a player. -- `stage_name`: The name of the stage. - -""" -function disconnect_player( - GameName, PlayerId, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "POST", - "/runtime/game/$(GameName)/stage/$(StageName)/player/$(PlayerId)/disconnect"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disconnect_player( - GameName, - PlayerId, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "POST", - "/runtime/game/$(GameName)/stage/$(StageName)/player/$(PlayerId)/disconnect", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - export_snapshot(game_name, snapshot_id) - export_snapshot(game_name, snapshot_id, params::Dict{String,<:Any}) - -Exports a game configuration snapshot. - -# Arguments -- `game_name`: The name of the game. -- `snapshot_id`: The identifier of the snapshot to export. - -""" -function export_snapshot( - GameName, SnapshotId; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)/export"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function export_snapshot( - GameName, - SnapshotId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)/export", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_extension(name, namespace) - get_extension(name, namespace, params::Dict{String,<:Any}) - -Gets details about a specified extension. - -# Arguments -- `name`: The name of the extension. -- `namespace`: The namespace (qualifier) of the extension. 
- -""" -function get_extension(Name, Namespace; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", - "/extension/$(Namespace)/$(Name)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_extension( - Name, - Namespace, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/extension/$(Namespace)/$(Name)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_extension_version(extension_version, name, namespace) - get_extension_version(extension_version, name, namespace, params::Dict{String,<:Any}) - -Gets details about a specified extension version. - -# Arguments -- `extension_version`: The version of the extension. -- `name`: The name of the extension. -- `namespace`: The namespace (qualifier) of the extension. - -""" -function get_extension_version( - ExtensionVersion, Name, Namespace; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/extension/$(Namespace)/$(Name)/version/$(ExtensionVersion)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_extension_version( - ExtensionVersion, - Name, - Namespace, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/extension/$(Namespace)/$(Name)/version/$(ExtensionVersion)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_game(game_name) - get_game(game_name, params::Dict{String,<:Any}) - -Gets details about a game. - -# Arguments -- `game_name`: The name of the game. - -""" -function get_game(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", "/game/$(GameName)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function get_game( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_game_configuration(game_name) - get_game_configuration(game_name, params::Dict{String,<:Any}) - -Gets the configuration of the game. - -# Arguments -- `game_name`: The name of the game. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Sections"`: The list of sections to return. -""" -function get_game_configuration(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", - "/game/$(GameName)/configuration"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_game_configuration( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/configuration", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_generated_code_job(game_name, job_id, snapshot_id) - get_generated_code_job(game_name, job_id, snapshot_id, params::Dict{String,<:Any}) - -Gets details about a job that is generating code for a snapshot. - -# Arguments -- `game_name`: The name of the game. -- `job_id`: The identifier of the code generation job. -- `snapshot_id`: The identifier of the snapshot for the code generation job. 
- -""" -function get_generated_code_job( - GameName, JobId, SnapshotId; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)/generated-sdk-code-job/$(JobId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_generated_code_job( - GameName, - JobId, - SnapshotId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)/generated-sdk-code-job/$(JobId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_player_connection_status(game_name, player_id, stage_name) - get_player_connection_status(game_name, player_id, stage_name, params::Dict{String,<:Any}) - -Gets the status of a player's connection to the game runtime. It's possible for a single -player to have multiple connections to the game runtime. If a player is not connected, this -operation returns an empty list. - -# Arguments -- `game_name`: The name of the game. -- `player_id`: The unique identifier representing a player. -- `stage_name`: The name of the stage. - -""" -function get_player_connection_status( - GameName, PlayerId, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/runtime/game/$(GameName)/stage/$(StageName)/player/$(PlayerId)/connection"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_player_connection_status( - GameName, - PlayerId, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/runtime/game/$(GameName)/stage/$(StageName)/player/$(PlayerId)/connection", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_snapshot(game_name, snapshot_id) - get_snapshot(game_name, snapshot_id, params::Dict{String,<:Any}) - -Gets a copy of the game configuration in a snapshot. - -# Arguments -- `game_name`: The name of the game. -- `snapshot_id`: The identifier of the snapshot. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Sections"`: The list of game configuration sections to be described. -""" -function get_snapshot( - GameName, SnapshotId; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_snapshot( - GameName, - SnapshotId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_stage(game_name, stage_name) - get_stage(game_name, stage_name, params::Dict{String,<:Any}) - -Gets information about a stage. - -# Arguments -- `game_name`: The name of the game. -- `stage_name`: The name of the stage. 
- -""" -function get_stage(GameName, StageName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", - "/game/$(GameName)/stage/$(StageName)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_stage( - GameName, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/stage/$(StageName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_stage_deployment(game_name, stage_name) - get_stage_deployment(game_name, stage_name, params::Dict{String,<:Any}) - -Gets information about a stage deployment. - -# Arguments -- `game_name`: The name of the game. -- `stage_name`: The name of the stage. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeploymentId"`: The identifier of the stage deployment. StartStageDeployment returns - the identifier that you use here. -""" -function get_stage_deployment( - GameName, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/game/$(GameName)/stage/$(StageName)/deployment"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_stage_deployment( - GameName, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/stage/$(StageName)/deployment", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - import_game_configuration(game_name, import_source) - import_game_configuration(game_name, import_source, params::Dict{String,<:Any}) - -Imports a game configuration. This operation replaces the current configuration of the -game with the provided input. This is not a reversible operation. If you want to preserve -the previous configuration, use CreateSnapshot to make a new snapshot before importing. - -# Arguments -- `game_name`: The name of the game. -- `import_source`: The source used to import configuration sections. - -""" -function import_game_configuration( - GameName, ImportSource; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "PUT", - "/game/$(GameName)/configuration", - Dict{String,Any}("ImportSource" => ImportSource); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function import_game_configuration( - GameName, - ImportSource, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "PUT", - "/game/$(GameName)/configuration", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("ImportSource" => ImportSource), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_extension_versions(name, namespace) - list_extension_versions(name, namespace, params::Dict{String,<:Any}) - -Gets a paginated list of available versions for the extension. Each time an API change is -made to an extension, the version is incremented. The list retrieved by this operation -shows the versions that are currently available. - -# Arguments -- `name`: The name of the extension. -- `namespace`: The namespace (qualifier) of the extension. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return. 
Use this parameter with - NextToken to get results as a set of sequential pages. -- `"NextToken"`: The token that indicates the start of the next sequential page of results. - Use the token that is returned with a previous call to this operation. To start at the - beginning of the result set, do not specify a value. -""" -function list_extension_versions( - Name, Namespace; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/extension/$(Namespace)/$(Name)/version"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_extension_versions( - Name, - Namespace, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/extension/$(Namespace)/$(Name)/version", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_extensions() - list_extensions(params::Dict{String,<:Any}) - -Gets a paginated list of available extensions. Extensions provide features that games can -use from scripts. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return. Use this parameter with - NextToken to get results as a set of sequential pages. -- `"NextToken"`: The token that indicates the start of the next sequential page of results. - Use the token that is returned with a previous call to this operation. To start at the - beginning of the result set, do not specify a value. -""" -function list_extensions(; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", "/extension"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_extensions( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", "/extension", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_games() - list_games(params::Dict{String,<:Any}) - -Gets a paginated list of games. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return. Use this parameter with - NextToken to get results as a set of sequential pages. -- `"NextToken"`: The token that indicates the start of the next sequential page of results. - Use the token that is returned with a previous call to this operation. To start at the - beginning of the result set, do not specify a value. -""" -function list_games(; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", "/game"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_games( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", "/game", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_generated_code_jobs(game_name, snapshot_id) - list_generated_code_jobs(game_name, snapshot_id, params::Dict{String,<:Any}) - -Gets a paginated list of code generation jobs for a snapshot. - -# Arguments -- `game_name`: The name of the game. -- `snapshot_id`: The identifier of the snapshot. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return. Use this parameter with - NextToken to get results as a set of sequential pages. 
-- `"NextToken"`: The token that indicates the start of the next sequential page of results. - Use the token that is returned with a previous call to this operation. To start at the - beginning of the result set, do not specify a value. -""" -function list_generated_code_jobs( - GameName, SnapshotId; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)/generated-sdk-code-jobs"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_generated_code_jobs( - GameName, - SnapshotId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot/$(SnapshotId)/generated-sdk-code-jobs", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_snapshots(game_name) - list_snapshots(game_name, params::Dict{String,<:Any}) - -Gets a paginated list of snapshot summaries from the game. - -# Arguments -- `game_name`: The name of the game. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return. Use this parameter with - NextToken to get results as a set of sequential pages. -- `"NextToken"`: The token that indicates the start of the next sequential page of results. - Use the token that is returned with a previous call to this operation. To start at the - beginning of the result set, do not specify a value. -""" -function list_snapshots(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_snapshots( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/snapshot", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_stage_deployments(game_name, stage_name) - list_stage_deployments(game_name, stage_name, params::Dict{String,<:Any}) - -Gets a paginated list of stage deployment summaries from the game. - -# Arguments -- `game_name`: The name of the game. -- `stage_name`: The name of the stage. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return. Use this parameter with - NextToken to get results as a set of sequential pages. -- `"NextToken"`: The token that indicates the start of the next sequential page of results. - Use the token that is returned with a previous call to this operation. To start at the - beginning of the result set, do not specify a value. -""" -function list_stage_deployments( - GameName, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/game/$(GameName)/stage/$(StageName)/deployments"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_stage_deployments( - GameName, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/stage/$(StageName)/deployments", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_stages(game_name) - list_stages(game_name, params::Dict{String,<:Any}) - -Gets a paginated list of stage summaries from the game. 
- -# Arguments -- `game_name`: The name of the game. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return. Use this parameter with - NextToken to get results as a set of sequential pages. -- `"NextToken"`: The token that indicates the start of the next sequential page of results. - Use the token that is returned with a previous call to this operation. To start at the - beginning of the result set, do not specify a value. -""" -function list_stages(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "GET", - "/game/$(GameName)/stage"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_stages( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/game/$(GameName)/stage", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_tags_for_resource(resource_arn) - list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) - -Lists the tags associated with a GameSparks resource. - -# Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the GameSparks resource. - -""" -function list_tags_for_resource( - ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "GET", - "/tags/$(ResourceArn)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_tags_for_resource( - ResourceArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "GET", - "/tags/$(ResourceArn)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - start_generated_code_job(game_name, generator, snapshot_id) - start_generated_code_job(game_name, generator, snapshot_id, params::Dict{String,<:Any}) - - Starts an asynchronous process that generates client code for system-defined and custom -messages. The resulting code is collected as a .zip file and uploaded to a pre-signed -Amazon S3 URL. - -# Arguments -- `game_name`: The name of the game. -- `generator`: Properties of the generator to use for the job. -- `snapshot_id`: The identifier of the snapshot for which to generate code. - -""" -function start_generated_code_job( - GameName, Generator, SnapshotId; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "POST", - "/game/$(GameName)/snapshot/$(SnapshotId)/generated-sdk-code-job", - Dict{String,Any}("Generator" => Generator); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function start_generated_code_job( - GameName, - Generator, - SnapshotId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "POST", - "/game/$(GameName)/snapshot/$(SnapshotId)/generated-sdk-code-job", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("Generator" => Generator), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - start_stage_deployment(game_name, snapshot_id, stage_name) - start_stage_deployment(game_name, snapshot_id, stage_name, params::Dict{String,<:Any}) - -Deploys a snapshot to the stage and creates a new game runtime. After you call this -operation, you can check the deployment status by using GetStageDeployment. If there are -any players connected to the previous game runtime, then both runtimes persist. 
Existing -connections to the previous runtime are maintained. When players disconnect and reconnect, -they connect to the new runtime. After there are no connections to the previous game -runtime, it is deleted. - -# Arguments -- `game_name`: The name of the game. -- `snapshot_id`: The identifier of the snapshot to deploy. -- `stage_name`: The name of the stage to deploy the snapshot onto. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: A client-defined token. With an active client token in the request, - this action is idempotent. -""" -function start_stage_deployment( - GameName, SnapshotId, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "POST", - "/game/$(GameName)/stage/$(StageName)/deployment", - Dict{String,Any}("SnapshotId" => SnapshotId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function start_stage_deployment( - GameName, - SnapshotId, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "POST", - "/game/$(GameName)/stage/$(StageName)/deployment", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("SnapshotId" => SnapshotId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - tag_resource(resource_arn, tags) - tag_resource(resource_arn, tags, params::Dict{String,<:Any}) - -Adds tags to a GameSparks resource. - -# Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource to add the tags to. -- `tags`: The tags to add to the resource. - -""" -function tag_resource(ResourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "POST", - "/tags/$(ResourceArn)", - Dict{String,Any}("tags" => tags); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function tag_resource( - ResourceArn, - tags, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "POST", - "/tags/$(ResourceArn)", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - untag_resource(resource_arn, tag_keys) - untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) - -Removes tags from a GameSparks resource. - -# Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource to remove the tags from. -- `tag_keys`: The keys of the tags to remove. - -""" -function untag_resource( - ResourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "DELETE", - "/tags/$(ResourceArn)", - Dict{String,Any}("tagKeys" => tagKeys); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function untag_resource( - ResourceArn, - tagKeys, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "DELETE", - "/tags/$(ResourceArn)", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_game(game_name) - update_game(game_name, params::Dict{String,<:Any}) - -Updates details of the game. - -# Arguments -- `game_name`: The name of the game. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the game. 
-""" -function update_game(GameName; aws_config::AbstractAWSConfig=global_aws_config()) - return gamesparks( - "PATCH", "/game/$(GameName)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function update_game( - GameName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "PATCH", - "/game/$(GameName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_game_configuration(game_name, modifications) - update_game_configuration(game_name, modifications, params::Dict{String,<:Any}) - -Updates one or more sections of the game configuration. - -# Arguments -- `game_name`: The name of the game. -- `modifications`: The list of modifications to make. - -""" -function update_game_configuration( - GameName, Modifications; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "PATCH", - "/game/$(GameName)/configuration", - Dict{String,Any}("Modifications" => Modifications); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_game_configuration( - GameName, - Modifications, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "PATCH", - "/game/$(GameName)/configuration", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("Modifications" => Modifications), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_snapshot(game_name, snapshot_id) - update_snapshot(game_name, snapshot_id, params::Dict{String,<:Any}) - -Updates the metadata of a GameSparks snapshot. - -# Arguments -- `game_name`: The name of the game. -- `snapshot_id`: The identifier of the snapshot. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the snapshot. -""" -function update_snapshot( - GameName, SnapshotId; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "PATCH", - "/game/$(GameName)/snapshot/$(SnapshotId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_snapshot( - GameName, - SnapshotId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "PATCH", - "/game/$(GameName)/snapshot/$(SnapshotId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_stage(game_name, stage_name) - update_stage(game_name, stage_name, params::Dict{String,<:Any}) - -Updates the metadata of a stage. - -# Arguments -- `game_name`: The name of the game. -- `stage_name`: The name of the stage. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Description"`: The description of the stage. -- `"Role"`: The Amazon Resource Name (ARN) of the role to use for the game snapshots - deployed to this stage. 
-""" -function update_stage( - GameName, StageName; aws_config::AbstractAWSConfig=global_aws_config() -) - return gamesparks( - "PATCH", - "/game/$(GameName)/stage/$(StageName)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_stage( - GameName, - StageName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return gamesparks( - "PATCH", - "/game/$(GameName)/stage/$(StageName)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/global_accelerator.jl b/src/services/global_accelerator.jl index e9b4fbb912..866d683bb7 100644 --- a/src/services/global_accelerator.jl +++ b/src/services/global_accelerator.jl @@ -72,10 +72,13 @@ Add endpoints to an endpoint group. The AddEndpoints API operation is the recomm option for adding endpoints. The alternative options are to add endpoints when you create an endpoint group (with the CreateEndpointGroup API) or when you update an endpoint group (with the UpdateEndpointGroup API). There are two advantages to using AddEndpoints to add -endpoints: It's faster, because Global Accelerator only has to resolve the new endpoints -that you're adding. It's more convenient, because you don't need to specify all of the -current endpoints that are already in the endpoint group in addition to the new endpoints -that you want to add. +endpoints in Global Accelerator: It's faster, because Global Accelerator only has to +resolve the new endpoints that you're adding, rather than resolving new and existing +endpoints. It's more convenient, because you don't need to specify the current endpoints +that are already in the endpoint group, in addition to the new endpoints that you want to +add. For information about endpoint types and requirements for endpoints that you can add +to Global Accelerator, see Endpoints for standard accelerators in the Global Accelerator +Developer Guide. # Arguments - `endpoint_configurations`: The list of endpoint objects. @@ -133,7 +136,9 @@ Developer Guide. # Arguments - `cidr`: The address range, in CIDR notation. This must be the exact range that you - provisioned. You can't advertise only a portion of the provisioned range. + provisioned. You can't advertise only a portion of the provisioned range. For more + information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer + Guide. """ function advertise_byoip_cidr(Cidr; aws_config::AbstractAWSConfig=global_aws_config()) @@ -232,7 +237,7 @@ connections and direct traffic to one or more endpoint groups, each of which inc endpoints, such as Network Load Balancers. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for -example, specify --region us-west-2 on AWS CLI commands. +example, specify --region us-west-2 on Amazon Web Services CLI commands. # Arguments - `idempotency_token`: A unique, case-sensitive identifier that you provide to ensure the @@ -294,6 +299,73 @@ function create_accelerator( ) end +""" + create_cross_account_attachment(idempotency_token, name) + create_cross_account_attachment(idempotency_token, name, params::Dict{String,<:Any}) + +Create a cross-account attachment in Global Accelerator. You create a cross-account +attachment to specify the principals who have permission to work with resources in +accelerators in their own account. 
You specify, in the same attachment, the resources that +are shared. A principal can be an Amazon Web Services account number or the Amazon Resource +Name (ARN) for an accelerator. For account numbers that are listed as principals, to work +with a resource listed in the attachment, you must sign in to an account specified as a +principal. Then, you can work with resources that are listed, with any of your +accelerators. If an accelerator ARN is listed in the cross-account attachment as a +principal, anyone with permission to make updates to the accelerator can work with +resources that are listed in the attachment. Specify each principal and resource +separately. To specify two CIDR address pools, list them individually under Resources, and +so on. For a command line operation, for example, you might use a statement like the +following: \"Resources\": [{\"Cidr\": \"169.254.60.0/24\"},{\"Cidr\": +\"169.254.59.0/24\"}] For more information, see Working with cross-account attachments +and resources in Global Accelerator in the Global Accelerator Developer Guide. + +# Arguments +- `idempotency_token`: A unique, case-sensitive identifier that you provide to ensure the + idempotency—that is, the uniqueness—of the request. +- `name`: The name of the cross-account attachment. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Principals"`: The principals to include in the cross-account attachment. A principal + can be an Amazon Web Services account number or the Amazon Resource Name (ARN) for an + accelerator. +- `"Resources"`: The Amazon Resource Names (ARNs) for the resources to include in the + cross-account attachment. A resource can be any supported Amazon Web Services resource type + for Global Accelerator or a CIDR range for a bring your own IP address (BYOIP) address + pool. +- `"Tags"`: Add tags for a cross-account attachment. For more information, see Tagging in + Global Accelerator in the Global Accelerator Developer Guide. +""" +function create_cross_account_attachment( + IdempotencyToken, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "CreateCrossAccountAttachment", + Dict{String,Any}("IdempotencyToken" => IdempotencyToken, "Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_cross_account_attachment( + IdempotencyToken, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return global_accelerator( + "CreateCrossAccountAttachment", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IdempotencyToken" => IdempotencyToken, "Name" => Name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_custom_routing_accelerator(idempotency_token, name) create_custom_routing_accelerator(idempotency_token, name, params::Dict{String,<:Any}) @@ -306,7 +378,7 @@ to receive traffic, or to specify individual port mappings that can receive traf the AllowCustomRoutingTraffic operation. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for -example, specify --region us-west-2 on AWS CLI commands. +example, specify --region us-west-2 on Amazon Web Services CLI commands. 
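Note for readers calling these generated wrappers from Julia: the "--region us-west-2" requirement above maps to the aws_config keyword that every wrapper in this file accepts. The sketch below is illustrative only and is not part of the generated code; it assumes AWS.jl's @service entry point and the global_aws_config keyword form, ambient credentials, and a placeholder accelerator ARN.

using AWS
using AWS: @service
@service Global_Accelerator

# Global Accelerator control-plane calls must target us-west-2, the Julia
# equivalent of `--region us-west-2` on the CLI.
cfg = global_aws_config(; region="us-west-2")

# Placeholder ARN, purely for illustration.
accelerator_arn = "arn:aws:globalaccelerator::111122223333:accelerator/example"

# Optional request members are passed positionally as a params Dict, matching
# the generated two-argument methods in this file.
Global_Accelerator.update_accelerator(
    accelerator_arn, Dict{String,Any}("Enabled" => false); aws_config=cfg
)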
# Arguments - `idempotency_token`: A unique, case-sensitive identifier that you provide to ensure the @@ -498,7 +570,9 @@ end Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one Amazon Web Services Region. A resource must be valid and active when you -add it as an endpoint. +add it as an endpoint. For more information about endpoint types and requirements for +endpoints that you can add to Global Accelerator, see Endpoints for standard accelerators +in the Global Accelerator Developer Guide. # Arguments - `endpoint_group_region`: The Amazon Web Services Region where the endpoint group is @@ -701,6 +775,46 @@ function delete_accelerator( ) end +""" + delete_cross_account_attachment(attachment_arn) + delete_cross_account_attachment(attachment_arn, params::Dict{String,<:Any}) + +Delete a cross-account attachment. When you delete an attachment, Global Accelerator +revokes the permission to use the resources in the attachment from all principals in the +list of principals. Global Accelerator revokes the permission for specific resources. For +more information, see Working with cross-account attachments and resources in Global +Accelerator in the Global Accelerator Developer Guide. + +# Arguments +- `attachment_arn`: The Amazon Resource Name (ARN) for the cross-account attachment to + delete. + +""" +function delete_cross_account_attachment( + AttachmentArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "DeleteCrossAccountAttachment", + Dict{String,Any}("AttachmentArn" => AttachmentArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_cross_account_attachment( + AttachmentArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return global_accelerator( + "DeleteCrossAccountAttachment", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AttachmentArn" => AttachmentArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_custom_routing_accelerator(accelerator_arn) delete_custom_routing_accelerator(accelerator_arn, params::Dict{String,<:Any}) @@ -971,7 +1085,8 @@ Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide. # Arguments - `cidr`: The address range, in CIDR notation. The prefix must be the same prefix that you - specified when you provisioned the address range. + specified when you provisioned the address range. For more information, see Bring your own + IP addresses (BYOIP) in the Global Accelerator Developer Guide. """ function deprovision_byoip_cidr(Cidr; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1064,6 +1179,42 @@ function describe_accelerator_attributes( ) end +""" + describe_cross_account_attachment(attachment_arn) + describe_cross_account_attachment(attachment_arn, params::Dict{String,<:Any}) + +Gets configuration information about a cross-account attachment. + +# Arguments +- `attachment_arn`: The Amazon Resource Name (ARN) for the cross-account attachment to + describe. 
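Illustrative only (not part of the generated patch): the new cross-account attachment operations follow the same calling pattern as the rest of this file, with required members passed positionally and optional members in a params Dict. The sketch assumes the @service entry point; the account IDs, attachment name, and ARN are placeholders, and the Cidr resource form is taken from the CreateCrossAccountAttachment docstring above.

using AWS
using AWS: @service
using UUIDs: uuid4
@service Global_Accelerator

# Create an attachment that shares one BYOIP CIDR with a (placeholder) account.
Global_Accelerator.create_cross_account_attachment(
    string(uuid4()),       # IdempotencyToken
    "example-attachment",  # Name
    Dict{String,Any}(
        "Principals" => ["111122223333"],
        "Resources" => [Dict("Cidr" => "169.254.60.0/24")],
    ),
)

# The ARN below is a placeholder; in practice it is returned by the create call.
attachment_arn = "arn:aws:globalaccelerator::111122223333:attachment/example"

Global_Accelerator.describe_cross_account_attachment(attachment_arn)
Global_Accelerator.update_cross_account_attachment(
    attachment_arn, Dict{String,Any}("AddPrincipals" => ["444455556666"])
)
Global_Accelerator.delete_cross_account_attachment(attachment_arn)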
+ +""" +function describe_cross_account_attachment( + AttachmentArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "DescribeCrossAccountAttachment", + Dict{String,Any}("AttachmentArn" => AttachmentArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_cross_account_attachment( + AttachmentArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return global_accelerator( + "DescribeCrossAccountAttachment", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AttachmentArn" => AttachmentArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_custom_routing_accelerator(accelerator_arn) describe_custom_routing_accelerator(accelerator_arn, params::Dict{String,<:Any}) @@ -1329,6 +1480,114 @@ function list_byoip_cidrs( ) end +""" + list_cross_account_attachments() + list_cross_account_attachments(params::Dict{String,<:Any}) + +List the cross-account attachments that have been created in Global Accelerator. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The number of cross-account attachment objects that you want to return + with this call. The default value is 10. +- `"NextToken"`: The token for the next set of results. You receive this token from a + previous call. +""" +function list_cross_account_attachments(; aws_config::AbstractAWSConfig=global_aws_config()) + return global_accelerator( + "ListCrossAccountAttachments"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cross_account_attachments( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "ListCrossAccountAttachments", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_cross_account_resource_accounts() + list_cross_account_resource_accounts(params::Dict{String,<:Any}) + +List the accounts that have cross-account resources. For more information, see Working +with cross-account attachments and resources in Global Accelerator in the Global +Accelerator Developer Guide. + +""" +function list_cross_account_resource_accounts(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "ListCrossAccountResourceAccounts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cross_account_resource_accounts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "ListCrossAccountResourceAccounts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_cross_account_resources(resource_owner_aws_account_id) + list_cross_account_resources(resource_owner_aws_account_id, params::Dict{String,<:Any}) + +List the cross-account resources available to work with. + +# Arguments +- `resource_owner_aws_account_id`: The account ID of a resource owner in a cross-account + attachment. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AcceleratorArn"`: The Amazon Resource Name (ARN) of an accelerator in a cross-account + attachment. +- `"MaxResults"`: The number of cross-account resource objects that you want to return with + this call. The default value is 10. +- `"NextToken"`: The token for the next set of results. 
You receive this token from a + previous call. +""" +function list_cross_account_resources( + ResourceOwnerAwsAccountId; aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "ListCrossAccountResources", + Dict{String,Any}("ResourceOwnerAwsAccountId" => ResourceOwnerAwsAccountId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cross_account_resources( + ResourceOwnerAwsAccountId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return global_accelerator( + "ListCrossAccountResources", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceOwnerAwsAccountId" => ResourceOwnerAwsAccountId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_custom_routing_accelerators() list_custom_routing_accelerators(params::Dict{String,<:Any}) @@ -1688,7 +1947,8 @@ Guide. # Arguments - `cidr`: The public IPv4 address range, in CIDR notation. The most specific IP prefix that you can specify is /24. The address range cannot overlap with another address range that - you've brought to this or another Region. + you've brought to this Amazon Web Services Region or another Region. For more information, + see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide. - `cidr_authorization_context`: A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. @@ -1920,10 +2180,18 @@ end update_accelerator(accelerator_arn) update_accelerator(accelerator_arn, params::Dict{String,<:Any}) -Update an accelerator. Global Accelerator is a global service that supports endpoints in -multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to -create, update, or otherwise work with accelerators. That is, for example, specify --region -us-west-2 on AWS CLI commands. +Update an accelerator to make changes, such as the following: Change the name of the +accelerator. Disable the accelerator so that it no longer accepts or routes traffic, or +so that you can delete it. Enable the accelerator, if it is disabled. Change the IP +address type to dual-stack if it is IPv4, or change the IP address type to IPv4 if it's +dual-stack. Be aware that static IP addresses remain assigned to your accelerator for as +long as it exists, even if you disable the accelerator and it no longer accepts or routes +traffic. However, when you delete the accelerator, you lose the static IP addresses that +are assigned to it, so you can no longer route traffic by using them. Global Accelerator +is a global service that supports endpoints in multiple Amazon Web Services Regions but you +must specify the US West (Oregon) Region to create, update, or otherwise work with +accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI +commands. # Arguments - `accelerator_arn`: The Amazon Resource Name (ARN) of the accelerator to update. @@ -1935,6 +2203,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys set to false, the accelerator can be deleted. - `"IpAddressType"`: The IP address type that an accelerator supports. For a standard accelerator, the value can be IPV4 or DUAL_STACK. +- `"IpAddresses"`: The IP addresses for an accelerator. - `"Name"`: The name of the accelerator. 
The name can have a maximum of 64 characters, must contain only alphanumeric characters, periods (.), or hyphens (-), and must not begin or end with a hyphen or period. @@ -2012,6 +2281,67 @@ function update_accelerator_attributes( ) end +""" + update_cross_account_attachment(attachment_arn) + update_cross_account_attachment(attachment_arn, params::Dict{String,<:Any}) + +Update a cross-account attachment to add or remove principals or resources. When you update +an attachment to remove a principal (account ID or accelerator) or a resource, Global +Accelerator revokes the permission for specific resources. For more information, see +Working with cross-account attachments and resources in Global Accelerator in the Global +Accelerator Developer Guide. + +# Arguments +- `attachment_arn`: The Amazon Resource Name (ARN) of the cross-account attachment to + update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AddPrincipals"`: The principals to add to the cross-account attachment. A principal is + an account or the Amazon Resource Name (ARN) of an accelerator that the attachment gives + permission to work with resources from another account. The resources are also listed in + the attachment. To add more than one principal, separate the account numbers or accelerator + ARNs, or both, with commas. +- `"AddResources"`: The resources to add to the cross-account attachment. A resource listed + in a cross-account attachment can be used with an accelerator by the principals that are + listed in the attachment. To add more than one resource, separate the resource ARNs with + commas. +- `"Name"`: The name of the cross-account attachment. +- `"RemovePrincipals"`: The principals to remove from the cross-account attachment. A + principal is an account or the Amazon Resource Name (ARN) of an accelerator that the + attachment gives permission to work with resources from another account. The resources are + also listed in the attachment. To remove more than one principal, separate the account + numbers or accelerator ARNs, or both, with commas. +- `"RemoveResources"`: The resources to remove from the cross-account attachment. A + resource listed in a cross-account attachment can be used with an accelerator by the + principals that are listed in the attachment. To remove more than one resource, separate + the resource ARNs with commas. +""" +function update_cross_account_attachment( + AttachmentArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return global_accelerator( + "UpdateCrossAccountAttachment", + Dict{String,Any}("AttachmentArn" => AttachmentArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_cross_account_attachment( + AttachmentArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return global_accelerator( + "UpdateCrossAccountAttachment", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AttachmentArn" => AttachmentArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_custom_routing_accelerator(accelerator_arn) update_custom_routing_accelerator(accelerator_arn, params::Dict{String,<:Any}) @@ -2028,6 +2358,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys set to false, the accelerator can be deleted. - `"IpAddressType"`: The IP address type that an accelerator supports. For a custom routing accelerator, the value must be IPV4. 
+- `"IpAddresses"`: The IP addresses for an accelerator. - `"Name"`: The name of the accelerator. The name can have a maximum of 64 characters, must contain only alphanumeric characters, periods (.), or hyphens (-), and must not begin or end with a hyphen or period. @@ -2277,7 +2608,8 @@ routing to Amazon Web Services because of propagation delays. For more informati Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide. # Arguments -- `cidr`: The address range, in CIDR notation. +- `cidr`: The address range, in CIDR notation. For more information, see Bring your own IP + addresses (BYOIP) in the Global Accelerator Developer Guide. """ function withdraw_byoip_cidr(Cidr; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/glue.jl b/src/services/glue.jl index 733c7cd44f..381075a80b 100644 --- a/src/services/glue.jl +++ b/src/services/glue.jl @@ -551,6 +551,38 @@ function batch_get_partition( ) end +""" + batch_get_table_optimizer(entries) + batch_get_table_optimizer(entries, params::Dict{String,<:Any}) + +Returns the configuration for the specified table optimizers. + +# Arguments +- `entries`: A list of BatchGetTableOptimizerEntry objects specifying the table optimizers + to retrieve. + +""" +function batch_get_table_optimizer( + Entries; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "BatchGetTableOptimizer", + Dict{String,Any}("Entries" => Entries); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_table_optimizer( + Entries, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "BatchGetTableOptimizer", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Entries" => Entries), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_get_triggers(trigger_names) batch_get_triggers(trigger_names, params::Dict{String,<:Any}) @@ -988,7 +1020,8 @@ end create_connection(connection_input) create_connection(connection_input, params::Dict{String,<:Any}) -Creates a connection definition in the Data Catalog. +Creates a connection definition in the Data Catalog. Connections used for creating +federated resources require the IAM glue:PassConnection permission. # Arguments - `connection_input`: A ConnectionInput object defining the connection to create. @@ -1376,7 +1409,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9. +- `"JobMode"`: A mode that describes how a job was created. Valid values are: SCRIPT - + The job was created using the Glue Studio script editor. VISUAL - The job was created + using the Glue Studio visual editor. NOTEBOOK - The job was created using an interactive + sessions notebook. When the JobMode field is missing or null, SCRIPT is assigned as the + default value. - `"LogUri"`: This field is reserved for future use. +- `"MaintenanceWindow"`: This field specifies a day of the week and hour for a maintenance + window for streaming jobs. Glue periodically performs maintenance activities. During these + maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the + job within 3 hours of the specified maintenance window. 
For instance, if you set up the
+ maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM
+ GMT and 1:00PM GMT.
- `"MaxCapacity"`: For Glue version 1.0 or earlier jobs, using the standard worker type, the
 number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU
 is a relative measure of processing power that consists of 4 vCPUs of compute capacity
@@ -1405,20 +1449,37 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 guide.
- `"Timeout"`: The job timeout in minutes. This is the maximum time that a job run can
 consume resources before it is terminated and enters TIMEOUT status. The default is 2,880
- minutes (48 hours).
+ minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days
+ or 10080 minutes. When the value is left blank, the job will be restarted after 7 days
+ if you have not set up a maintenance window. If you have set up a maintenance window, it
+ will be restarted during the maintenance window after 7 days.
- `"WorkerType"`: The type of predefined worker that is allocated when a job runs. Accepts
- a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray
- jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a
- 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1
- DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend
- this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to
- 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We
- recommend this worker type for memory-intensive jobs. For the G.025X worker type, each
- worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per
- worker. We recommend this worker type for low volume streaming jobs. This worker type is
- only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker
- maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers
- based on the autoscaler.
+ a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray
+ jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with
+ 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this
+ worker type for workloads such as data transforms, joins, and queries, to offer a scalable
+ and cost-effective way to run most jobs. For the G.2X worker type, each worker maps to 2
+ DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1
+ executor per worker. We recommend this worker type for workloads such as data transforms,
+ joins, and queries, to offer a scalable and cost-effective way to run most jobs. For the
+ G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
+ (approximately 235GB free), and provides 1 executor per worker. We recommend this worker
+ type for jobs whose workloads contain your most demanding transforms, aggregations, joins,
+ and queries. This worker type is available only for Glue version 3.0 or later Spark ETL
+ jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia),
+ US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
+ Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the + G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk + (approximately 487GB free), and provides 1 executor per worker. We recommend this worker + type for jobs whose workloads contain your most demanding transforms, aggregations, joins, + and queries. This worker type is available only for Glue version 3.0 or later Spark ETL + jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For + the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB + disk (approximately 34GB free), and provides 1 executor per worker. We recommend this + worker type for low volume streaming jobs. This worker type is only available for Glue + version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU + (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 + Ray workers based on the autoscaler. """ function create_job(Command, Name, Role; aws_config::AbstractAWSConfig=global_aws_config()) return glue( @@ -1914,17 +1975,30 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Timeout"`: The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types. -- `"WorkerType"`: The type of predefined worker that is allocated to use for the session. - Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each - worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For - the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and - provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. - For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), - and provides 1 executor per worker. We recommend this worker type for memory-intensive - jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, - 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low - volume streaming jobs. This worker type is only available for Glue version 3.0 streaming - jobs. +- `"WorkerType"`: The type of predefined worker that is allocated when a job runs. Accepts + a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray + notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) + with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend + this worker type for workloads such as data transforms, joins, and queries, to offers a + scalable and cost effective way to run most jobs. For the G.2X worker type, each worker + maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and + provides 1 executor per worker. We recommend this worker type for workloads such as data + transforms, joins, and queries, to offers a scalable and cost effective way to run most + jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) + with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We + recommend this worker type for jobs whose workloads contain your most demanding transforms, + aggregations, joins, and queries. 
This worker type is available only for Glue version 3.0 + or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US + East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia + Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe + (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of + memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We + recommend this worker type for jobs whose workloads contain your most demanding transforms, + aggregations, joins, and queries. This worker type is available only for Glue version 3.0 + or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X + worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of + memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based + on the autoscaler. """ function create_session( Command, Id, Role; aws_config::AbstractAWSConfig=global_aws_config() @@ -1973,6 +2047,8 @@ Creates a new table definition in the Data Catalog. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CatalogId"`: The ID of the Data Catalog in which to create the Table. If none is supplied, the Amazon Web Services account ID is used by default. +- `"OpenTableFormatInput"`: Specifies an OpenTableFormatInput structure when creating an + open format table. - `"PartitionIndexes"`: A list of partition indexes, PartitionIndex structures, to create in the table. - `"TransactionId"`: The ID of the transaction. @@ -2009,6 +2085,72 @@ function create_table( ) end +""" + create_table_optimizer(catalog_id, database_name, table_name, table_optimizer_configuration, type) + create_table_optimizer(catalog_id, database_name, table_name, table_optimizer_configuration, type, params::Dict{String,<:Any}) + +Creates a new table optimizer for a specific function. compaction is the only currently +supported optimizer type. + +# Arguments +- `catalog_id`: The Catalog ID of the table. +- `database_name`: The name of the database in the catalog in which the table resides. +- `table_name`: The name of the table. +- `table_optimizer_configuration`: A TableOptimizerConfiguration object representing the + configuration of a table optimizer. +- `type`: The type of table optimizer. Currently, the only valid value is compaction. 
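# Example
A minimal sketch of a call, assuming the `@service`-generated `Glue` module and the default global AWS configuration; the account ID, database, table, role ARN, and the `roleArn`/`enabled` configuration keys are illustrative placeholders modeled on the TableOptimizerConfiguration structure, not values taken from this patch.

```julia
using AWS: @service
@service Glue

# Enable compaction on a table; every identifier below is hypothetical.
Glue.create_table_optimizer(
    "123456789012",        # CatalogId
    "my_database",         # DatabaseName
    "my_iceberg_table",    # TableName
    Dict(
        "roleArn" => "arn:aws:iam::123456789012:role/GlueTableOptimizerRole",
        "enabled" => true,
    ),                     # TableOptimizerConfiguration
    "compaction",          # Type
)
```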
+ +""" +function create_table_optimizer( + CatalogId, + DatabaseName, + TableName, + TableOptimizerConfiguration, + Type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "CreateTableOptimizer", + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "TableOptimizerConfiguration" => TableOptimizerConfiguration, + "Type" => Type, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_table_optimizer( + CatalogId, + DatabaseName, + TableName, + TableOptimizerConfiguration, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "CreateTableOptimizer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "TableOptimizerConfiguration" => TableOptimizerConfiguration, + "Type" => Type, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_trigger(actions, name, type) create_trigger(actions, name, type, params::Dict{String,<:Any}) @@ -2068,6 +2210,52 @@ function create_trigger( ) end +""" + create_usage_profile(configuration, name) + create_usage_profile(configuration, name, params::Dict{String,<:Any}) + +Creates an Glue usage profile. + +# Arguments +- `configuration`: A ProfileConfiguration object specifying the job and session values for + the profile. +- `name`: The name of the usage profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A description of the usage profile. +- `"Tags"`: A list of tags applied to the usage profile. +""" +function create_usage_profile( + Configuration, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "CreateUsageProfile", + Dict{String,Any}("Configuration" => Configuration, "Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_usage_profile( + Configuration, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "CreateUsageProfile", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Configuration" => Configuration, "Name" => Name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_user_defined_function(database_name, function_input) create_user_defined_function(database_name, function_input, params::Dict{String,<:Any}) @@ -2988,6 +3176,66 @@ function delete_table( ) end +""" + delete_table_optimizer(catalog_id, database_name, table_name, type) + delete_table_optimizer(catalog_id, database_name, table_name, type, params::Dict{String,<:Any}) + +Deletes an optimizer and all associated metadata for a table. The optimization will no +longer be performed on the table. + +# Arguments +- `catalog_id`: The Catalog ID of the table. +- `database_name`: The name of the database in the catalog in which the table resides. +- `table_name`: The name of the table. +- `type`: The type of table optimizer. 
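# Example
A minimal sketch, assuming the `@service`-generated `Glue` module; the catalog ID, database, and table names below are illustrative placeholders, not values taken from this patch.

```julia
using AWS: @service
@service Glue

# Remove the compaction optimizer from a table; identifiers are hypothetical.
Glue.delete_table_optimizer("123456789012", "my_database", "my_iceberg_table", "compaction")
```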
+ +""" +function delete_table_optimizer( + CatalogId, + DatabaseName, + TableName, + Type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "DeleteTableOptimizer", + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "Type" => Type, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_table_optimizer( + CatalogId, + DatabaseName, + TableName, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "DeleteTableOptimizer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "Type" => Type, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_table_version(database_name, table_name, version_id) delete_table_version(database_name, table_name, version_id, params::Dict{String,<:Any}) @@ -3075,6 +3323,35 @@ function delete_trigger( ) end +""" + delete_usage_profile(name) + delete_usage_profile(name, params::Dict{String,<:Any}) + +Deletes the Glue specified usage profile. + +# Arguments +- `name`: The name of the usage profile to delete. + +""" +function delete_usage_profile(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return glue( + "DeleteUsageProfile", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_usage_profile( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "DeleteUsageProfile", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_user_defined_function(database_name, function_name) delete_user_defined_function(database_name, function_name, params::Dict{String,<:Any}) @@ -3462,6 +3739,91 @@ function get_column_statistics_for_table( ) end +""" + get_column_statistics_task_run(column_statistics_task_run_id) + get_column_statistics_task_run(column_statistics_task_run_id, params::Dict{String,<:Any}) + +Get the associated metadata/information for a task run, given a task run ID. + +# Arguments +- `column_statistics_task_run_id`: The identifier for the particular column statistics task + run. + +""" +function get_column_statistics_task_run( + ColumnStatisticsTaskRunId; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "GetColumnStatisticsTaskRun", + Dict{String,Any}("ColumnStatisticsTaskRunId" => ColumnStatisticsTaskRunId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_column_statistics_task_run( + ColumnStatisticsTaskRunId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "GetColumnStatisticsTaskRun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ColumnStatisticsTaskRunId" => ColumnStatisticsTaskRunId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_column_statistics_task_runs(database_name, table_name) + get_column_statistics_task_runs(database_name, table_name, params::Dict{String,<:Any}) + +Retrieves information about all runs associated with the specified table. + +# Arguments +- `database_name`: The name of the database where the table resides. +- `table_name`: The name of the table. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum size of the response. +- `"NextToken"`: A continuation token, if this is a continuation call. +""" +function get_column_statistics_task_runs( + DatabaseName, TableName; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "GetColumnStatisticsTaskRuns", + Dict{String,Any}("DatabaseName" => DatabaseName, "TableName" => TableName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_column_statistics_task_runs( + DatabaseName, + TableName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "GetColumnStatisticsTaskRuns", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DatabaseName" => DatabaseName, "TableName" => TableName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_connection(name) get_connection(name, params::Dict{String,<:Any}) @@ -4003,7 +4365,8 @@ end get_job_run(job_name, run_id) get_job_run(job_name, run_id, params::Dict{String,<:Any}) -Retrieves the metadata for a given job run. +Retrieves the metadata for a given job run. Job run history is accessible for 90 days for +your workflow and job run. # Arguments - `job_name`: Name of the job definition being run. @@ -4953,30 +5316,89 @@ function get_table( end """ - get_table_version(database_name, table_name) - get_table_version(database_name, table_name, params::Dict{String,<:Any}) + get_table_optimizer(catalog_id, database_name, table_name, type) + get_table_optimizer(catalog_id, database_name, table_name, type, params::Dict{String,<:Any}) -Retrieves a specified version of a table. +Returns the configuration of all optimizers associated with a specified table. # Arguments -- `database_name`: The database in the catalog in which the table resides. For Hive - compatibility, this name is entirely lowercase. -- `table_name`: The name of the table. For Hive compatibility, this name is entirely - lowercase. +- `catalog_id`: The Catalog ID of the table. +- `database_name`: The name of the database in the catalog in which the table resides. +- `table_name`: The name of the table. +- `type`: The type of table optimizer. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CatalogId"`: The ID of the Data Catalog where the tables reside. If none is provided, - the Amazon Web Services account ID is used by default. -- `"VersionId"`: The ID value of the table version to be retrieved. A VersionID is a string - representation of an integer. Each version is incremented by 1. 
""" -function get_table_version( - DatabaseName, TableName; aws_config::AbstractAWSConfig=global_aws_config() +function get_table_optimizer( + CatalogId, + DatabaseName, + TableName, + Type; + aws_config::AbstractAWSConfig=global_aws_config(), ) return glue( - "GetTableVersion", - Dict{String,Any}("DatabaseName" => DatabaseName, "TableName" => TableName); + "GetTableOptimizer", + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "Type" => Type, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_table_optimizer( + CatalogId, + DatabaseName, + TableName, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "GetTableOptimizer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "Type" => Type, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_table_version(database_name, table_name) + get_table_version(database_name, table_name, params::Dict{String,<:Any}) + +Retrieves a specified version of a table. + +# Arguments +- `database_name`: The database in the catalog in which the table resides. For Hive + compatibility, this name is entirely lowercase. +- `table_name`: The name of the table. For Hive compatibility, this name is entirely + lowercase. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CatalogId"`: The ID of the Data Catalog where the tables reside. If none is provided, + the Amazon Web Services account ID is used by default. +- `"VersionId"`: The ID value of the table version to be retrieved. A VersionID is a string + representation of an integer. Each version is incremented by 1. +""" +function get_table_version( + DatabaseName, TableName; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "GetTableVersion", + Dict{String,Any}("DatabaseName" => DatabaseName, "TableName" => TableName); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -5198,6 +5620,11 @@ IAM authorization, the public IAM action associated with this API is glue:GetPar # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AuditContext"`: A structure containing Lake Formation audit context information. +- `"QuerySessionContext"`: A structure used as a protocol between query engines and Lake + Formation or Glue. Contains both a Lake Formation generated authorization identifier and + information from the request's authorization context. +- `"Region"`: Specified only if the base tables belong to a different Amazon Web Services + Region. """ function get_unfiltered_partition_metadata( CatalogId, @@ -5288,6 +5715,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MaxResults"`: The maximum number of partitions to return in a single response. - `"NextToken"`: A continuation token, if this is not the first call to retrieve these partitions. +- `"QuerySessionContext"`: A structure used as a protocol between query engines and Lake + Formation or Glue. Contains both a Lake Formation generated authorization identifier and + information from the request's authorization context. +- `"Region"`: Specified only if the base tables belong to a different Amazon Web Services + Region. 
- `"Segment"`: The segment of the table's partitions to scan in this request. """ function get_unfiltered_partitions_metadata( @@ -5340,18 +5772,48 @@ end get_unfiltered_table_metadata(catalog_id, database_name, name, supported_permission_types) get_unfiltered_table_metadata(catalog_id, database_name, name, supported_permission_types, params::Dict{String,<:Any}) -Retrieves table metadata from the Data Catalog that contains unfiltered metadata. For IAM -authorization, the public IAM action associated with this API is glue:GetTable. +Allows a third-party analytical engine to retrieve unfiltered table metadata from the Data +Catalog. For IAM authorization, the public IAM action associated with this API is +glue:GetTable. # Arguments - `catalog_id`: The catalog ID where the table resides. - `database_name`: (Required) Specifies the name of a database that contains the table. - `name`: (Required) Specifies the name of a table for which you are requesting metadata. -- `supported_permission_types`: (Required) A list of supported permission types. +- `supported_permission_types`: Indicates the level of filtering a third-party analytical + engine is capable of enforcing when calling the GetUnfilteredTableMetadata API operation. + Accepted values are: COLUMN_PERMISSION - Column permissions ensure that users can access + only specific columns in the table. If there are particular columns contain sensitive data, + data lake administrators can define column filters that exclude access to specific columns. + CELL_FILTER_PERMISSION - Cell-level filtering combines column filtering (include or + exclude columns) and row filter expressions to restrict access to individual elements in + the table. NESTED_PERMISSION - Nested permissions combines cell-level filtering and + nested column filtering to restrict access to columns and/or nested columns in specific + rows based on row filter expressions. NESTED_CELL_PERMISSION - Nested cell permissions + combines nested permission with nested cell-level filtering. This allows different subsets + of nested columns to be restricted based on an array of row filter expressions. Note: + Each of these permission types follows a hierarchical order where each subsequent + permission type includes all permission of the previous type. Important: If you provide a + supported permission type that doesn't match the user's level of permissions on the table, + then Lake Formation raises an exception. For example, if the third-party engine calling the + GetUnfilteredTableMetadata operation can enforce only column-level filtering, and the user + has nested cell filtering applied on the table, Lake Formation throws an exception, and + will not return unfiltered table metadata and data access credentials. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AuditContext"`: A structure containing Lake Formation audit context information. +- `"ParentResourceArn"`: The resource ARN of the view. +- `"Permissions"`: The Lake Formation data permissions of the caller on the table. Used to + authorize the call when no view context is found. +- `"QuerySessionContext"`: A structure used as a protocol between query engines and Lake + Formation or Glue. Contains both a Lake Formation generated authorization identifier and + information from the request's authorization context. +- `"Region"`: Specified only if the base tables belong to a different Amazon Web Services + Region. 
+- `"RootResourceArn"`: The resource ARN of the root view in a chain of nested views. +- `"SupportedDialect"`: A structure specifying the dialect and dialect version used by the + query engine. """ function get_unfiltered_table_metadata( CatalogId, @@ -5399,6 +5861,35 @@ function get_unfiltered_table_metadata( ) end +""" + get_usage_profile(name) + get_usage_profile(name, params::Dict{String,<:Any}) + +Retrieves information about the specified Glue usage profile. + +# Arguments +- `name`: The name of the usage profile to retrieve. + +""" +function get_usage_profile(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return glue( + "GetUsageProfile", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_usage_profile( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "GetUsageProfile", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_user_defined_function(database_name, function_name) get_user_defined_function(database_name, function_name, params::Dict{String,<:Any}) @@ -5523,7 +6014,8 @@ end get_workflow_run(name, run_id) get_workflow_run(name, run_id, params::Dict{String,<:Any}) -Retrieves the metadata for a given workflow run. +Retrieves the metadata for a given workflow run. Job run history is accessible for 90 days +for your workflow and job run. # Arguments - `name`: Name of the workflow being run. @@ -5678,6 +6170,37 @@ function list_blueprints( ) end +""" + list_column_statistics_task_runs() + list_column_statistics_task_runs(params::Dict{String,<:Any}) + +List all task runs for a particular account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum size of the response. +- `"NextToken"`: A continuation token, if this is a continuation call. +""" +function list_column_statistics_task_runs(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "ListColumnStatisticsTaskRuns"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_column_statistics_task_runs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "ListColumnStatisticsTaskRuns", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_crawlers() list_crawlers(params::Dict{String,<:Any}) @@ -6140,6 +6663,69 @@ function list_statements( ) end +""" + list_table_optimizer_runs(catalog_id, database_name, table_name, type) + list_table_optimizer_runs(catalog_id, database_name, table_name, type, params::Dict{String,<:Any}) + +Lists the history of previous optimizer runs for a specific table. + +# Arguments +- `catalog_id`: The Catalog ID of the table. +- `database_name`: The name of the database in the catalog in which the table resides. +- `table_name`: The name of the table. +- `type`: The type of table optimizer. Currently, the only valid value is compaction. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of optimizer runs to return on each call. +- `"NextToken"`: A continuation token, if this is a continuation call. 
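# Example
A sketch of paging through run history with the `params` dictionary, assuming the `@service`-generated `Glue` module and that the call returns the parsed response as a dictionary under the default feature set; all identifiers are placeholders.

```julia
using AWS: @service
@service Glue

# First page of compaction run history for a hypothetical table.
resp = Glue.list_table_optimizer_runs(
    "123456789012", "my_database", "my_iceberg_table", "compaction",
    Dict("MaxResults" => 10),
)

# Fetch the next page if a continuation token was returned.
if haskey(resp, "NextToken")
    resp = Glue.list_table_optimizer_runs(
        "123456789012", "my_database", "my_iceberg_table", "compaction",
        Dict("MaxResults" => 10, "NextToken" => resp["NextToken"]),
    )
end
```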
+""" +function list_table_optimizer_runs( + CatalogId, + DatabaseName, + TableName, + Type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "ListTableOptimizerRuns", + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "Type" => Type, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_table_optimizer_runs( + CatalogId, + DatabaseName, + TableName, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "ListTableOptimizerRuns", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "Type" => Type, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_triggers() list_triggers(params::Dict{String,<:Any}) @@ -6169,6 +6755,28 @@ function list_triggers( ) end +""" + list_usage_profiles() + list_usage_profiles(params::Dict{String,<:Any}) + +List all the Glue usage profiles. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of usage profiles to return in a single response. +- `"NextToken"`: A continuation token, included if this is a continuation call. +""" +function list_usage_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) + return glue("ListUsageProfiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_usage_profiles( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "ListUsageProfiles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_workflows() list_workflows(params::Dict{String,<:Any}) @@ -6722,6 +7330,63 @@ function start_blueprint_run( ) end +""" + start_column_statistics_task_run(database_name, role, table_name) + start_column_statistics_task_run(database_name, role, table_name, params::Dict{String,<:Any}) + +Starts a column statistics task run, for a specified table and columns. + +# Arguments +- `database_name`: The name of the database where the table resides. +- `role`: The IAM role that the service assumes to generate statistics. +- `table_name`: The name of the table to generate statistics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CatalogID"`: The ID of the Data Catalog where the table reside. If none is supplied, + the Amazon Web Services account ID is used by default. +- `"ColumnNameList"`: A list of the column names to generate statistics. If none is + supplied, all column names for the table will be used by default. +- `"SampleSize"`: The percentage of rows used to generate statistics. If none is supplied, + the entire table will be used to generate stats. +- `"SecurityConfiguration"`: Name of the security configuration that is used to encrypt + CloudWatch logs for the column stats task run. 
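# Example
A minimal sketch, assuming the `@service`-generated `Glue` module; the database, table, column names, and role ARN are placeholders, and the optional keys mirror the parameters listed above.

```julia
using AWS: @service
@service Glue

# Generate statistics for two columns over a 25% sample; all names are hypothetical.
Glue.start_column_statistics_task_run(
    "my_database",                                          # DatabaseName
    "arn:aws:iam::123456789012:role/GlueColumnStatsRole",   # Role
    "my_table",                                             # TableName
    Dict("ColumnNameList" => ["customer_id", "order_total"], "SampleSize" => 25),
)
```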
+""" +function start_column_statistics_task_run( + DatabaseName, Role, TableName; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "StartColumnStatisticsTaskRun", + Dict{String,Any}( + "DatabaseName" => DatabaseName, "Role" => Role, "TableName" => TableName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_column_statistics_task_run( + DatabaseName, + Role, + TableName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "StartColumnStatisticsTaskRun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DatabaseName" => DatabaseName, "Role" => Role, "TableName" => TableName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_crawler(name) start_crawler(name, params::Dict{String,<:Any}) @@ -6795,7 +7460,7 @@ end Starts a recommendation run that is used to generate rules when you don't know what rules to write. Glue Data Quality analyzes the data and comes up with recommendations for a potential ruleset. You can then triage the ruleset and modify the generated ruleset to your -liking. +liking. Recommendation runs are automatically deleted after 90 days. # Arguments - `data_source`: The data source (Glue table) associated with this run. @@ -7071,21 +7736,37 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys with this job run. - `"Timeout"`: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides - the timeout value set in the parent job. Streaming jobs do not have a timeout. The default - for non-streaming jobs is 2,880 minutes (48 hours). + the timeout value set in the parent job. Streaming jobs must have timeout values less than + 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 + days based if you have not setup a maintenance window. If you have setup maintenance + window, it will be restarted during the maintenance window after 7 days. - `"WorkerType"`: The type of predefined worker that is allocated when a job runs. Accepts - a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray - jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a - 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 - DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to - 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We - recommend this worker type for memory-intensive jobs. For the G.025X worker type, each - worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per - worker. We recommend this worker type for low volume streaming jobs. This worker type is - only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker - maps to 2 DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one - per vCPU) based on the autoscaler. + a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray + jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with + 84GB disk (approximately 34GB free), and provides 1 executor per worker. 
We recommend this + worker type for workloads such as data transforms, joins, and queries, to offers a scalable + and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 + DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 + executor per worker. We recommend this worker type for workloads such as data transforms, + joins, and queries, to offers a scalable and cost effective way to run most jobs. For the + G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk + (approximately 235GB free), and provides 1 executor per worker. We recommend this worker + type for jobs whose workloads contain your most demanding transforms, aggregations, joins, + and queries. This worker type is available only for Glue version 3.0 or later Spark ETL + jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), + US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), + Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the + G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk + (approximately 487GB free), and provides 1 executor per worker. We recommend this worker + type for jobs whose workloads contain your most demanding transforms, aggregations, joins, + and queries. This worker type is available only for Glue version 3.0 or later Spark ETL + jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For + the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB + disk (approximately 34GB free), and provides 1 executor per worker. We recommend this + worker type for low volume streaming jobs. This worker type is only available for Glue + version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU + (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 + Ray workers based on the autoscaler. """ function start_job_run(JobName; aws_config::AbstractAWSConfig=global_aws_config()) return glue( @@ -7259,6 +7940,47 @@ function start_workflow_run( ) end +""" + stop_column_statistics_task_run(database_name, table_name) + stop_column_statistics_task_run(database_name, table_name, params::Dict{String,<:Any}) + +Stops a task run for the specified table. + +# Arguments +- `database_name`: The name of the database where the table resides. +- `table_name`: The name of the table. + +""" +function stop_column_statistics_task_run( + DatabaseName, TableName; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "StopColumnStatisticsTaskRun", + Dict{String,Any}("DatabaseName" => DatabaseName, "TableName" => TableName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_column_statistics_task_run( + DatabaseName, + TableName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "StopColumnStatisticsTaskRun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DatabaseName" => DatabaseName, "TableName" => TableName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_crawler(name) stop_crawler(name, params::Dict{String,<:Any}) @@ -8033,8 +8755,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CommitId"`: A commit ID for a commit in the remote repository. 
- `"Folder"`: An optional folder in the remote repository. - `"JobName"`: The name of the Glue job to be synchronized to or from the remote repository. -- `"Provider"`: The provider for the remote repository. +- `"Provider"`: The provider for the remote repository. Possible values: GITHUB, + AWS_CODE_COMMIT, GITLAB, BITBUCKET. - `"RepositoryName"`: The name of the remote repository that contains the job artifacts. + For BitBucket providers, RepositoryName should include WorkspaceName. Use the format + <WorkspaceName>/<RepositoryName>. - `"RepositoryOwner"`: The owner of the remote repository that contains the job artifacts. """ function update_job_from_source_control(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -8297,8 +9022,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CommitId"`: A commit ID for a commit in the remote repository. - `"Folder"`: An optional folder in the remote repository. - `"JobName"`: The name of the Glue job to be synchronized to or from the remote repository. -- `"Provider"`: The provider for the remote repository. +- `"Provider"`: The provider for the remote repository. Possible values: GITHUB, + AWS_CODE_COMMIT, GITLAB, BITBUCKET. - `"RepositoryName"`: The name of the remote repository that contains the job artifacts. + For BitBucket providers, RepositoryName should include WorkspaceName. Use the format + <WorkspaceName>/<RepositoryName>. - `"RepositoryOwner"`: The owner of the remote repository that contains the job artifacts. """ function update_source_control_from_job(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -8332,11 +9060,14 @@ Updates a metadata table in the Data Catalog. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CatalogId"`: The ID of the Data Catalog where the table resides. If none is provided, the Amazon Web Services account ID is used by default. +- `"Force"`: A flag that can be set to true to ignore matching storage descriptor and + subobject matching requirements. - `"SkipArchive"`: By default, UpdateTable always creates an archived version of the table before updating it. However, if skipArchive is set to true, UpdateTable does not create the archived version. - `"TransactionId"`: The transaction ID at which to update the table contents. - `"VersionId"`: The version ID at which to update the table contents. +- `"ViewUpdateAction"`: The operation to be performed when updating the view. """ function update_table( DatabaseName, TableInput; aws_config::AbstractAWSConfig=global_aws_config() @@ -8370,6 +9101,71 @@ function update_table( ) end +""" + update_table_optimizer(catalog_id, database_name, table_name, table_optimizer_configuration, type) + update_table_optimizer(catalog_id, database_name, table_name, table_optimizer_configuration, type, params::Dict{String,<:Any}) + +Updates the configuration for an existing table optimizer. + +# Arguments +- `catalog_id`: The Catalog ID of the table. +- `database_name`: The name of the database in the catalog in which the table resides. +- `table_name`: The name of the table. +- `table_optimizer_configuration`: A TableOptimizerConfiguration object representing the + configuration of a table optimizer. +- `type`: The type of table optimizer. Currently, the only valid value is compaction. 
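# Example
A minimal sketch, assuming the `@service`-generated `Glue` module; the identifiers and the `roleArn`/`enabled` configuration keys (modeled on TableOptimizerConfiguration) are placeholders, not values taken from this patch.

```julia
using AWS: @service
@service Glue

# Pause compaction without deleting the optimizer; identifiers are hypothetical.
Glue.update_table_optimizer(
    "123456789012",
    "my_database",
    "my_iceberg_table",
    Dict(
        "roleArn" => "arn:aws:iam::123456789012:role/GlueTableOptimizerRole",
        "enabled" => false,
    ),
    "compaction",
)
```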
+ +""" +function update_table_optimizer( + CatalogId, + DatabaseName, + TableName, + TableOptimizerConfiguration, + Type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "UpdateTableOptimizer", + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "TableOptimizerConfiguration" => TableOptimizerConfiguration, + "Type" => Type, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_table_optimizer( + CatalogId, + DatabaseName, + TableName, + TableOptimizerConfiguration, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "UpdateTableOptimizer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CatalogId" => CatalogId, + "DatabaseName" => DatabaseName, + "TableName" => TableName, + "TableOptimizerConfiguration" => TableOptimizerConfiguration, + "Type" => Type, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_trigger(name, trigger_update) update_trigger(name, trigger_update, params::Dict{String,<:Any}) @@ -8411,6 +9207,51 @@ function update_trigger( ) end +""" + update_usage_profile(configuration, name) + update_usage_profile(configuration, name, params::Dict{String,<:Any}) + +Update an Glue usage profile. + +# Arguments +- `configuration`: A ProfileConfiguration object specifying the job and session values for + the profile. +- `name`: The name of the usage profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A description of the usage profile. +""" +function update_usage_profile( + Configuration, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "UpdateUsageProfile", + Dict{String,Any}("Configuration" => Configuration, "Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_usage_profile( + Configuration, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "UpdateUsageProfile", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Configuration" => Configuration, "Name" => Name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_user_defined_function(database_name, function_input, function_name) update_user_defined_function(database_name, function_input, function_name, params::Dict{String,<:Any}) diff --git a/src/services/grafana.jl b/src/services/grafana.jl index 1c08b7afc2..08f09346fd 100644 --- a/src/services/grafana.jl +++ b/src/services/grafana.jl @@ -8,13 +8,20 @@ using AWS.UUIDs associate_license(license_type, workspace_id) associate_license(license_type, workspace_id, params::Dict{String,<:Any}) -Assigns a Grafana Enterprise license to a workspace. Upgrading to Grafana Enterprise incurs -additional fees. For more information, see Upgrade a workspace to Grafana Enterprise. +Assigns a Grafana Enterprise license to a workspace. To upgrade, you must use ENTERPRISE +for the licenseType, and pass in a valid Grafana Labs token for the grafanaToken. Upgrading +to Grafana Enterprise incurs additional fees. For more information, see Upgrade a workspace +to Grafana Enterprise. # Arguments -- `license_type`: The type of license to associate with the workspace. +- `license_type`: The type of license to associate with the workspace. 
Amazon Managed + Grafana workspaces no longer support Grafana Enterprise free trials. - `workspace_id`: The ID of the workspace to associate the license with. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Grafana-Token"`: A token from Grafana Labs that ties your Amazon Web Services account + with a Grafana Labs account. For more information, see Link your account with Grafana Labs. """ function associate_license( licenseType, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -57,9 +64,8 @@ workspace. Instead, use UpdateWorkspace. ORGANIZATION, you must specify which organizational units the workspace can access in the workspaceOrganizationalUnits parameter. - `authentication_providers`: Specifies whether this workspace uses SAML 2.0, IAM Identity - Center (successor to Single Sign-On), or both to authenticate users for using the Grafana - console within a workspace. For more information, see User authentication in Amazon Managed - Grafana. + Center, or both to authenticate users for using the Grafana console within a workspace. For + more information, see User authentication in Amazon Managed Grafana. - `permission_type`: When creating a workspace through the Amazon Web Services API, CLI or Amazon Web Services CloudFormation, you must manage IAM roles and provision the permissions that the workspace needs to use Amazon Web Services data sources and notification channels. @@ -78,8 +84,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"configuration"`: The configuration string for the workspace that you create. For more information about the format and configuration options available, see Working in your Grafana workspace. -- `"grafanaVersion"`: Specifies the version of Grafana to support in the new workspace. - Supported values are 8.4 and 9.4. +- `"grafanaVersion"`: Specifies the version of Grafana to support in the new workspace. If + not specified, defaults to the latest version (for example, 10.4). To get a list of + supported versions, use the ListVersions operation. - `"networkAccessControl"`: Configuration for network access to your workspace. When this is configured, only listed IP addresses and VPC endpoints will be able to access your workspace. Standard Grafana authentication and authorization will still be required. If @@ -92,7 +99,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to be used for this workspace. - `"tags"`: The list of tags associated with the workspace. - `"vpcConfiguration"`: The configuration settings for an Amazon VPC that contains data - sources for your Grafana workspace to connect to. + sources for your Grafana workspace to connect to. Connecting to a private VPC is not yet + available in the Asia Pacific (Seoul) Region (ap-northeast-2). - `"workspaceDataSources"`: This parameter is for internal use only, and should not be used. - `"workspaceDescription"`: A description for the workspace. This is used only to help you identify this workspace. Pattern: ^[p{L}p{Z}p{N}p{P}]{0,2048} @@ -162,11 +170,13 @@ end Creates a Grafana API key for the workspace. This key can be used to authenticate requests sent to the workspace's HTTP API. See https://docs.aws.amazon.com/grafana/latest/userguide/Using-Grafana-APIs.html for available -APIs and example requests. +APIs and example requests. In workspaces compatible with Grafana version 9 or above, use +workspace service accounts instead of API keys. 
API keys will be removed in a future +release. # Arguments - `key_name`: Specifies the name of the key. Keynames must be unique to the workspace. -- `key_role`: Specifies the permission level of the key. Valid values: VIEWER|EDITOR|ADMIN +- `key_role`: Specifies the permission level of the key. Valid values: ADMIN|EDITOR|VIEWER - `seconds_to_live`: Specifies the time in seconds until the key expires. Keys can be valid for up to 30 days. - `workspace_id`: The ID of the workspace to create an API key. @@ -216,6 +226,120 @@ function create_workspace_api_key( ) end +""" + create_workspace_service_account(grafana_role, name, workspace_id) + create_workspace_service_account(grafana_role, name, workspace_id, params::Dict{String,<:Any}) + +Creates a service account for the workspace. A service account can be used to call Grafana +HTTP APIs, and run automated workloads. After creating the service account with the correct +GrafanaRole for your use case, use CreateWorkspaceServiceAccountToken to create a token +that can be used to authenticate and authorize Grafana HTTP API calls. You can only create +service accounts for workspaces that are compatible with Grafana version 9 and above. For +more information about service accounts, see Service accounts in the Amazon Managed Grafana +User Guide. For more information about the Grafana HTTP APIs, see Using Grafana HTTP APIs +in the Amazon Managed Grafana User Guide. + +# Arguments +- `grafana_role`: The permission level to use for this service account. For more + information about the roles and the permissions each has, see User roles in the Amazon + Managed Grafana User Guide. +- `name`: A name for the service account. The name must be unique within the workspace, as + it determines the ID associated with the service account. +- `workspace_id`: The ID of the workspace within which to create the service account. + +""" +function create_workspace_service_account( + grafanaRole, name, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return grafana( + "POST", + "/workspaces/$(workspaceId)/serviceaccounts", + Dict{String,Any}("grafanaRole" => grafanaRole, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_workspace_service_account( + grafanaRole, + name, + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "POST", + "/workspaces/$(workspaceId)/serviceaccounts", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("grafanaRole" => grafanaRole, "name" => name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_workspace_service_account_token(name, seconds_to_live, service_account_id, workspace_id) + create_workspace_service_account_token(name, seconds_to_live, service_account_id, workspace_id, params::Dict{String,<:Any}) + +Creates a token that can be used to authenticate and authorize Grafana HTTP API operations +for the given workspace service account. The service account acts as a user for the API +operations, and defines the permissions that are used by the API. When you create the +service account token, you will receive a key that is used when calling Grafana APIs. Do +not lose this key, as it will not be retrievable again. If you do lose the key, you can +delete the token and recreate it to receive a new key. This will disable the initial key. +Service accounts are only available for workspaces that are compatible with Grafana version +9 and above. 
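As a sketch of that flow, assuming the `@service`-generated `Grafana` module, a parsed dictionary response, and an `id` field on the create-service-account response; the workspace ID and names are hypothetical placeholders.

```julia
using AWS: @service
@service Grafana

# Create a service account, then mint a 7-day token for it; IDs and names are hypothetical.
account = Grafana.create_workspace_service_account("EDITOR", "automation-bot", "g-abc123def4")
token = Grafana.create_workspace_service_account_token(
    "nightly-sync", 7 * 24 * 60 * 60, account["id"], "g-abc123def4"
)
# The secret key is only returned in this response; store it securely now.
```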
+ +# Arguments +- `name`: A name for the token to create. +- `seconds_to_live`: Sets how long the token will be valid, in seconds. You can set the + time up to 30 days in the future. +- `service_account_id`: The ID of the service account for which to create a token. +- `workspace_id`: The ID of the workspace the service account resides within. + +""" +function create_workspace_service_account_token( + name, + secondsToLive, + serviceAccountId, + workspaceId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "POST", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)/tokens", + Dict{String,Any}("name" => name, "secondsToLive" => secondsToLive); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_workspace_service_account_token( + name, + secondsToLive, + serviceAccountId, + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "POST", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)/tokens", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "secondsToLive" => secondsToLive), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_workspace(workspace_id) delete_workspace(workspace_id, params::Dict{String,<:Any}) @@ -252,7 +376,9 @@ end delete_workspace_api_key(key_name, workspace_id) delete_workspace_api_key(key_name, workspace_id, params::Dict{String,<:Any}) -Deletes a Grafana API key for the workspace. +Deletes a Grafana API key for the workspace. In workspaces compatible with Grafana version +9 or above, use workspace service accounts instead of API keys. API keys will be removed in +a future release. # Arguments - `key_name`: The name of the API key to delete. @@ -284,6 +410,89 @@ function delete_workspace_api_key( ) end +""" + delete_workspace_service_account(service_account_id, workspace_id) + delete_workspace_service_account(service_account_id, workspace_id, params::Dict{String,<:Any}) + +Deletes a workspace service account from the workspace. This will delete any tokens created +for the service account, as well. If the tokens are currently in use, the will fail to +authenticate / authorize after they are deleted. Service accounts are only available for +workspaces that are compatible with Grafana version 9 and above. + +# Arguments +- `service_account_id`: The ID of the service account to delete. +- `workspace_id`: The ID of the workspace where the service account resides. + +""" +function delete_workspace_service_account( + serviceAccountId, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return grafana( + "DELETE", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_workspace_service_account( + serviceAccountId, + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "DELETE", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_workspace_service_account_token(service_account_id, token_id, workspace_id) + delete_workspace_service_account_token(service_account_id, token_id, workspace_id, params::Dict{String,<:Any}) + +Deletes a token for the workspace service account. This will disable the key associated +with the token. 
If any automation is currently using the key, it will no longer be +authenticated or authorized to perform actions with the Grafana HTTP APIs. Service accounts +are only available for workspaces that are compatible with Grafana version 9 and above. + +# Arguments +- `service_account_id`: The ID of the service account from which to delete the token. +- `token_id`: The ID of the token to delete. +- `workspace_id`: The ID of the workspace from which to delete the token. + +""" +function delete_workspace_service_account_token( + serviceAccountId, + tokenId, + workspaceId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "DELETE", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)/tokens/$(tokenId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_workspace_service_account_token( + serviceAccountId, + tokenId, + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "DELETE", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)/tokens/$(tokenId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_workspace(workspace_id) describe_workspace(workspace_id, params::Dict{String,<:Any}) @@ -504,6 +713,118 @@ function list_tags_for_resource( ) end +""" + list_versions() + list_versions(params::Dict{String,<:Any}) + +Lists available versions of Grafana. These are available when calling CreateWorkspace. +Optionally, include a workspace to list the versions to which it can be upgraded. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to include in the response. +- `"nextToken"`: The token to use when requesting the next set of results. You receive this + token from a previous ListVersions operation. +- `"workspace-id"`: The ID of the workspace to list the available upgrade versions. If not + included, lists all versions of Grafana that are supported for CreateWorkspace. +""" +function list_versions(; aws_config::AbstractAWSConfig=global_aws_config()) + return grafana( + "GET", "/versions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_versions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return grafana( + "GET", "/versions", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_workspace_service_account_tokens(service_account_id, workspace_id) + list_workspace_service_account_tokens(service_account_id, workspace_id, params::Dict{String,<:Any}) + +Returns a list of tokens for a workspace service account. This does not return the key for +each token. You cannot access keys after they are created. To create a new key, delete the +token and recreate it. Service accounts are only available for workspaces that are +compatible with Grafana version 9 and above. + +# Arguments +- `service_account_id`: The ID of the service account for which to return tokens. +- `workspace_id`: The ID of the workspace for which to return tokens. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of tokens to include in the results. +- `"nextToken"`: The token for the next set of service accounts to return. (You receive + this token from a previous ListWorkspaceServiceAccountTokens operation.) 
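+
+As a sketch of consuming the pagination token described above: the IDs are placeholders,
+the helper name is hypothetical, and the `serviceAccountTokens` response field name is an
+assumption.
+
+```julia
+using AWS: @service
+@service Grafana
+
+# Hypothetical helper that pages through every token for one service account.
+function list_all_tokens(service_account_id, workspace_id)
+    tokens = Any[]
+    params = Dict{String,Any}("maxResults" => 50)
+    while true
+        page = Grafana.list_workspace_service_account_tokens(
+            service_account_id, workspace_id, params
+        )
+        append!(tokens, get(page, "serviceAccountTokens", []))  # response field name assumed
+        next = get(page, "nextToken", nothing)
+        next === nothing && return tokens
+        params["nextToken"] = next
+    end
+end
+```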
+""" +function list_workspace_service_account_tokens( + serviceAccountId, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return grafana( + "GET", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)/tokens"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workspace_service_account_tokens( + serviceAccountId, + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "GET", + "/workspaces/$(workspaceId)/serviceaccounts/$(serviceAccountId)/tokens", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_workspace_service_accounts(workspace_id) + list_workspace_service_accounts(workspace_id, params::Dict{String,<:Any}) + +Returns a list of service accounts for a workspace. Service accounts are only available for +workspaces that are compatible with Grafana version 9 and above. + +# Arguments +- `workspace_id`: The workspace for which to list service accounts. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of service accounts to include in the results. +- `"nextToken"`: The token for the next set of service accounts to return. (You receive + this token from a previous ListWorkspaceServiceAccounts operation.) +""" +function list_workspace_service_accounts( + workspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return grafana( + "GET", + "/workspaces/$(workspaceId)/serviceaccounts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workspace_service_accounts( + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return grafana( + "GET", + "/workspaces/$(workspaceId)/serviceaccounts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_workspaces() list_workspaces(params::Dict{String,<:Any}) @@ -755,9 +1076,8 @@ take a few minutes to take effect. # Arguments - `authentication_providers`: Specifies whether this workspace uses SAML 2.0, IAM Identity - Center (successor to Single Sign-On), or both to authenticate users for using the Grafana - console within a workspace. For more information, see User authentication in Amazon Managed - Grafana. + Center, or both to authenticate users for using the Grafana console within a workspace. For + more information, see User authentication in Amazon Managed Grafana. - `workspace_id`: The ID of the workspace to update the authentication for. # Optional Parameters @@ -809,6 +1129,13 @@ Updates the configuration string for the given workspace about the format and configuration options available, see Working in your Grafana workspace. - `workspace_id`: The ID of the workspace to update. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"grafanaVersion"`: Specifies the version of Grafana to support in the workspace. If not + specified, keeps the current version of the workspace. Can only be used to upgrade (for + example, from 8.4 to 9.4), not downgrade (for example, from 9.4 to 8.4). To know what + versions are available to upgrade to for a specific workspace, see the ListVersions + operation. 
""" function update_workspace_configuration( configuration, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/greengrassv2.jl b/src/services/greengrassv2.jl index ddf20eb1e5..0cc9a6314d 100644 --- a/src/services/greengrassv2.jl +++ b/src/services/greengrassv2.jl @@ -178,12 +178,11 @@ component from a recipe, specify inlineRecipe when you call this operation. C components from Lambda functions Create a component from an Lambda function that runs on IoT Greengrass. This creates a recipe and artifacts from the Lambda function's deployment package. You can use this operation to migrate Lambda functions from IoT Greengrass V1 to -IoT Greengrass V2. This function only accepts Lambda functions that use the following -runtimes: Python 2.7 – python2.7 Python 3.7 – python3.7 Python 3.8 – -python3.8 Python 3.9 – python3.9 Java 8 – java8 Java 11 – java11 Node.js -10 – nodejs10.x Node.js 12 – nodejs12.x Node.js 14 – nodejs14.x To create a -component from a Lambda function, specify lambdaFunction when you call this operation. IoT -Greengrass currently supports Lambda functions on only Linux core devices. +IoT Greengrass V2. This function accepts Lambda functions in all supported versions of +Python, Node.js, and Java runtimes. IoT Greengrass doesn't apply any additional +restrictions on deprecated Lambda runtime versions. To create a component from a Lambda +function, specify lambdaFunction when you call this operation. IoT Greengrass currently +supports Lambda functions on only Linux core devices. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -512,6 +511,15 @@ install. the section of the URI after the scheme. For example, in the artifact URI greengrass:SomeArtifact.zip, the artifact name is SomeArtifact.zip. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"s3EndpointType"`: Specifies the endpoint to use when getting Amazon S3 pre-signed URLs. + All Amazon Web Services Regions except US East (N. Virginia) use REGIONAL in all cases. In + the US East (N. Virginia) Region the default is GLOBAL, but you can change it to REGIONAL + with this parameter. +- `"x-amz-iot-endpoint-type"`: Determines if the Amazon S3 URL returned is a FIPS + pre-signed URL endpoint. Specify fips if you want the returned Amazon S3 pre-signed URL to + point to an Amazon S3 FIPS endpoint. If you don't specify a value, the default is standard. """ function get_component_version_artifact( arn, artifactName; aws_config::AbstractAWSConfig=global_aws_config() @@ -849,6 +857,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys options: ALL – The list includes all deployments. LATEST_ONLY – The list includes only the latest revision of each deployment. Default: LATEST_ONLY - `"maxResults"`: The maximum number of results to be returned per paginated request. + Default: 50 - `"nextToken"`: The token to be used for the next set of paginated results. - `"parentTargetArn"`: The parent deployment's target ARN within a subdeployment. - `"targetArn"`: The ARN of the target IoT thing or thing group. diff --git a/src/services/groundstation.jl b/src/services/groundstation.jl index 4e3ce1262b..c4e87e62c2 100644 --- a/src/services/groundstation.jl +++ b/src/services/groundstation.jl @@ -207,9 +207,9 @@ list of strings has two elements: a from ARN and a to ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"contactPostPassDurationSeconds"`: Amount of time after a contact ends that you’d like - to receive a CloudWatch event indicating the pass has finished. + to receive a Ground Station Contact State Change event indicating the pass has finished. - `"contactPrePassDurationSeconds"`: Amount of time prior to contact start you’d like to - receive a CloudWatch event indicating an upcoming pass. + receive a Ground Station Contact State Change event indicating an upcoming pass. - `"streamsKmsKey"`: KMS key to use for encrypting streams. - `"streamsKmsRole"`: Role to use for encrypting streams with KMS key. - `"tags"`: Tags assigned to a mission profile. @@ -1313,9 +1313,9 @@ parameters for existing future contacts. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"contactPostPassDurationSeconds"`: Amount of time after a contact ends that you’d like - to receive a CloudWatch event indicating the pass has finished. + to receive a Ground Station Contact State Change event indicating the pass has finished. - `"contactPrePassDurationSeconds"`: Amount of time after a contact ends that you’d like - to receive a CloudWatch event indicating the pass has finished. + to receive a Ground Station Contact State Change event indicating the pass has finished. - `"dataflowEdges"`: A list of lists of ARNs. Each list of ARNs is an edge, with a from Config and a to Config. - `"minimumViableContactDurationSeconds"`: Smallest amount of time in seconds that you’d diff --git a/src/services/guardduty.jl b/src/services/guardduty.jl index a555bbb3cf..4b096f42e2 100644 --- a/src/services/guardduty.jl +++ b/src/services/guardduty.jl @@ -152,12 +152,19 @@ end create_detector(enable) create_detector(enable, params::Dict{String,<:Any}) -Creates a single Amazon GuardDuty detector. A detector is a resource that represents the -GuardDuty service. To start using GuardDuty, you must create a detector in each Region -where you enable the service. You can have only one detector per account per Region. All -data sources are enabled in a new detector by default. There might be regional differences -because some data sources might not be available in all the Amazon Web Services Regions -where GuardDuty is presently supported. For more information, see Regions and endpoints. +Creates a single GuardDuty detector. A detector is a resource that represents the GuardDuty +service. To start using GuardDuty, you must create a detector in each Region where you +enable the service. You can have only one detector per account per Region. All data sources +are enabled in a new detector by default. When you don't specify any features, with an +exception to RUNTIME_MONITORING, all the optional features are enabled by default. When +you specify some of the features, any feature that is not specified in the API call gets +enabled by default, with an exception to RUNTIME_MONITORING. Specifying both EKS Runtime +Monitoring (EKS_RUNTIME_MONITORING) and Runtime Monitoring (RUNTIME_MONITORING) will cause +an error. You can add only one of these two features because Runtime Monitoring already +includes the threat detection for Amazon EKS resources. For more information, see Runtime +Monitoring. There might be regional differences because some data sources might not be +available in all the Amazon Web Services Regions where GuardDuty is presently supported. +For more information, see Regions and endpoints. 
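+
+As a sketch of the guidance above (the detector ID is generated by the service, and the
+"features" payload shape is an assumption based on the documented feature names), a
+detector with Runtime Monitoring might be created like this:
+
+```julia
+using AWS: @service
+@service GuardDuty
+
+# Enables GuardDuty in the current Region and requests RUNTIME_MONITORING explicitly,
+# rather than both runtime features, per the note above.
+detector = GuardDuty.create_detector(
+    true,
+    Dict("features" => [Dict("name" => "RUNTIME_MONITORING", "status" => "ENABLED")]),
+)
+```
+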
# Arguments - `enable`: A Boolean value that specifies whether the detector is to be enabled. @@ -213,11 +220,17 @@ GuardDuty. - `detector_id`: The ID of the detector belonging to the GuardDuty account that you want to create a filter for. - `finding_criteria`: Represents the criteria to be used in the filter for querying - findings. You can only use the following attributes to query findings: accountId region - id resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId - resource.accessKeyDetails.userName resource.accessKeyDetails.userType - resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId - resource.instanceDetails.instanceId resource.instanceDetails.outpostArn + findings. You can only use the following attributes to query findings: accountId id + region severity To filter on the basis of severity, the API and CLI use the following + input list for the FindingCriteria condition: Low: [\"1\", \"2\", \"3\"] Medium: + [\"4\", \"5\", \"6\"] High: [\"7\", \"8\", \"9\"] For more information, see Severity + levels for GuardDuty findings. type updatedAt Type: ISO 8601 string format: + YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains + milliseconds. resource.accessKeyDetails.accessKeyId + resource.accessKeyDetails.principalId resource.accessKeyDetails.userName + resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id + resource.instanceDetails.imageId resource.instanceDetails.instanceId + resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName @@ -225,33 +238,62 @@ GuardDuty. 
resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId - resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.tags.key - resource.instanceDetails.tags.value resource.resourceType service.action.actionType - service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType - service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.userAgent + resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.outpostArn + resource.resourceType resource.s3BucketDetails.publicAccess.effectivePermissions + resource.s3BucketDetails.name resource.s3BucketDetails.tags.key + resource.s3BucketDetails.tags.value resource.s3BucketDetails.type + service.action.actionType service.action.awsApiCallAction.api + service.action.awsApiCallAction.callerType service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 + service.action.awsApiCallAction.remoteIpDetails.ipAddressV6 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain + service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol - service.action.networkConnectionAction.localIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.city.cityName service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 + service.action.networkConnectionAction.remoteIpDetails.ipAddressV6 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port - service.additionalInfo.threatListName - resource.s3BucketDetails.publicAccess.effectivePermissions resource.s3BucketDetails.name - resource.s3BucketDetails.tags.key resource.s3BucketDetails.tags.value - resource.s3BucketDetails.type service.resourceRole severity type updatedAt Type: - ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on - whether the value contains milliseconds. 
+ service.action.awsApiCallAction.remoteAccountDetails.affiliated + service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4 + service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6 + service.action.kubernetesApiCallAction.namespace + service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn + service.action.kubernetesApiCallAction.requestUri + service.action.kubernetesApiCallAction.statusCode + service.action.networkConnectionAction.localIpDetails.ipAddressV4 + service.action.networkConnectionAction.localIpDetails.ipAddressV6 + service.action.networkConnectionAction.protocol + service.action.awsApiCallAction.serviceName + service.action.awsApiCallAction.remoteAccountDetails.accountId + service.additionalInfo.threatListName service.resourceRole + resource.eksClusterDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.name + resource.kubernetesDetails.kubernetesWorkloadDetails.namespace + resource.kubernetesDetails.kubernetesUserDetails.username + resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image + resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix + service.ebsVolumeScanDetails.scanId + service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name + service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity + service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash + resource.ecsClusterDetails.name resource.ecsClusterDetails.taskDetails.containers.image + resource.ecsClusterDetails.taskDetails.definitionArn resource.containerDetails.image + resource.rdsDbInstanceDetails.dbInstanceIdentifier + resource.rdsDbInstanceDetails.dbClusterIdentifier resource.rdsDbInstanceDetails.engine + resource.rdsDbUserDetails.user resource.rdsDbInstanceDetails.tags.key + resource.rdsDbInstanceDetails.tags.value service.runtimeDetails.process.executableSha256 + service.runtimeDetails.process.name service.runtimeDetails.process.name + resource.lambdaDetails.functionName resource.lambdaDetails.functionArn + resource.lambdaDetails.tags.key resource.lambdaDetails.tags.value - `name`: The name of the filter. Valid characters include period (.), underscore (_), dash (-), and alphanumeric characters. A whitespace is considered to be an invalid character. @@ -386,18 +428,91 @@ function create_ipset( ) end +""" + create_malware_protection_plan(protected_resource, role) + create_malware_protection_plan(protected_resource, role, params::Dict{String,<:Any}) + +Creates a new Malware Protection plan for the protected resource. When you create a Malware +Protection plan, the Amazon Web Services service terms for GuardDuty Malware Protection +apply. For more information, see Amazon Web Services service terms for GuardDuty Malware +Protection. + +# Arguments +- `protected_resource`: Information about the protected resource that is associated with + the created Malware Protection plan. Presently, S3Bucket is the only supported protected + resource. +- `role`: IAM role with permissions required to scan and add tags to the associated + protected resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"actions"`: Information about whether the tags will be added to the S3 object after + scanning. +- `"clientToken"`: The idempotency token for the create request. +- `"tags"`: Tags added to the Malware Protection plan resource. 
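+
+A sketch of creating a plan for an S3 bucket follows; the bucket name and role ARN are
+placeholders, and the exact field names inside protectedResource are assumptions.
+
+```julia
+using AWS: @service
+@service GuardDuty
+
+plan = GuardDuty.create_malware_protection_plan(
+    # protectedResource: S3 bucket shape assumed; bucket name is a placeholder.
+    Dict("s3Bucket" => Dict("bucketName" => "amzn-s3-demo-bucket")),
+    # role: placeholder ARN for a role with the required scan/tag permissions.
+    "arn:aws:iam::111122223333:role/GuardDutyMalwareProtectionRole",
+)
+```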
+""" +function create_malware_protection_plan( + protectedResource, role; aws_config::AbstractAWSConfig=global_aws_config() +) + return guardduty( + "POST", + "/malware-protection-plan", + Dict{String,Any}( + "protectedResource" => protectedResource, + "role" => role, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_malware_protection_plan( + protectedResource, + role, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return guardduty( + "POST", + "/malware-protection-plan", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "protectedResource" => protectedResource, + "role" => role, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_members(account_details, detector_id) create_members(account_details, detector_id, params::Dict{String,<:Any}) Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated -member accounts either by invitation or through an organization. When using Create Members -as an organizations delegated administrator this action will enable GuardDuty in the added -member accounts, with the exception of the organization delegated administrator account, -which must enable GuardDuty prior to being added as a member. If you are adding accounts by -invitation, use this action after GuardDuty has bee enabled in potential member accounts -and before using InviteMembers. +member accounts either by invitation or through an organization. As a delegated +administrator, using CreateMembers will enable GuardDuty in the added member accounts, with +the exception of the organization delegated administrator account. A delegated +administrator must enable GuardDuty prior to being added as a member. When you use +CreateMembers as an Organizations delegated administrator, GuardDuty applies your +organization's auto-enable settings to the member accounts in this request, irrespective of +the accounts being new or existing members. For more information about the existing +auto-enable settings for your organization, see DescribeOrganizationConfiguration. If you +disassociate a member account that was added by invitation, the member account details +obtained from this API, including the associated email addresses, will be retained. This is +done so that the delegated administrator can invoke the InviteMembers API without the need +to invoke the CreateMembers API again. To remove the details associated with a member +account, the delegated administrator must invoke the DeleteMembers API. When the member +accounts added through Organizations are later disassociated, you (administrator) can't +invite them by calling the InviteMembers API. You can create an association with these +member accounts again only by calling the CreateMembers API. # Arguments - `account_details`: A list of account ID and email address pairs of the accounts that you @@ -790,6 +905,43 @@ function delete_ipset( ) end +""" + delete_malware_protection_plan(malware_protection_plan_id) + delete_malware_protection_plan(malware_protection_plan_id, params::Dict{String,<:Any}) + +Deletes the Malware Protection plan ID associated with the Malware Protection plan +resource. Use this API only when you no longer want to protect the resource associated with +this Malware Protection plan ID. 
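+
+A minimal sketch: the plan ID below is a placeholder that would normally come from
+create_malware_protection_plan or list_malware_protection_plans (both defined in this
+module).
+
+```julia
+using AWS: @service
+@service GuardDuty
+
+# Stops Malware Protection for the associated resource; the plan ID is hypothetical.
+GuardDuty.delete_malware_protection_plan("example-malware-protection-plan-id")
+```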
+ +# Arguments +- `malware_protection_plan_id`: A unique identifier associated with Malware Protection plan + resource. + +""" +function delete_malware_protection_plan( + malwareProtectionPlanId; aws_config::AbstractAWSConfig=global_aws_config() +) + return guardduty( + "DELETE", + "/malware-protection-plan/$(malwareProtectionPlanId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_malware_protection_plan( + malwareProtectionPlanId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return guardduty( + "DELETE", + "/malware-protection-plan/$(malwareProtectionPlanId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_members(account_ids, detector_id) delete_members(account_ids, detector_id, params::Dict{String,<:Any}) @@ -1045,8 +1197,8 @@ end disable_organization_admin_account(admin_account_id) disable_organization_admin_account(admin_account_id, params::Dict{String,<:Any}) -Disables an Amazon Web Services account within the Organization as the GuardDuty delegated -administrator. +Removes the existing GuardDuty delegated administrator of the organization. Only the +organization's management account can run this API operation. # Arguments - `admin_account_id`: The Amazon Web Services Account ID for the organizations account to @@ -1084,7 +1236,12 @@ end disassociate_from_administrator_account(detector_id) disassociate_from_administrator_account(detector_id, params::Dict{String,<:Any}) -Disassociates the current GuardDuty member account from its administrator account. With +Disassociates the current GuardDuty member account from its administrator account. When you +disassociate an invited member from a GuardDuty delegated administrator, the member account +details obtained from the CreateMembers API, including the associated email addresses, are +retained. This is done so that the delegated administrator can invoke the InviteMembers API +without the need to invoke the CreateMembers API again. To remove the details associated +with a member account, the delegated administrator must invoke the DeleteMembers API. With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty in a member account. @@ -1120,7 +1277,12 @@ end disassociate_from_master_account(detector_id) disassociate_from_master_account(detector_id, params::Dict{String,<:Any}) -Disassociates the current GuardDuty member account from its administrator account. +Disassociates the current GuardDuty member account from its administrator account. When you +disassociate an invited member from a GuardDuty delegated administrator, the member account +details obtained from the CreateMembers API, including the associated email addresses, are +retained. This is done so that the delegated administrator can invoke the InviteMembers API +without the need to invoke the CreateMembers API again. To remove the details associated +with a member account, the delegated administrator must invoke the DeleteMembers API. # Arguments - `detector_id`: The unique ID of the detector of the GuardDuty member account. @@ -1154,10 +1316,23 @@ end disassociate_members(account_ids, detector_id) disassociate_members(account_ids, detector_id, params::Dict{String,<:Any}) -Disassociates GuardDuty member accounts (to the current administrator account) specified by -the account IDs. 
With autoEnableOrganizationMembers configuration for your organization set -to ALL, you'll receive an error if you attempt to disassociate a member account before -removing them from your Amazon Web Services organization. +Disassociates GuardDuty member accounts (from the current administrator account) specified +by the account IDs. When you disassociate an invited member from a GuardDuty delegated +administrator, the member account details obtained from the CreateMembers API, including +the associated email addresses, are retained. This is done so that the delegated +administrator can invoke the InviteMembers API without the need to invoke the CreateMembers +API again. To remove the details associated with a member account, the delegated +administrator must invoke the DeleteMembers API. With autoEnableOrganizationMembers +configuration for your organization set to ALL, you'll receive an error if you attempt to +disassociate a member account before removing them from your organization. If you +disassociate a member account that was added by invitation, the member account details +obtained from this API, including the associated email addresses, will be retained. This is +done so that the delegated administrator can invoke the InviteMembers API without the need +to invoke the CreateMembers API again. To remove the details associated with a member +account, the delegated administrator must invoke the DeleteMembers API. When the member +accounts added through Organizations are later disassociated, you (administrator) can't +invite them by calling the InviteMembers API. You can create an association with these +member accounts again only by calling the CreateMembers API. # Arguments - `account_ids`: A list of account IDs of the GuardDuty member accounts that you want to @@ -1198,11 +1373,12 @@ end enable_organization_admin_account(admin_account_id) enable_organization_admin_account(admin_account_id, params::Dict{String,<:Any}) -Enables an Amazon Web Services account within the organization as the GuardDuty delegated -administrator. +Designates an Amazon Web Services account within the organization as your GuardDuty +delegated administrator. Only the organization's management account can run this API +operation. # Arguments -- `admin_account_id`: The Amazon Web Services Account ID for the organization account to be +- `admin_account_id`: The Amazon Web Services account ID for the organization account to be enabled as a GuardDuty delegated administrator. """ @@ -1237,8 +1413,9 @@ end get_administrator_account(detector_id) get_administrator_account(detector_id, params::Dict{String,<:Any}) -Provides the details for the GuardDuty administrator account associated with the current -GuardDuty member account. +Provides the details of the GuardDuty administrator account associated with the current +GuardDuty member account. If the organization's management account or a delegated +administrator runs this API, it will return success (HTTP 200) but no content. # Arguments - `detector_id`: The unique ID of the detector of the GuardDuty member account. @@ -1274,8 +1451,8 @@ end Retrieves aggregated statistics for your account. If you are a GuardDuty administrator, you can retrieve the statistics for all the resources associated with the active member -accounts in your organization who have enabled EKS Runtime Monitoring and have the -GuardDuty agent running on their EKS nodes. +accounts in your organization who have enabled Runtime Monitoring and have the GuardDuty +security agent running on their resources. 
# Arguments - `detector_id`: The unique ID of the GuardDuty detector associated to the coverage @@ -1432,7 +1609,9 @@ end get_findings_statistics(detector_id, finding_statistic_types) get_findings_statistics(detector_id, finding_statistic_types, params::Dict{String,<:Any}) -Lists Amazon GuardDuty findings statistics for the specified detector ID. +Lists Amazon GuardDuty findings statistics for the specified detector ID. There might be +regional differences because some flags might not be available in all the Regions where +GuardDuty is currently supported. For more information, see Regions and endpoints. # Arguments - `detector_id`: The ID of the detector that specifies the GuardDuty service whose @@ -1534,6 +1713,41 @@ function get_ipset( ) end +""" + get_malware_protection_plan(malware_protection_plan_id) + get_malware_protection_plan(malware_protection_plan_id, params::Dict{String,<:Any}) + +Retrieves the Malware Protection plan details associated with a Malware Protection plan ID. + +# Arguments +- `malware_protection_plan_id`: A unique identifier associated with Malware Protection plan + resource. + +""" +function get_malware_protection_plan( + malwareProtectionPlanId; aws_config::AbstractAWSConfig=global_aws_config() +) + return guardduty( + "GET", + "/malware-protection-plan/$(malwareProtectionPlanId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_malware_protection_plan( + malwareProtectionPlanId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return guardduty( + "GET", + "/malware-protection-plan/$(malwareProtectionPlanId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_malware_scan_settings(detector_id) get_malware_scan_settings(detector_id, params::Dict{String,<:Any}) @@ -1687,6 +1901,36 @@ function get_members( ) end +""" + get_organization_statistics() + get_organization_statistics(params::Dict{String,<:Any}) + +Retrieves how many active member accounts have each feature enabled within GuardDuty. Only +a delegated GuardDuty administrator of an organization can run this API. When you create a +new organization, it might take up to 24 hours to generate the statistics for the entire +organization. + +""" +function get_organization_statistics(; aws_config::AbstractAWSConfig=global_aws_config()) + return guardduty( + "GET", + "/organization/statistics"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_organization_statistics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return guardduty( + "GET", + "/organization/statistics", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_remaining_free_trial_days(detector_id) get_remaining_free_trial_days(detector_id, params::Dict{String,<:Any}) @@ -1831,10 +2075,29 @@ end invite_members(account_ids, detector_id) invite_members(account_ids, detector_id, params::Dict{String,<:Any}) -Invites other Amazon Web Services accounts (created as members of the current Amazon Web -Services account by CreateMembers) to enable GuardDuty, and allow the current Amazon Web -Services account to view and manage these accounts' findings on their behalf as the -GuardDuty administrator account. +Invites Amazon Web Services accounts to become members of an organization administered by +the Amazon Web Services account that invokes this API. 
If you are using Amazon Web Services +Organizations to manage your GuardDuty environment, this step is not needed. For more +information, see Managing accounts with organizations. To invite Amazon Web Services +accounts, the first step is to ensure that GuardDuty has been enabled in the potential +member accounts. You can now invoke this API to add accounts by invitation. The invited +accounts can either accept or decline the invitation from their GuardDuty accounts. Each +invited Amazon Web Services account can choose to accept the invitation from only one +Amazon Web Services account. For more information, see Managing GuardDuty accounts by +invitation. After the invite has been accepted and you choose to disassociate a member +account (by using DisassociateMembers) from your account, the details of the member account +obtained by invoking CreateMembers, including the associated email addresses, will be +retained. This is done so that you can invoke InviteMembers without the need to invoke +CreateMembers again. To remove the details associated with a member account, you must also +invoke DeleteMembers. If you disassociate a member account that was added by invitation, +the member account details obtained from this API, including the associated email +addresses, will be retained. This is done so that the delegated administrator can invoke +the InviteMembers API without the need to invoke the CreateMembers API again. To remove the +details associated with a member account, the delegated administrator must invoke the +DeleteMembers API. When the member accounts added through Organizations are later +disassociated, you (administrator) can't invite them by calling the InviteMembers API. You +can create an association with these member accounts again only by calling the +CreateMembers API. # Arguments - `account_ids`: A list of account IDs of the accounts that you want to invite to GuardDuty @@ -1883,8 +2146,8 @@ end Lists coverage details for your GuardDuty account. If you're a GuardDuty administrator, you can retrieve all resources associated with the active member accounts in your organization. -Make sure the accounts have EKS Runtime Monitoring enabled and GuardDuty agent running on -their EKS nodes. +Make sure the accounts have Runtime Monitoring enabled and GuardDuty agent running on their +resources. # Arguments - `detector_id`: The unique ID of the detector whose coverage details you want to retrieve. @@ -1993,7 +2256,9 @@ end list_findings(detector_id) list_findings(detector_id, params::Dict{String,<:Any}) -Lists Amazon GuardDuty findings for the specified detector ID. +Lists GuardDuty findings for the specified detector ID. There might be regional differences +because some flags might not be available in all the Regions where GuardDuty is currently +supported. For more information, see Regions and endpoints. # Arguments - `detector_id`: The ID of the detector that specifies the GuardDuty service whose findings @@ -2023,6 +2288,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain + service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port @@ -2138,6 +2404,40 @@ function list_ipsets( ) end +""" + list_malware_protection_plans() + list_malware_protection_plans(params::Dict{String,<:Any}) + +Lists the Malware Protection plan IDs associated with the protected resources in your +Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"nextToken"`: You can use this parameter when paginating results. Set the value of this + parameter to null on your first call to the list action. For subsequent calls to the + action, fill nextToken in the request with the value of NextToken from the previous + response to continue listing data. +""" +function list_malware_protection_plans(; aws_config::AbstractAWSConfig=global_aws_config()) + return guardduty( + "GET", + "/malware-protection-plan"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_malware_protection_plans( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return guardduty( + "GET", + "/malware-protection-plan", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_members(detector_id) list_members(detector_id, params::Dict{String,<:Any}) @@ -2186,7 +2486,8 @@ end list_organization_admin_accounts() list_organization_admin_accounts(params::Dict{String,<:Any}) -Lists the accounts configured as GuardDuty delegated administrators. +Lists the accounts designated as GuardDuty delegated administrators. Only the +organization's management account can run this API operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2257,8 +2558,8 @@ end list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) Lists tags for a resource. Tagging is currently supported for detectors, finding filters, -IP sets, and threat intel sets, with a limit of 50 tags per resource. When invoked, this -operation returns all assigned tags for a given resource. +IP sets, threat intel sets, and publishing destination, with a limit of 50 tags per +resource. When invoked, this operation returns all assigned tags for a given resource. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) for the given GuardDuty resource. @@ -2337,7 +2638,8 @@ end start_malware_scan(resource_arn, params::Dict{String,<:Any}) Initiates the malware scan. Invoking this API will automatically create the Service-linked -role in the corresponding account. +role in the corresponding account. When the malware scan starts, you can use the associated +scan ID to track the status of the scan. For more information, see DescribeMalwareScans. # Arguments - `resource_arn`: Amazon Resource Name (ARN) of the resource for which you invoked the API. @@ -2569,10 +2871,13 @@ end update_detector(detector_id) update_detector(detector_id, params::Dict{String,<:Any}) -Updates the Amazon GuardDuty detector specified by the detectorId. 
There might be regional -differences because some data sources might not be available in all the Amazon Web Services -Regions where GuardDuty is presently supported. For more information, see Regions and -endpoints. +Updates the GuardDuty detector specified by the detector ID. Specifying both EKS Runtime +Monitoring (EKS_RUNTIME_MONITORING) and Runtime Monitoring (RUNTIME_MONITORING) will cause +an error. You can add only one of these two features because Runtime Monitoring already +includes the threat detection for Amazon EKS resources. For more information, see Runtime +Monitoring. There might be regional differences because some data sources might not be +available in all the Amazon Web Services Regions where GuardDuty is presently supported. +For more information, see Regions and endpoints. # Arguments - `detector_id`: The unique ID of the detector to update. @@ -2749,6 +3054,50 @@ function update_ipset( ) end +""" + update_malware_protection_plan(malware_protection_plan_id) + update_malware_protection_plan(malware_protection_plan_id, params::Dict{String,<:Any}) + +Updates an existing Malware Protection plan resource. + +# Arguments +- `malware_protection_plan_id`: A unique identifier associated with the Malware Protection + plan. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"actions"`: Information about whether the tags will be added to the S3 object after + scanning. +- `"protectedResource"`: Information about the protected resource that is associated with + the created Malware Protection plan. Presently, S3Bucket is the only supported protected + resource. +- `"role"`: IAM role with permissions required to scan and add tags to the associated + protected resource. +""" +function update_malware_protection_plan( + malwareProtectionPlanId; aws_config::AbstractAWSConfig=global_aws_config() +) + return guardduty( + "PATCH", + "/malware-protection-plan/$(malwareProtectionPlanId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_malware_protection_plan( + malwareProtectionPlanId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return guardduty( + "PATCH", + "/malware-protection-plan/$(malwareProtectionPlanId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_malware_scan_settings(detector_id) update_malware_scan_settings(detector_id, params::Dict{String,<:Any}) @@ -2796,9 +3145,13 @@ end update_member_detectors(account_ids, detector_id) update_member_detectors(account_ids, detector_id, params::Dict{String,<:Any}) -Contains information on member accounts to be updated. There might be regional differences -because some data sources might not be available in all the Amazon Web Services Regions -where GuardDuty is presently supported. For more information, see Regions and endpoints. +Contains information on member accounts to be updated. Specifying both EKS Runtime +Monitoring (EKS_RUNTIME_MONITORING) and Runtime Monitoring (RUNTIME_MONITORING) will cause +an error. You can add only one of these two features because Runtime Monitoring already +includes the threat detection for Amazon EKS resources. For more information, see Runtime +Monitoring. There might be regional differences because some data sources might not be +available in all the Amazon Web Services Regions where GuardDuty is presently supported. +For more information, see Regions and endpoints. 
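+
+As a hedged example of the constraint above (account and detector IDs are placeholders,
+and the "features" payload shape is an assumption), enabling Runtime Monitoring for a
+member account might look like:
+
+```julia
+using AWS: @service
+@service GuardDuty
+
+GuardDuty.update_member_detectors(
+    ["111122223333"],                    # account_ids (placeholder member account)
+    "12abc34d567e8fa901bc2d34e56789f0",  # detector_id (placeholder)
+    Dict("features" => [Dict("name" => "RUNTIME_MONITORING", "status" => "ENABLED")]),
+)
+```
+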
# Arguments - `account_ids`: A list of member account IDs to be updated. @@ -2841,28 +3194,39 @@ end update_organization_configuration(detector_id) update_organization_configuration(detector_id, params::Dict{String,<:Any}) -Configures the delegated administrator account with the provided values. You must provide -the value for either autoEnableOrganizationMembers or autoEnable. There might be regional -differences because some data sources might not be available in all the Amazon Web Services -Regions where GuardDuty is presently supported. For more information, see Regions and -endpoints. +Configures the delegated administrator account with the provided values. You must provide a +value for either autoEnableOrganizationMembers or autoEnable, but not both. Specifying +both EKS Runtime Monitoring (EKS_RUNTIME_MONITORING) and Runtime Monitoring +(RUNTIME_MONITORING) will cause an error. You can add only one of these two features +because Runtime Monitoring already includes the threat detection for Amazon EKS resources. +For more information, see Runtime Monitoring. There might be regional differences because +some data sources might not be available in all the Amazon Web Services Regions where +GuardDuty is presently supported. For more information, see Regions and endpoints. # Arguments - `detector_id`: The ID of the detector that configures the delegated administrator. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoEnable"`: Indicates whether to automatically enable member accounts in the +- `"autoEnable"`: Represents whether or not to automatically enable member accounts in the organization. Even though this is still supported, we recommend using - AutoEnableOrganizationMembers to achieve the similar results. + AutoEnableOrganizationMembers to achieve the similar results. You must provide a value for + either autoEnableOrganizationMembers or autoEnable. - `"autoEnableOrganizationMembers"`: Indicates the auto-enablement configuration of - GuardDuty for the member accounts in the organization. NEW: Indicates that when a new - account joins the organization, they will have GuardDuty enabled automatically. ALL: - Indicates that all accounts in the Amazon Web Services Organization have GuardDuty enabled - automatically. This includes NEW accounts that join the organization and accounts that may - have been suspended or removed from the organization in GuardDuty. NONE: Indicates that - GuardDuty will not be automatically enabled for any accounts in the organization. GuardDuty - must be managed for each account individually by the administrator. + GuardDuty for the member accounts in the organization. You must provide a value for either + autoEnableOrganizationMembers or autoEnable. Use one of the following configuration values + for autoEnableOrganizationMembers: NEW: Indicates that when a new account joins the + organization, they will have GuardDuty enabled automatically. ALL: Indicates that all + accounts in the organization have GuardDuty enabled automatically. This includes NEW + accounts that join the organization and accounts that may have been suspended or removed + from the organization in GuardDuty. It may take up to 24 hours to update the configuration + for all the member accounts. NONE: Indicates that GuardDuty will not be automatically + enabled for any account in the organization. The administrator must manage GuardDuty for + each account in the organization individually. 
When you update the auto-enable setting from + ALL or NEW to NONE, this action doesn't disable the corresponding option for your existing + accounts. This configuration will apply to the new accounts that join the organization. + After you update the auto-enable settings, no new account will have the corresponding + option as enabled. - `"dataSources"`: Describes which data sources will be updated. - `"features"`: A list of features that will be configured for the organization. """ diff --git a/src/services/health.jl b/src/services/health.jl index c8d8f8b52f..d9006c0a56 100644 --- a/src/services/health.jl +++ b/src/services/health.jl @@ -106,8 +106,8 @@ function describe_affected_entities( end """ - describe_affected_entities_for_organization(organization_entity_filters) - describe_affected_entities_for_organization(organization_entity_filters, params::Dict{String,<:Any}) + describe_affected_entities_for_organization() + describe_affected_entities_for_organization(params::Dict{String,<:Any}) Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in Organizations, based on the filter criteria. Entities can @@ -122,10 +122,6 @@ permissions. You can't use this operation to allow or deny access to specific He events. For more information, see Resource- and action-based conditions in the Health User Guide. -# Arguments -- `organization_entity_filters`: A JSON set of elements including the awsAccountId and the - eventArn. - # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"locale"`: The locale (language) to return information in. English (en) is the default @@ -136,31 +132,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value. +- `"organizationEntityAccountFilters"`: A JSON set of elements including the awsAccountId, + eventArn and a set of statusCodes. +- `"organizationEntityFilters"`: A JSON set of elements including the awsAccountId and the + eventArn. 
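+
+As an illustrative sketch of the account-level filter above: the account ID, event ARN,
+and status code are placeholders, and the element key names follow the description of
+organizationEntityAccountFilters.
+
+```julia
+using AWS: @service
+@service Health
+
+Health.describe_affected_entities_for_organization(Dict(
+    "organizationEntityAccountFilters" => [Dict(
+        "awsAccountId" => "111122223333",
+        "eventArn" => "arn:aws:health:us-east-1::event/EC2/EXAMPLE_EVENT/EXAMPLE_EVENT_123",
+        "statusCodes" => ["IMPAIRED"],
+    )],
+))
+```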
""" -function describe_affected_entities_for_organization( - organizationEntityFilters; aws_config::AbstractAWSConfig=global_aws_config() +function describe_affected_entities_for_organization(; + aws_config::AbstractAWSConfig=global_aws_config() ) return health( - "DescribeAffectedEntitiesForOrganization", - Dict{String,Any}("organizationEntityFilters" => organizationEntityFilters); + "DescribeAffectedEntitiesForOrganization"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function describe_affected_entities_for_organization( - organizationEntityFilters, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return health( "DescribeAffectedEntitiesForOrganization", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("organizationEntityFilters" => organizationEntityFilters), - params, - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -196,6 +187,50 @@ function describe_entity_aggregates( ) end +""" + describe_entity_aggregates_for_organization(event_arns) + describe_entity_aggregates_for_organization(event_arns, params::Dict{String,<:Any}) + +Returns a list of entity aggregates for your Organizations that are affected by each of the +specified events. + +# Arguments +- `event_arns`: A list of event ARNs (unique identifiers). For example: + \"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREM + ENT_SCHEDULED_ABC123-CDE456\", + \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101 + \" + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"awsAccountIds"`: A list of 12-digit Amazon Web Services account numbers that contains + the affected entities. +""" +function describe_entity_aggregates_for_organization( + eventArns; aws_config::AbstractAWSConfig=global_aws_config() +) + return health( + "DescribeEntityAggregatesForOrganization", + Dict{String,Any}("eventArns" => eventArns); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_entity_aggregates_for_organization( + eventArns, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return health( + "DescribeEntityAggregatesForOrganization", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("eventArns" => eventArns), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_event_aggregates(aggregate_field) describe_event_aggregates(aggregate_field, params::Dict{String,<:Any}) diff --git a/src/services/healthlake.jl b/src/services/healthlake.jl index dd349ed7cc..4546c348b3 100644 --- a/src/services/healthlake.jl +++ b/src/services/healthlake.jl @@ -8,24 +8,24 @@ using AWS.UUIDs create_fhirdatastore(datastore_type_version) create_fhirdatastore(datastore_type_version, params::Dict{String,<:Any}) -Creates a Data Store that can ingest and export FHIR formatted data. +Creates a data store that can ingest and export FHIR formatted data. # Arguments -- `datastore_type_version`: The FHIR version of the Data Store. The only supported version +- `datastore_type_version`: The FHIR version of the data store. The only supported version is R4. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Optional user provided token used for ensuring idempotency. 
-- `"DatastoreName"`: The user generated name for the Data Store. +- `"DatastoreName"`: The user generated name for the data store. - `"IdentityProviderConfiguration"`: The configuration of the identity provider that you - want to use for your Data Store. -- `"PreloadDataConfig"`: Optional parameter to preload data upon creation of the Data - Store. Currently, the only supported preloaded data is synthetic data generated from + want to use for your data store. +- `"PreloadDataConfig"`: Optional parameter to preload data upon creation of the data + store. Currently, the only supported preloaded data is synthetic data generated from Synthea. - `"SseConfiguration"`: The server-side encryption key configuration for a customer - provided encryption key specified for creating a Data Store. -- `"Tags"`: Resource tags that are applied to a Data Store when it is created. + provided encryption key specified for creating a data store. +- `"Tags"`: Resource tags that are applied to a data store when it is created. """ function create_fhirdatastore( DatastoreTypeVersion; aws_config::AbstractAWSConfig=global_aws_config() @@ -65,10 +65,10 @@ end delete_fhirdatastore(datastore_id) delete_fhirdatastore(datastore_id, params::Dict{String,<:Any}) -Deletes a Data Store. +Deletes a data store. # Arguments -- `datastore_id`: The AWS-generated ID for the Data Store to be deleted. +- `datastore_id`: The AWS-generated ID for the data store to be deleted. """ function delete_fhirdatastore( @@ -100,12 +100,12 @@ end describe_fhirdatastore(datastore_id) describe_fhirdatastore(datastore_id, params::Dict{String,<:Any}) -Gets the properties associated with the FHIR Data Store, including the Data Store ID, Data -Store ARN, Data Store name, Data Store status, created at, Data Store type version, and -Data Store endpoint. +Gets the properties associated with the FHIR data store, including the data store ID, data +store ARN, data store name, data store status, when the data store was created, data store +type version, and the data store's endpoint. # Arguments -- `datastore_id`: The AWS-generated Data Store ID. +- `datastore_id`: The AWS-generated data store ID. """ function describe_fhirdatastore( @@ -141,7 +141,7 @@ Displays the properties of a FHIR export job, including the ID, ARN, name, and t of the job. # Arguments -- `datastore_id`: The AWS generated ID for the Data Store from which files are being +- `datastore_id`: The AWS generated ID for the data store from which files are being exported from for an export job. - `job_id`: The AWS generated ID for an export job. @@ -184,7 +184,7 @@ Displays the properties of a FHIR import job, including the ID, ARN, name, and t of the job. # Arguments -- `datastore_id`: The AWS-generated ID of the Data Store. +- `datastore_id`: The AWS-generated ID of the data store. - `job_id`: The AWS-generated job ID. """ @@ -222,15 +222,15 @@ end list_fhirdatastores() list_fhirdatastores(params::Dict{String,<:Any}) -Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store +Lists all FHIR data stores that are in the user’s account, regardless of data store status. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: Lists all filters associated with a FHIR Data Store request. -- `"MaxResults"`: The maximum number of Data Stores returned in a single page of a +- `"Filter"`: Lists all filters associated with a FHIR data store request. 
+- `"MaxResults"`: The maximum number of data stores returned in a single page of a ListFHIRDatastoresRequest call. -- `"NextToken"`: Fetches the next page of Data Stores when results are paginated. +- `"NextToken"`: Fetches the next page of data stores when results are paginated. """ function list_fhirdatastores(; aws_config::AbstractAWSConfig=global_aws_config()) return healthlake( @@ -253,7 +253,7 @@ end # Arguments - `datastore_id`: This parameter limits the response to the export job with the specified - Data Store ID. + data store ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -303,7 +303,7 @@ end # Arguments - `datastore_id`: This parameter limits the response to the import job with the specified - Data Store ID. + data store ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -349,10 +349,10 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) - Returns a list of all existing tags associated with a Data Store. + Returns a list of all existing tags associated with a data store. # Arguments -- `resource_arn`: The Amazon Resource Name(ARN) of the Data Store for which tags are being +- `resource_arn`: The Amazon Resource Name(ARN) of the data store for which tags are being added. """ @@ -390,7 +390,7 @@ Begins a FHIR export job. # Arguments - `client_token`: An optional user provided token used for ensuring idempotency. - `data_access_role_arn`: The Amazon Resource Name used during the initiation of the job. -- `datastore_id`: The AWS generated ID for the Data Store from which files are being +- `datastore_id`: The AWS generated ID for the data store from which files are being exported for an export job. - `output_data_config`: The output data configuration that was supplied when the export job was created. @@ -453,9 +453,9 @@ Begins a FHIR Import job. # Arguments - `client_token`: Optional user provided token used for ensuring idempotency. -- `data_access_role_arn`: The Amazon Resource Name (ARN) that gives Amazon HealthLake - access permission. -- `datastore_id`: The AWS-generated Data Store ID. +- `data_access_role_arn`: The Amazon Resource Name (ARN) that gives AWS HealthLake access + permission. +- `datastore_id`: The AWS-generated data store ID. - `input_data_config`: The input properties of the FHIR Import job in the StartFHIRImport job request. - `job_output_data_config`: @@ -518,12 +518,12 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) - Adds a user specified key and value tag to a Data Store. + Adds a user specified key and value tag to a data store. # Arguments -- `resource_arn`: The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the - Data Store which tags are being added to. -- `tags`: The user specified key and value pair tags being added to a Data Store. +- `resource_arn`: The Amazon Resource Name(ARN)that gives AWS HealthLake access to the + data store which tags are being added to. +- `tags`: The user specified key and value pair tags being added to a data store. """ function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -558,12 +558,12 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) - Removes tags from a Data Store. + Removes tags from a data store. 
# Arguments -- `resource_arn`: \"The Amazon Resource Name(ARN) of the Data Store for which tags are - being removed -- `tag_keys`: The keys for the tags to be removed from the Healthlake Data Store. +- `resource_arn`: The Amazon Resource Name(ARN) of the data store for which tags are being + removed. +- `tag_keys`: The keys for the tags to be removed from the HealthLake data store. """ function untag_resource( diff --git a/src/services/honeycode.jl b/src/services/honeycode.jl deleted file mode 100644 index 8decee5ef5..0000000000 --- a/src/services/honeycode.jl +++ /dev/null @@ -1,786 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: honeycode -using AWS.Compat -using AWS.UUIDs - -""" - batch_create_table_rows(rows_to_create, table_id, workbook_id) - batch_create_table_rows(rows_to_create, table_id, workbook_id, params::Dict{String,<:Any}) - - The BatchCreateTableRows API allows you to create one or more rows at the end of a table -in a workbook. The API allows you to specify the values to set in some or all of the -columns in the new rows. If a column is not explicitly set in a specific row, then the -column level formula specified in the table will be applied to the new row. If there is no -column level formula but the last row of the table has a formula, then that formula will be -copied down to the new row. If there is no column level formula and no formula in the last -row of the table, then that column will be left blank for the new rows. - -# Arguments -- `rows_to_create`: The list of rows to create at the end of the table. Each item in this - list needs to have a batch item id to uniquely identify the element in the request and the - cells to create for that row. You need to specify at least one item in this list. Note - that if one of the column ids in any of the rows in the request does not exist in the - table, then the request fails and no updates are made to the table. -- `table_id`: The ID of the table where the new rows are being added. If a table with the - specified ID could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook where the new rows are being added. If a workbook - with the specified ID could not be found, this API throws ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: The request token for performing the batch create operation. - Request tokens help to identify duplicate requests. If a call times out or fails due to a - transient error like a failed network connection, you can retry the call with the same - request token. The service ensures that if the first call using that request token is - successfully performed, the second call will not perform the operation again. Note that - request tokens are valid only for a few minutes. You cannot use request tokens to dedupe - requests spanning hours or days. 
-""" -function batch_create_table_rows( - rowsToCreate, tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchcreate", - Dict{String,Any}("rowsToCreate" => rowsToCreate); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function batch_create_table_rows( - rowsToCreate, - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchcreate", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("rowsToCreate" => rowsToCreate), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - batch_delete_table_rows(row_ids, table_id, workbook_id) - batch_delete_table_rows(row_ids, table_id, workbook_id, params::Dict{String,<:Any}) - - The BatchDeleteTableRows API allows you to delete one or more rows from a table in a -workbook. You need to specify the ids of the rows that you want to delete from the table. - -# Arguments -- `row_ids`: The list of row ids to delete from the table. You need to specify at least - one row id in this list. Note that if one of the row ids provided in the request does not - exist in the table, then the request fails and no rows are deleted from the table. -- `table_id`: The ID of the table where the rows are being deleted. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook where the rows are being deleted. If a workbook - with the specified id could not be found, this API throws ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: The request token for performing the delete action. Request - tokens help to identify duplicate requests. If a call times out or fails due to a transient - error like a failed network connection, you can retry the call with the same request token. - The service ensures that if the first call using that request token is successfully - performed, the second call will not perform the action again. Note that request tokens - are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning - hours or days. -""" -function batch_delete_table_rows( - rowIds, tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchdelete", - Dict{String,Any}("rowIds" => rowIds); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function batch_delete_table_rows( - rowIds, - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchdelete", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("rowIds" => rowIds), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - batch_update_table_rows(rows_to_update, table_id, workbook_id) - batch_update_table_rows(rows_to_update, table_id, workbook_id, params::Dict{String,<:Any}) - - The BatchUpdateTableRows API allows you to update one or more rows in a table in a -workbook. You can specify the values to set in some or all of the columns in the table -for the specified rows. 
If a column is not explicitly specified in a particular row, then -that column will not be updated for that row. To clear out the data in a specific cell, you -need to set the value as an empty string (\"\"). - -# Arguments -- `rows_to_update`: The list of rows to update in the table. Each item in this list needs - to contain the row id to update along with the map of column id to cell values for each - column in that row that needs to be updated. You need to specify at least one row in this - list, and for each row, you need to specify at least one column to update. Note that if - one of the row or column ids in the request does not exist in the table, then the request - fails and no updates are made to the table. -- `table_id`: The ID of the table where the rows are being updated. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook where the rows are being updated. If a workbook - with the specified id could not be found, this API throws ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: The request token for performing the update action. Request - tokens help to identify duplicate requests. If a call times out or fails due to a transient - error like a failed network connection, you can retry the call with the same request token. - The service ensures that if the first call using that request token is successfully - performed, the second call will not perform the action again. Note that request tokens - are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning - hours or days. -""" -function batch_update_table_rows( - rowsToUpdate, tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchupdate", - Dict{String,Any}("rowsToUpdate" => rowsToUpdate); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function batch_update_table_rows( - rowsToUpdate, - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchupdate", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("rowsToUpdate" => rowsToUpdate), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - batch_upsert_table_rows(rows_to_upsert, table_id, workbook_id) - batch_upsert_table_rows(rows_to_upsert, table_id, workbook_id, params::Dict{String,<:Any}) - - The BatchUpsertTableRows API allows you to upsert one or more rows in a table. The upsert -operation takes a filter expression as input and evaluates it to find matching rows on the -destination table. If matching rows are found, it will update the cells in the matching -rows to new values specified in the request. If no matching rows are found, a new row is -added at the end of the table and the cells in that row are set to the new values specified -in the request. You can specify the values to set in some or all of the columns in the -table for the matching or newly appended rows. If a column is not explicitly specified for -a particular row, then that column will not be updated for that row. To clear out the data -in a specific cell, you need to set the value as an empty string (\"\"). 
- -# Arguments -- `rows_to_upsert`: The list of rows to upsert in the table. Each item in this list needs - to have a batch item id to uniquely identify the element in the request, a filter - expression to find the rows to update for that element and the cell values to set for each - column in the upserted rows. You need to specify at least one item in this list. Note - that if one of the filter formulas in the request fails to evaluate because of an error or - one of the column ids in any of the rows does not exist in the table, then the request - fails and no updates are made to the table. -- `table_id`: The ID of the table where the rows are being upserted. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook where the rows are being upserted. If a workbook - with the specified id could not be found, this API throws ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: The request token for performing the update action. Request - tokens help to identify duplicate requests. If a call times out or fails due to a transient - error like a failed network connection, you can retry the call with the same request token. - The service ensures that if the first call using that request token is successfully - performed, the second call will not perform the action again. Note that request tokens - are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning - hours or days. -""" -function batch_upsert_table_rows( - rowsToUpsert, tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchupsert", - Dict{String,Any}("rowsToUpsert" => rowsToUpsert); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function batch_upsert_table_rows( - rowsToUpsert, - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/batchupsert", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("rowsToUpsert" => rowsToUpsert), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_table_data_import_job(job_id, table_id, workbook_id) - describe_table_data_import_job(job_id, table_id, workbook_id, params::Dict{String,<:Any}) - - The DescribeTableDataImportJob API allows you to retrieve the status and details of a -table data import job. - -# Arguments -- `job_id`: The ID of the job that was returned by the StartTableDataImportJob request. If - a job with the specified id could not be found, this API throws ResourceNotFoundException. -- `table_id`: The ID of the table into which data was imported. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook into which data was imported. If a workbook with - the specified id could not be found, this API throws ResourceNotFoundException. 
- -""" -function describe_table_data_import_job( - jobId, tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "GET", - "/workbooks/$(workbookId)/tables/$(tableId)/import/$(jobId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_table_data_import_job( - jobId, - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "GET", - "/workbooks/$(workbookId)/tables/$(tableId)/import/$(jobId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_screen_data(app_id, screen_id, workbook_id) - get_screen_data(app_id, screen_id, workbook_id, params::Dict{String,<:Any}) - - The GetScreenData API allows retrieval of data from a screen in a Honeycode app. The API -allows setting local variables in the screen to filter, sort or otherwise affect what will -be displayed on the screen. - -# Arguments -- `app_id`: The ID of the app that contains the screen. -- `screen_id`: The ID of the screen. -- `workbook_id`: The ID of the workbook that contains the screen. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The number of results to be returned on a single page. Specify a number - between 1 and 100. The maximum value is 100. This parameter is optional. If you don't - specify this parameter, the default page size is 100. -- `"nextToken"`: This parameter is optional. If a nextToken is not specified, the API - returns the first page of data. Pagination tokens expire after 1 hour. If you use a token - that was returned more than an hour back, the API will throw ValidationException. -- `"variables"`: Variables are optional and are needed only if the screen requires them to - render correctly. Variables are specified as a map where the key is the name of the - variable as defined on the screen. The value is an object which currently has only one - property, rawValue, which holds the value of the variable to be passed to the screen. -""" -function get_screen_data( - appId, screenId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "POST", - "/screendata", - Dict{String,Any}( - "appId" => appId, "screenId" => screenId, "workbookId" => workbookId - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_screen_data( - appId, - screenId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/screendata", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "appId" => appId, "screenId" => screenId, "workbookId" => workbookId - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - invoke_screen_automation(app_id, automation_id, screen_id, workbook_id) - invoke_screen_automation(app_id, automation_id, screen_id, workbook_id, params::Dict{String,<:Any}) - - The InvokeScreenAutomation API allows invoking an action defined in a screen in a -Honeycode app. The API allows setting local variables, which can then be used in the -automation being invoked. This allows automating the Honeycode app interactions to write, -update or delete data in the workbook. - -# Arguments -- `app_id`: The ID of the app that contains the screen automation. -- `automation_id`: The ID of the automation action to be performed. 
-- `screen_id`: The ID of the screen that contains the screen automation. -- `workbook_id`: The ID of the workbook that contains the screen automation. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: The request token for performing the automation action. Request - tokens help to identify duplicate requests. If a call times out or fails due to a transient - error like a failed network connection, you can retry the call with the same request token. - The service ensures that if the first call using that request token is successfully - performed, the second call will return the response of the previous call rather than - performing the action again. Note that request tokens are valid only for a few minutes. - You cannot use request tokens to dedupe requests spanning hours or days. -- `"rowId"`: The row ID for the automation if the automation is defined inside a block - with source or list. -- `"variables"`: Variables are specified as a map where the key is the name of the - variable as defined on the screen. The value is an object which currently has only one - property, rawValue, which holds the value of the variable to be passed to the screen. Any - variables defined in a screen are required to be passed in the call. -""" -function invoke_screen_automation( - appId, - automationId, - screenId, - workbookId; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/apps/$(appId)/screens/$(screenId)/automations/$(automationId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function invoke_screen_automation( - appId, - automationId, - screenId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/apps/$(appId)/screens/$(screenId)/automations/$(automationId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_table_columns(table_id, workbook_id) - list_table_columns(table_id, workbook_id, params::Dict{String,<:Any}) - - The ListTableColumns API allows you to retrieve a list of all the columns in a table in a -workbook. - -# Arguments -- `table_id`: The ID of the table whose columns are being retrieved. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook that contains the table whose columns are being - retrieved. If a workbook with the specified id could not be found, this API throws - ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"nextToken"`: This parameter is optional. If a nextToken is not specified, the API - returns the first page of data. Pagination tokens expire after 1 hour. If you use a token - that was returned more than an hour back, the API will throw ValidationException. 
-""" -function list_table_columns( - tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "GET", - "/workbooks/$(workbookId)/tables/$(tableId)/columns"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_table_columns( - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "GET", - "/workbooks/$(workbookId)/tables/$(tableId)/columns", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_table_rows(table_id, workbook_id) - list_table_rows(table_id, workbook_id, params::Dict{String,<:Any}) - - The ListTableRows API allows you to retrieve a list of all the rows in a table in a -workbook. - -# Arguments -- `table_id`: The ID of the table whose rows are being retrieved. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook that contains the table whose rows are being - retrieved. If a workbook with the specified id could not be found, this API throws - ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of rows to return in each page of the results. -- `"nextToken"`: This parameter is optional. If a nextToken is not specified, the API - returns the first page of data. Pagination tokens expire after 1 hour. If you use a token - that was returned more than an hour back, the API will throw ValidationException. -- `"rowIds"`: This parameter is optional. If one or more row ids are specified in this - list, then only the specified row ids are returned in the result. If no row ids are - specified here, then all the rows in the table are returned. -""" -function list_table_rows( - tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/list"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_table_rows( - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/list", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_tables(workbook_id) - list_tables(workbook_id, params::Dict{String,<:Any}) - - The ListTables API allows you to retrieve a list of all the tables in a workbook. - -# Arguments -- `workbook_id`: The ID of the workbook whose tables are being retrieved. If a workbook - with the specified id could not be found, this API throws ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of tables to return in each page of the results. -- `"nextToken"`: This parameter is optional. If a nextToken is not specified, the API - returns the first page of data. Pagination tokens expire after 1 hour. If you use a token - that was returned more than an hour back, the API will throw ValidationException. 
-""" -function list_tables(workbookId; aws_config::AbstractAWSConfig=global_aws_config()) - return honeycode( - "GET", - "/workbooks/$(workbookId)/tables"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_tables( - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "GET", - "/workbooks/$(workbookId)/tables", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_tags_for_resource(resource_arn) - list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) - - The ListTagsForResource API allows you to return a resource's tags. - -# Arguments -- `resource_arn`: The resource's Amazon Resource Name (ARN). - -""" -function list_tags_for_resource( - resourceArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "GET", - "/tags/$(resourceArn)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_tags_for_resource( - resourceArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "GET", - "/tags/$(resourceArn)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - query_table_rows(filter_formula, table_id, workbook_id) - query_table_rows(filter_formula, table_id, workbook_id, params::Dict{String,<:Any}) - - The QueryTableRows API allows you to use a filter formula to query for specific rows in a -table. - -# Arguments -- `filter_formula`: An object that represents a filter formula along with the id of the - context row under which the filter function needs to evaluate. -- `table_id`: The ID of the table whose rows are being queried. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook whose table rows are being queried. If a workbook - with the specified id could not be found, this API throws ResourceNotFoundException. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of rows to return in each page of the results. -- `"nextToken"`: This parameter is optional. If a nextToken is not specified, the API - returns the first page of data. Pagination tokens expire after 1 hour. If you use a token - that was returned more than an hour back, the API will throw ValidationException. 
-""" -function query_table_rows( - filterFormula, tableId, workbookId; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/query", - Dict{String,Any}("filterFormula" => filterFormula); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function query_table_rows( - filterFormula, - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/rows/query", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("filterFormula" => filterFormula), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - start_table_data_import_job(client_request_token, data_format, data_source, import_options, table_id, workbook_id) - start_table_data_import_job(client_request_token, data_format, data_source, import_options, table_id, workbook_id, params::Dict{String,<:Any}) - - The StartTableDataImportJob API allows you to start an import job on a table. This API -will only return the id of the job that was started. To find out the status of the import -request, you need to call the DescribeTableDataImportJob API. - -# Arguments -- `client_request_token`: The request token for performing the update action. Request - tokens help to identify duplicate requests. If a call times out or fails due to a transient - error like a failed network connection, you can retry the call with the same request token. - The service ensures that if the first call using that request token is successfully - performed, the second call will not perform the action again. Note that request tokens - are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning - hours or days. -- `data_format`: The format of the data that is being imported. Currently the only option - supported is \"DELIMITED_TEXT\". -- `data_source`: The source of the data that is being imported. The size of source must be - no larger than 100 MB. Source must have no more than 100,000 cells and no more than 1,000 - rows. -- `import_options`: The options for customizing this import request. -- `table_id`: The ID of the table where the rows are being imported. If a table with the - specified id could not be found, this API throws ResourceNotFoundException. -- `workbook_id`: The ID of the workbook where the rows are being imported. If a workbook - with the specified id could not be found, this API throws ResourceNotFoundException. 
- -""" -function start_table_data_import_job( - clientRequestToken, - dataFormat, - dataSource, - importOptions, - tableId, - workbookId; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/import", - Dict{String,Any}( - "clientRequestToken" => clientRequestToken, - "dataFormat" => dataFormat, - "dataSource" => dataSource, - "importOptions" => importOptions, - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function start_table_data_import_job( - clientRequestToken, - dataFormat, - dataSource, - importOptions, - tableId, - workbookId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/workbooks/$(workbookId)/tables/$(tableId)/import", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "clientRequestToken" => clientRequestToken, - "dataFormat" => dataFormat, - "dataSource" => dataSource, - "importOptions" => importOptions, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - tag_resource(resource_arn, tags) - tag_resource(resource_arn, tags, params::Dict{String,<:Any}) - - The TagResource API allows you to add tags to an ARN-able resource. Resource includes -workbook, table, screen and screen-automation. - -# Arguments -- `resource_arn`: The resource's Amazon Resource Name (ARN). -- `tags`: A list of tags to apply to the resource. - -""" -function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) - return honeycode( - "POST", - "/tags/$(resourceArn)", - Dict{String,Any}("tags" => tags); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function tag_resource( - resourceArn, - tags, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "POST", - "/tags/$(resourceArn)", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - untag_resource(resource_arn, tag_keys) - untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) - - The UntagResource API allows you to removes tags from an ARN-able resource. Resource -includes workbook, table, screen and screen-automation. - -# Arguments -- `resource_arn`: The resource's Amazon Resource Name (ARN). -- `tag_keys`: A list of tag keys to remove from the resource. - -""" -function untag_resource( - resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() -) - return honeycode( - "DELETE", - "/tags/$(resourceArn)", - Dict{String,Any}("tagKeys" => tagKeys); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function untag_resource( - resourceArn, - tagKeys, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return honeycode( - "DELETE", - "/tags/$(resourceArn)", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/iam.jl b/src/services/iam.jl index 0118ff1d9e..e2b287fd1f 100644 --- a/src/services/iam.jl +++ b/src/services/iam.jl @@ -66,9 +66,9 @@ and then add a different role to an instance profile. You must then wait for the appear across all of Amazon Web Services because of eventual consistency. 
To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it. The caller of this operation must be -granted the PassRole permission on the IAM role by a permissions policy. For more -information about roles, see Working with roles. For more information about instance -profiles, see About instance profiles. +granted the PassRole permission on the IAM role by a permissions policy. For more +information about roles, see IAM roles in the IAM User Guide. For more information about +instance profiles, see Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to update. This parameter @@ -165,7 +165,7 @@ end Attaches the specified managed policy to the specified IAM group. You use this operation to attach a managed policy to a group. To embed an inline policy in a group, use -PutGroupPolicy. As a best practice, you can validate your IAM policies. To learn more, see +PutGroupPolicy . As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide. For more information about policies, see Managed policies and inline policies in the IAM User Guide. @@ -216,9 +216,9 @@ end Attaches the specified managed policy to the specified IAM role. When you attach a managed policy to a role, the managed policy becomes part of the role's permission (access) policy. You cannot use a managed policy as the role's trust policy. The role's trust policy is -created at the same time as the role, using CreateRole. You can update a role's trust -policy using UpdateAssumeRolePolicy. Use this operation to attach a managed policy to a -role. To embed an inline policy in a role, use PutRolePolicy. For more information about +created at the same time as the role, using CreateRole . You can update a role's trust +policy using UpdateAssumeRolePolicy . Use this operation to attach a managed policy to a +role. To embed an inline policy in a role, use PutRolePolicy . For more information about policies, see Managed policies and inline policies in the IAM User Guide. As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide. @@ -268,8 +268,8 @@ end attach_user_policy(policy_arn, user_name, params::Dict{String,<:Any}) Attaches the specified managed policy to the specified user. You use this operation to -attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy. -As a best practice, you can validate your IAM policies. To learn more, see Validating IAM +attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy +. As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide. For more information about policies, see Managed policies and inline policies in the IAM User Guide. @@ -608,8 +608,8 @@ function create_login_profile( end """ - create_open_idconnect_provider(thumbprint_list, url) - create_open_idconnect_provider(thumbprint_list, url, params::Dict{String,<:Any}) + create_open_idconnect_provider(url) + create_open_idconnect_provider(url, params::Dict{String,<:Any}) Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC). 
The OIDC provider that you create with this operation can be used as a principal in @@ -626,28 +626,15 @@ OIDC provider A list of tags that are attached to the specified IAM OIDC provi list of thumbprints of one or more server certificates that the IdP uses You get all of this information from the OIDC IdP you want to use to access Amazon Web Services. Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our -library of trusted certificate authorities (CAs) instead of using a certificate thumbprint -to verify your IdP server certificate. These OIDC IdPs include Google, Auth0, and those -that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, -your legacy thumbprint remains in your configuration, but is no longer used for validation. - The trust for the OIDC provider is derived from the IAM provider that this operation -creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation -to highly privileged users. - -# Arguments -- `thumbprint_list`: A list of server certificate thumbprints for the OpenID Connect (OIDC) - identity provider's server certificates. Typically this list includes only one entry. - However, IAM lets you have up to five thumbprints for an OIDC provider. This lets you - maintain multiple thumbprints if the identity provider is rotating certificates. The server - certificate thumbprint is the hex-encoded SHA-1 hash value of the X.509 certificate used by - the domain where the OpenID Connect provider makes its keys available. It is always a - 40-character string. You must provide at least one thumbprint when creating an IAM OIDC - provider. For example, assume that the OIDC provider is server.example.com and the provider - stores its keys at https://keys.server.example.com/openid-connect. In that case, the - thumbprint string would be the hex-encoded SHA-1 hash value of the certificate used by - https://keys.server.example.com. For more information about obtaining the OIDC provider - thumbprint, see Obtaining the thumbprint for an OpenID Connect provider in the IAM user - Guide. +library of trusted root certificate authorities (CAs) instead of using a certificate +thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint +remains in your configuration, but is no longer used for validation. These OIDC IdPs +include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a +JSON Web Key Set (JWKS) endpoint. The trust for the OIDC provider is derived from the IAM +provider that this operation creates. Therefore, it is best to limit access to the +CreateOpenIDConnectProvider operation to highly privileged users. + +# Arguments - `url`: The URL of the identity provider. The URL must begin with https:// and should correspond to the iss claim in the provider's OpenID Connect ID tokens. Per the OIDC standard, path components are allowed but query parameters are not. Typically the URL @@ -672,32 +659,38 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys about tagging, see Tagging IAM resources in the IAM User Guide. If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created. +- `"ThumbprintList"`: A list of server certificate thumbprints for the OpenID Connect + (OIDC) identity provider's server certificates. Typically this list includes only one + entry. 
However, IAM lets you have up to five thumbprints for an OIDC provider. This lets + you maintain multiple thumbprints if the identity provider is rotating certificates. This + parameter is optional. If it is not included, IAM will retrieve and use the top + intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider + server certificate. The server certificate thumbprint is the hex-encoded SHA-1 hash value + of the X.509 certificate used by the domain where the OpenID Connect provider makes its + keys available. It is always a 40-character string. For example, assume that the OIDC + provider is server.example.com and the provider stores its keys at + https://keys.server.example.com/openid-connect. In that case, the thumbprint string would + be the hex-encoded SHA-1 hash value of the certificate used by + https://keys.server.example.com. For more information about obtaining the OIDC provider + thumbprint, see Obtaining the thumbprint for an OpenID Connect provider in the IAM user + Guide. """ function create_open_idconnect_provider( - ThumbprintList, Url; aws_config::AbstractAWSConfig=global_aws_config() + Url; aws_config::AbstractAWSConfig=global_aws_config() ) return iam( "CreateOpenIDConnectProvider", - Dict{String,Any}("ThumbprintList" => ThumbprintList, "Url" => Url); + Dict{String,Any}("Url" => Url); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_open_idconnect_provider( - ThumbprintList, - Url, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + Url, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return iam( "CreateOpenIDConnectProvider", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ThumbprintList" => ThumbprintList, "Url" => Url), - params, - ), - ); + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Url" => Url), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -856,9 +849,9 @@ end create_role(assume_role_policy_document, role_name) create_role(assume_role_policy_document, role_name, params::Dict{String,<:Any}) -Creates a new role for your Amazon Web Services account. For more information about roles, -see IAM roles. For information about quotas for role names and the number of roles you can -create, see IAM and STS quotas in the IAM User Guide. +Creates a new role for your Amazon Web Services account. For more information about roles, +see IAM roles in the IAM User Guide. For information about quotas for role names and the +number of roles you can create, see IAM and STS quotas in the IAM User Guide. # Arguments - `assume_role_policy_document`: The trust relationship policy document that grants an @@ -1503,7 +1496,7 @@ Deletes the specified instance profile. The instance profile must not have an as role. Make sure that you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance. For more -information about instance profiles, see About instance profiles. +information about instance profiles, see Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to delete. This parameter @@ -2689,7 +2682,9 @@ Generates a report that includes details about when an IAM resource (user, group policy) was last used in an attempt to access Amazon Web Services services. 
Recent activity usually appears within four hours. IAM reports activity for at least the last 400 days, or less if your Region began supporting this feature within the last year. For more -information, see Regions where data is tracked. The service last accessed data +information, see Regions where data is tracked. For more information about services and +actions for which action last accessed information is displayed, see IAM action last +accessed information services and actions. The service last accessed data includes all attempts to access an Amazon Web Services API, not just the successful ones. This includes all attempts that were made using the Amazon Web Services Management Console, the Amazon Web Services API through any of the SDKs, or any of the command line tools. An @@ -3137,7 +3132,7 @@ end Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see -About instance profiles in the IAM User Guide. +Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to get information about. This @@ -3219,6 +3214,43 @@ function get_login_profile( ) end +""" + get_mfadevice(serial_number) + get_mfadevice(serial_number, params::Dict{String,<:Any}) + +Retrieves information about an MFA device for a specified user. + +# Arguments +- `serial_number`: Serial number that uniquely identifies the MFA device. For this API, we + only accept FIDO security key ARNs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"UserName"`: The friendly name identifying the user. +""" +function get_mfadevice(SerialNumber; aws_config::AbstractAWSConfig=global_aws_config()) + return iam( + "GetMFADevice", + Dict{String,Any}("SerialNumber" => SerialNumber); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_mfadevice( + SerialNumber, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iam( + "GetMFADevice", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SerialNumber" => SerialNumber), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_open_idconnect_provider(open_idconnect_provider_arn) get_open_idconnect_provider(open_idconnect_provider_arn, params::Dict{String,<:Any}) @@ -3426,11 +3458,11 @@ end Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information -about roles, see Working with roles. Policies returned by this operation are URL-encoded -compliant with RFC 3986. You can use a URL decoding method to convert the policy back to -plain JSON text. For example, if you use Java, you can use the decode method of the -java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar -functionality. +about roles, see IAM roles in the IAM User Guide. Policies returned by this operation are +URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the +policy back to plain JSON text. For example, if you use Java, you can use the decode method +of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide +similar functionality. # Arguments - `role_name`: The name of the IAM role to get information about. This parameter allows @@ -3474,8 +3506,8 @@ Java SDK. 
Other languages and SDKs provide similar functionality. An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document. For more information about policies, see -Managed policies and inline policies in the IAM User Guide. For more information about -roles, see Using roles to delegate permissions and federate identities. +Managed policies and inline policies in the IAM User Guide. For more information about +roles, see IAM roles in the IAM User Guide. # Arguments - `policy_name`: The name of the policy document to get. This parameter allows (through its @@ -3938,10 +3970,10 @@ parameters. If the UserName is not specified, the user name is determined implic on the Amazon Web Services access key ID used to sign the request. If a temporary access key is used, then UserName is required. If a long-term key is assigned to the user, then UserName is not required. This operation works for access keys under the Amazon Web -Services account. Consequently, you can use this operation to manage Amazon Web Services -account root user credentials even if the Amazon Web Services account has no associated -users. To ensure the security of your Amazon Web Services account, the secret access key -is accessible only during key and user creation. +Services account. If the Amazon Web Services account has no associated users, the root user +returns its own access key IDs by running this command. To ensure the security of your +Amazon Web Services account, the secret access key is accessible only during key and user +creation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4458,12 +4490,12 @@ end list_instance_profiles(params::Dict{String,<:Any}) Lists the instance profiles that have the specified path prefix. If there are none, the -operation returns an empty list. For more information about instance profiles, see About -instance profiles. IAM resource-listing operations return a subset of the available -attributes for the resource. For example, this operation does not return tags, even though -they are an attribute of the returned object. To view all of the information for an -instance profile, see GetInstanceProfile. You can paginate the results using the MaxItems -and Marker parameters. +operation returns an empty list. For more information about instance profiles, see Using +instance profiles in the IAM User Guide. IAM resource-listing operations return a subset +of the available attributes for the resource. For example, this operation does not return +tags, even though they are an attribute of the returned object. To view all of the +information for an instance profile, see GetInstanceProfile. You can paginate the results +using the MaxItems and Marker parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4508,8 +4540,8 @@ end Lists the instance profiles that have the specified associated IAM role. If there are none, the operation returns an empty list. For more information about instance profiles, go to -About instance profiles. You can paginate the results using the MaxItems and Marker -parameters. +Using instance profiles in the IAM User Guide. You can paginate the results using the +MaxItems and Marker parameters. # Arguments - `role_name`: The name of the role to list instance profiles for. 
This parameter allows @@ -5064,11 +5096,12 @@ end list_roles(params::Dict{String,<:Any}) Lists the IAM roles that have the specified path prefix. If there are none, the operation -returns an empty list. For more information about roles, see Working with roles. IAM -resource-listing operations return a subset of the available attributes for the resource. -For example, this operation does not return tags, even though they are an attribute of the -returned object. To view all of the information for a role, see GetRole. You can paginate -the results using the MaxItems and Marker parameters. +returns an empty list. For more information about roles, see IAM roles in the IAM User +Guide. IAM resource-listing operations return a subset of the available attributes for the +resource. This operation does not return the following attributes, even though they are an +attribute of the returned object: PermissionsBoundary RoleLastUsed Tags To view all +of the information for a role, see GetRole. You can paginate the results using the +MaxItems and Marker parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5525,10 +5558,10 @@ end Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list. IAM resource-listing operations return a subset of the -available attributes for the resource. For example, this operation does not return tags, -even though they are an attribute of the returned object. To view all of the information -for a user, see GetUser. You can paginate the results using the MaxItems and Marker -parameters. +available attributes for the resource. This operation does not return the following +attributes, even though they are an attribute of the returned object: PermissionsBoundary + Tags To view all of the information for a user, see GetUser. You can paginate the +results using the MaxItems and Marker parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5610,10 +5643,10 @@ end Adds or updates an inline policy document that is embedded in the specified IAM group. A user can also have managed policies attached to it. To attach a managed policy to a group, -use AttachGroupPolicy. To create a new managed policy, use CreatePolicy. For information -about policies, see Managed policies and inline policies in the IAM User Guide. For -information about the maximum number of inline policies that you can embed in a group, see -IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you +use AttachGroupPolicy . To create a new managed policy, use CreatePolicy . For +information about policies, see Managed policies and inline policies in the IAM User Guide. +For information about the maximum number of inline policies that you can embed in a group, +see IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you should use POST rather than GET when calling PutGroupPolicy. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide. @@ -5625,7 +5658,7 @@ using the Query API with IAM, see Making query requests in the IAM User Guide. - `policy_document`: The policy document. You must provide policies in JSON format in IAM. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. 
CloudFormation always converts a YAML policy to JSON format before - submitting it to = IAM. The regex pattern used to validate this parameter is a string of + submitting it to IAM. The regex pattern used to validate this parameter is a string of characters consisting of the following: Any printable ASCII character ranging from the space character (u0020) through the end of the ASCII character range The printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF) The @@ -5739,16 +5772,15 @@ end Adds or updates an inline policy document that is embedded in the specified IAM role. When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the -role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. -For more information about IAM roles, see Using roles to delegate permissions and federate -identities. A role can also have a managed policy attached to it. To attach a managed -policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. -For information about policies, see Managed policies and inline policies in the IAM User -Guide. For information about the maximum number of inline policies that you can embed with -a role, see IAM and STS quotas in the IAM User Guide. Because policy documents can be -large, you should use POST rather than GET when calling PutRolePolicy. For general -information about using the Query API with IAM, see Making query requests in the IAM User -Guide. +role, using CreateRole . You can update a role's trust policy using +UpdateAssumeRolePolicy . For more information about roles, see IAM roles in the IAM User +Guide. A role can also have a managed policy attached to it. To attach a managed policy to +a role, use AttachRolePolicy . To create a new managed policy, use CreatePolicy . For +information about policies, see Managed policies and inline policies in the IAM User Guide. +For information about the maximum number of inline policies that you can embed with a role, +see IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you +should use POST rather than GET when calling PutRolePolicy. For general information about +using the Query API with IAM, see Making query requests in the IAM User Guide. # Arguments - `policy_document`: The policy document. You must provide policies in JSON format in IAM. @@ -5871,7 +5903,7 @@ end Adds or updates an inline policy document that is embedded in the specified IAM user. An IAM user can also have a managed policy attached to it. To attach a managed policy to a -user, use AttachUserPolicy. To create a new managed policy, use CreatePolicy. For +user, use AttachUserPolicy . To create a new managed policy, use CreatePolicy . For information about policies, see Managed policies and inline policies in the IAM User Guide. For information about the maximum number of inline policies that you can embed in a user, see IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you @@ -5992,12 +6024,12 @@ end remove_role_from_instance_profile(instance_profile_name, role_name) remove_role_from_instance_profile(instance_profile_name, role_name, params::Dict{String,<:Any}) -Removes the specified IAM role from the specified EC2 instance profile. Make sure that you -do not have any Amazon EC2 instances running with the role you are about to remove from the -instance profile. 
Removing a role from an instance profile that is associated with a -running instance might break any applications running on the instance. For more -information about IAM roles, see Working with roles. For more information about instance -profiles, see About instance profiles. +Removes the specified IAM role from the specified Amazon EC2 instance profile. Make sure +that you do not have any Amazon EC2 instances running with the role you are about to remove +from the instance profile. Removing a role from an instance profile that is associated with +a running instance might break any applications running on the instance. For more +information about roles, see IAM roles in the IAM User Guide. For more information about +instance profiles, see Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to update. This parameter @@ -6413,16 +6445,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources - that you must define to run the simulation. Each of the EC2 scenarios requires that you - specify instance, image, and security group resources. If your scenario includes an EBS - volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, - then you must supply the network interface resource. If it includes an IP subnet, then you - must specify the subnet resource. For more information on the EC2 scenario options, see - Supported platforms in the Amazon EC2 User Guide. EC2-VPC-InstanceStore instance, - image, security group, network interface EC2-VPC-InstanceStore-Subnet instance, image, - security group, network interface, subnet EC2-VPC-EBS instance, image, security group, - network interface, volume EC2-VPC-EBS-Subnet instance, image, security group, network - interface, subnet, volume + that you must define to run the simulation. Each of the Amazon EC2 scenarios requires that + you specify instance, image, and security group resources. If your scenario includes an EBS + volume, then you must specify that volume as a resource. If the Amazon EC2 scenario + includes VPC, then you must supply the network interface resource. If it includes an IP + subnet, then you must specify the subnet resource. For more information on the Amazon EC2 + scenario options, see Supported platforms in the Amazon EC2 User Guide. + EC2-VPC-InstanceStore instance, image, security group, network interface + EC2-VPC-InstanceStore-Subnet instance, image, security group, network interface, subnet + EC2-VPC-EBS instance, image, security group, network interface, volume + EC2-VPC-EBS-Subnet instance, image, security group, network interface, subnet, volume - `"ResourceOwner"`: An ARN representing the Amazon Web Services account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN. Examples of resource ARNs include an S3 bucket or object. If ResourceOwner is specified, it @@ -6588,16 +6620,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. 
The following list shows each of the supported scenario values and the resources - that you must define to run the simulation. Each of the EC2 scenarios requires that you - specify instance, image, and security group resources. If your scenario includes an EBS - volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, - then you must supply the network interface resource. If it includes an IP subnet, then you - must specify the subnet resource. For more information on the EC2 scenario options, see - Supported platforms in the Amazon EC2 User Guide. EC2-VPC-InstanceStore instance, - image, security group, network interface EC2-VPC-InstanceStore-Subnet instance, image, - security group, network interface, subnet EC2-VPC-EBS instance, image, security group, - network interface, volume EC2-VPC-EBS-Subnet instance, image, security group, network - interface, subnet, volume + that you must define to run the simulation. Each of the Amazon EC2 scenarios requires that + you specify instance, image, and security group resources. If your scenario includes an EBS + volume, then you must specify that volume as a resource. If the Amazon EC2 scenario + includes VPC, then you must supply the network interface resource. If it includes an IP + subnet, then you must specify the subnet resource. For more information on the Amazon EC2 + scenario options, see Supported platforms in the Amazon EC2 User Guide. + EC2-VPC-InstanceStore instance, image, security group, network interface + EC2-VPC-InstanceStore-Subnet instance, image, security group, network interface, subnet + EC2-VPC-EBS instance, image, security group, network interface, volume + EC2-VPC-EBS-Subnet instance, image, security group, network interface, subnet, volume - `"ResourceOwner"`: An Amazon Web Services account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN. Examples of resource ARNs include an S3 bucket or object. If ResourceOwner is specified, it is also @@ -7832,14 +7864,14 @@ are not merged.) Typically, you need to update a thumbprint only when the identi certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated. Amazon Web Services secures -communication with some OIDC identity providers (IdPs) through our library of trusted +communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP -server certificate. These OIDC IdPs include Google, Auth0, and those that use an Amazon S3 -bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint -remains in your configuration, but is no longer used for validation. Trust for the OIDC -provider is derived from the provider certificate and is validated by the thumbprint. -Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint -operation to highly privileged users. +server certificate. In these cases, your legacy thumbprint remains in your configuration, +but is no longer used for validation. These OIDC IdPs include Auth0, GitHub, GitLab, +Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. + Trust for the OIDC provider is derived from the provider certificate and is validated by +the thumbprint. 
Therefore, it is best to limit access to the +UpdateOpenIDConnectProviderThumbprint operation to highly privileged users. # Arguments - `open_idconnect_provider_arn`: The Amazon Resource Name (ARN) of the IAM OIDC provider @@ -7911,7 +7943,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM roles in the IAM - User Guide. + User Guide. IAM role credentials provided by Amazon EC2 instances assigned to the role are + not subject to the specified maximum session duration. """ function update_role(RoleName; aws_config::AbstractAWSConfig=global_aws_config()) return iam( diff --git a/src/services/identitystore.jl b/src/services/identitystore.jl index e9e3e153cb..f9a3e0422d 100644 --- a/src/services/identitystore.jl +++ b/src/services/identitystore.jl @@ -17,8 +17,8 @@ Creates a group within the specified identity store. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: A string containing the description of the group. - `"DisplayName"`: A string containing the name of the group. This value is commonly - displayed when the group is referenced. \"Administrator\" and \"AWSAdministrators\" are - reserved names and can't be used for users or groups. + displayed when the group is referenced. Administrator and AWSAdministrators are reserved + names and can't be used for users or groups. """ function create_group(IdentityStoreId; aws_config::AbstractAWSConfig=global_aws_config()) return identitystore( @@ -128,8 +128,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"UserName"`: A unique string used to identify the user. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers, and punctuation. This value is specified at the time the user is created and stored as an - attribute of the user object in the identity store. \"Administrator\" and - \"AWSAdministrators\" are reserved names and can't be used for users or groups. + attribute of the user object in the identity store. Administrator and AWSAdministrators are + reserved names and can't be used for users or groups. - `"UserType"`: A string indicating the type of user. Possible values are left unspecified. The value can vary based on your specific use case. """ @@ -291,7 +291,9 @@ end describe_group(group_id, identity_store_id) describe_group(group_id, identity_store_id, params::Dict{String,<:Any}) -Retrieves the group metadata and attributes from GroupId in an identity store. +Retrieves the group metadata and attributes from GroupId in an identity store. If you have +administrator access to a member account, you can use this API from the member account. +Read about member accounts in the Organizations User Guide. # Arguments - `group_id`: The identifier for a group in the identity store. @@ -337,7 +339,9 @@ end describe_group_membership(identity_store_id, membership_id) describe_group_membership(identity_store_id, membership_id, params::Dict{String,<:Any}) -Retrieves membership metadata and attributes from MembershipId in an identity store. +Retrieves membership metadata and attributes from MembershipId in an identity store. If +you have administrator access to a member account, you can use this API from the member +account. 
Read about member accounts in the Organizations User Guide. # Arguments - `identity_store_id`: The globally unique identifier for the identity store. @@ -382,7 +386,9 @@ end describe_user(identity_store_id, user_id) describe_user(identity_store_id, user_id, params::Dict{String,<:Any}) -Retrieves the user metadata and attributes from the UserId in an identity store. +Retrieves the user metadata and attributes from the UserId in an identity store. If you +have administrator access to a member account, you can use this API from the member +account. Read about member accounts in the Organizations User Guide. # Arguments - `identity_store_id`: The globally unique identifier for the identity store, such as @@ -426,7 +432,9 @@ end get_group_id(alternate_identifier, identity_store_id) get_group_id(alternate_identifier, identity_store_id, params::Dict{String,<:Any}) -Retrieves GroupId in an identity store. +Retrieves GroupId in an identity store. If you have administrator access to a member +account, you can use this API from the member account. Read about member accounts in the +Organizations User Guide. # Arguments - `alternate_identifier`: A unique identifier for a user or group that is not the primary @@ -476,7 +484,9 @@ end get_group_membership_id(group_id, identity_store_id, member_id) get_group_membership_id(group_id, identity_store_id, member_id, params::Dict{String,<:Any}) -Retrieves the MembershipId in an identity store. +Retrieves the MembershipId in an identity store. If you have administrator access to a +member account, you can use this API from the member account. Read about member accounts in +the Organizations User Guide. # Arguments - `group_id`: The identifier for a group in the identity store. @@ -529,7 +539,9 @@ end get_user_id(alternate_identifier, identity_store_id) get_user_id(alternate_identifier, identity_store_id, params::Dict{String,<:Any}) -Retrieves the UserId in an identity store. +Retrieves the UserId in an identity store. If you have administrator access to a member +account, you can use this API from the member account. Read about member accounts in the +Organizations User Guide. # Arguments - `alternate_identifier`: A unique identifier for a user or group that is not the primary @@ -580,7 +592,8 @@ end is_member_in_groups(group_ids, identity_store_id, member_id, params::Dict{String,<:Any}) Checks the user's membership in all requested groups and returns if the member exists in -all queried groups. +all queried groups. If you have administrator access to a member account, you can use this +API from the member account. Read about member accounts in the Organizations User Guide. # Arguments - `group_ids`: A list of identifiers for groups in the identity store. @@ -632,7 +645,9 @@ end list_group_memberships(group_id, identity_store_id, params::Dict{String,<:Any}) For the specified group in the specified identity store, returns the list of all -GroupMembership objects and returns results in paginated form. +GroupMembership objects and returns results in paginated form. If you have administrator +access to a member account, you can use this API from the member account. Read about member +accounts in the Organizations User Guide. # Arguments - `group_id`: The identifier for a group in the identity store. @@ -684,7 +699,9 @@ end list_group_memberships_for_member(identity_store_id, member_id, params::Dict{String,<:Any}) For the specified member in the specified identity store, returns the list of all -GroupMembership objects and returns results in paginated form. 
+GroupMembership objects and returns results in paginated form. If you have administrator +access to a member account, you can use this API from the member account. Read about member +accounts in the Organizations User Guide. # Arguments - `identity_store_id`: The globally unique identifier for the identity store. @@ -740,7 +757,9 @@ end Lists all groups in the identity store. Returns a paginated list of complete Group objects. Filtering for a Group by the DisplayName attribute is deprecated. Instead, use the -GetGroupId API action. +GetGroupId API action. If you have administrator access to a member account, you can use +this API from the member account. Read about member accounts in the Organizations User +Guide. # Arguments - `identity_store_id`: The globally unique identifier for the identity store, such as @@ -791,7 +810,8 @@ end Lists all users in the identity store. Returns a paginated list of complete User objects. Filtering for a User by the UserName attribute is deprecated. Instead, use the GetUserId -API action. +API action. If you have administrator access to a member account, you can use this API +from the member account. Read about member accounts in the Organizations User Guide. # Arguments - `identity_store_id`: The globally unique identifier for the identity store, such as diff --git a/src/services/imagebuilder.jl b/src/services/imagebuilder.jl index 744a87c27f..01ef046b43 100644 --- a/src/services/imagebuilder.jl +++ b/src/services/imagebuilder.jl @@ -55,6 +55,56 @@ function cancel_image_creation( ) end +""" + cancel_lifecycle_execution(client_token, lifecycle_execution_id) + cancel_lifecycle_execution(client_token, lifecycle_execution_id, params::Dict{String,<:Any}) + +Cancel a specific image lifecycle policy runtime instance. + +# Arguments +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. +- `lifecycle_execution_id`: Identifies the specific runtime instance of the image lifecycle + to cancel. + +""" +function cancel_lifecycle_execution( + clientToken, lifecycleExecutionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "PUT", + "/CancelLifecycleExecution", + Dict{String,Any}( + "clientToken" => clientToken, "lifecycleExecutionId" => lifecycleExecutionId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_lifecycle_execution( + clientToken, + lifecycleExecutionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/CancelLifecycleExecution", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "lifecycleExecutionId" => lifecycleExecutionId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_component(client_token, name, platform, semantic_version) create_component(client_token, name, platform, semantic_version, params::Dict{String,<:Any}) @@ -66,7 +116,8 @@ points to a YAML document file stored in Amazon S3, using the uri property in th body. # Arguments -- `client_token`: The idempotency token of the component. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `name`: The name of the component. - `platform`: The operating system platform of the component. 
- `semantic_version`: The semantic version of the component. This version follows the @@ -82,7 +133,7 @@ body. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"changeDescription"`: The change description of the component. Describes what change has - been made in this version, or what makes this version different from other versions of this + been made in this version, or what makes this version different from other versions of the component. - `"data"`: Component data contains inline YAML document content for the component. Alternatively, you can specify the uri of a YAML document file stored in Amazon S3. @@ -155,7 +206,8 @@ Creates a new container recipe. Container recipes define how images are configur and assessed. # Arguments -- `client_token`: The client token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `components`: Components for build and test that are included in the container recipe. Recipes require a minimum of one build component, and can have a maximum of 20 build and test components in any combination. @@ -257,7 +309,8 @@ Creates a new distribution configuration. Distribution configurations define and the outputs of your pipeline. # Arguments -- `client_token`: The idempotency token of the distribution configuration. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `distributions`: The distributions of the distribution configuration. - `name`: The name of the distribution configuration. @@ -314,7 +367,8 @@ output resources defined in the distribution configuration. You must specify exa recipe for your image, using either a ContainerRecipeArn or an ImageRecipeArn. # Arguments -- `client_token`: The idempotency token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `infrastructure_configuration_arn`: The Amazon Resource Name (ARN) of the infrastructure configuration that defines the environment in which your image will be built and tested. @@ -327,11 +381,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedImageMetadataEnabled"`: Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default. +- `"executionRole"`: The name or Amazon Resource Name (ARN) for the IAM role you create + that grants Image Builder access to perform workflow actions. - `"imageRecipeArn"`: The Amazon Resource Name (ARN) of the image recipe that defines how images are configured, tested, and assessed. - `"imageScanningConfiguration"`: Contains settings for vulnerability scans. - `"imageTestsConfiguration"`: The image tests configuration of the image. - `"tags"`: The tags of the image. +- `"workflows"`: Contains an array of workflow configuration objects. """ function create_image( clientToken, @@ -381,7 +438,8 @@ Creates a new image pipeline. Image pipelines enable you to automate the creatio distribution of images. 
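A minimal sketch of creating a pipeline that uses the new executionRole and workflows options (see the argument reference below); the `@service` wrapper usage, the ARNs, and the `workflowArn` key are assumptions/placeholders rather than values taken from this patch:

```julia
using UUIDs
using AWS: @service
@service Imagebuilder

# Pipeline that builds from an image recipe and runs a custom build workflow
# under the given IAM role. All ARNs below are placeholders.
Imagebuilder.create_image_pipeline(
    string(uuid4()),  # clientToken for idempotency
    "arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/my-infra",
    "my-pipeline",
    Dict{String,Any}(
        "imageRecipeArn" => "arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/my-recipe/1.0.0",
        "executionRole" => "arn:aws:iam::123456789012:role/ImageBuilderWorkflowRole",
        "workflows" => [
            # key name assumed for the workflow configuration object
            Dict("workflowArn" => "arn:aws:imagebuilder:us-east-1:123456789012:workflow/build/my-build/1.0.0/1"),
        ],
    ),
)
```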
# Arguments -- `client_token`: The idempotency token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `infrastructure_configuration_arn`: The Amazon Resource Name (ARN) of the infrastructure configuration that will be used to build images created by this image pipeline. - `name`: The name of the image pipeline. @@ -397,6 +455,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedImageMetadataEnabled"`: Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default. +- `"executionRole"`: The name or Amazon Resource Name (ARN) for the IAM role you create + that grants Image Builder access to perform workflow actions. - `"imageRecipeArn"`: The Amazon Resource Name (ARN) of the image recipe that will be used to configure images created by this image pipeline. - `"imageScanningConfiguration"`: Contains settings for vulnerability scans. @@ -404,6 +464,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"schedule"`: The schedule of the image pipeline. - `"status"`: The status of the image pipeline. - `"tags"`: The tags of the image pipeline. +- `"workflows"`: Contains an array of workflow configuration objects. """ function create_image_pipeline( clientToken, @@ -457,7 +518,8 @@ Creates a new image recipe. Image recipes define how images are configured, test assessed. # Arguments -- `client_token`: The idempotency token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `components`: The components included in the image recipe. - `name`: The name of the image recipe. - `parent_image`: The base image of the image recipe. The value of the string can be the @@ -545,7 +607,8 @@ Creates a new infrastructure configuration. An infrastructure configuration defi environment in which your image will be built and tested. # Arguments -- `client_token`: The idempotency token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `instance_profile_name`: The instance profile to associate with the instance used to customize your Amazon EC2 AMI. - `name`: The name of the infrastructure configuration. @@ -619,6 +682,171 @@ function create_infrastructure_configuration( ) end +""" + create_lifecycle_policy(client_token, execution_role, name, policy_details, resource_selection, resource_type) + create_lifecycle_policy(client_token, execution_role, name, policy_details, resource_selection, resource_type, params::Dict{String,<:Any}) + +Create a lifecycle policy resource. + +# Arguments +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. +- `execution_role`: The name or Amazon Resource Name (ARN) for the IAM role you create that + grants Image Builder access to run lifecycle actions. 
+- `name`: The name of the lifecycle policy to create. +- `policy_details`: Configuration details for the lifecycle policy rules. +- `resource_selection`: Selection criteria for the resources that the lifecycle policy + applies to. +- `resource_type`: The type of Image Builder resource that the lifecycle policy applies to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: Optional description for the lifecycle policy. +- `"status"`: Indicates whether the lifecycle policy resource is enabled. +- `"tags"`: Tags to apply to the lifecycle policy resource. +""" +function create_lifecycle_policy( + clientToken, + executionRole, + name, + policyDetails, + resourceSelection, + resourceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/CreateLifecyclePolicy", + Dict{String,Any}( + "clientToken" => clientToken, + "executionRole" => executionRole, + "name" => name, + "policyDetails" => policyDetails, + "resourceSelection" => resourceSelection, + "resourceType" => resourceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_lifecycle_policy( + clientToken, + executionRole, + name, + policyDetails, + resourceSelection, + resourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/CreateLifecyclePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "executionRole" => executionRole, + "name" => name, + "policyDetails" => policyDetails, + "resourceSelection" => resourceSelection, + "resourceType" => resourceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_workflow(client_token, name, semantic_version, type) + create_workflow(client_token, name, semantic_version, type, params::Dict{String,<:Any}) + +Create a new workflow or a new version of an existing workflow. + +# Arguments +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. +- `name`: The name of the workflow to create. +- `semantic_version`: The semantic version of this workflow resource. The semantic version + syntax adheres to the following rules. The semantic version has four nodes: + <major>.<minor>.<patch>/<build>. You can assign values for the + first three, and can filter on all of them. Assignment: For the first three nodes you can + assign any positive integer value, including zero, with an upper limit of 2^30-1, or + 1073741823 for each node. Image Builder automatically assigns the build number to the + fourth node. Patterns: You can use any numeric pattern that adheres to the assignment + requirements for the nodes that you can assign. For example, you might choose a software + version pattern, such as 1.0.0, or a date, such as 2021.01.01. +- `type`: The phase in the image build process for which the workflow resource is + responsible. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"changeDescription"`: Describes what change has been made in this version of the + workflow, or what makes this version different from other versions of the workflow. +- `"data"`: Contains the UTF-8 encoded YAML document content for the workflow. 
+ Alternatively, you can specify the uri of a YAML document file stored in Amazon S3. + However, you cannot specify both properties. +- `"description"`: Describes the workflow. +- `"kmsKeyId"`: The ID of the KMS key that is used to encrypt this workflow resource. +- `"tags"`: Tags that apply to the workflow resource. +- `"uri"`: The uri of a YAML component document file. This must be an S3 URL + (s3://bucket/key), and the requester must have permission to access the S3 bucket it points + to. If you use Amazon S3, you can specify component content up to your service quota. + Alternatively, you can specify the YAML document inline, using the component data property. + You cannot specify both properties. +""" +function create_workflow( + clientToken, + name, + semanticVersion, + type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/CreateWorkflow", + Dict{String,Any}( + "clientToken" => clientToken, + "name" => name, + "semanticVersion" => semanticVersion, + "type" => type, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_workflow( + clientToken, + name, + semanticVersion, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/CreateWorkflow", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "name" => name, + "semanticVersion" => semanticVersion, + "type" => type, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_component(component_build_version_arn) delete_component(component_build_version_arn, params::Dict{String,<:Any}) @@ -914,6 +1142,88 @@ function delete_infrastructure_configuration( ) end +""" + delete_lifecycle_policy(lifecycle_policy_arn) + delete_lifecycle_policy(lifecycle_policy_arn, params::Dict{String,<:Any}) + +Delete the specified lifecycle policy resource. + +# Arguments +- `lifecycle_policy_arn`: The Amazon Resource Name (ARN) of the lifecycle policy resource + to delete. + +""" +function delete_lifecycle_policy( + lifecyclePolicyArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "DELETE", + "/DeleteLifecyclePolicy", + Dict{String,Any}("lifecyclePolicyArn" => lifecyclePolicyArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_lifecycle_policy( + lifecyclePolicyArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "DELETE", + "/DeleteLifecyclePolicy", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("lifecyclePolicyArn" => lifecyclePolicyArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_workflow(workflow_build_version_arn) + delete_workflow(workflow_build_version_arn, params::Dict{String,<:Any}) + +Deletes a specific workflow resource. + +# Arguments +- `workflow_build_version_arn`: The Amazon Resource Name (ARN) of the workflow resource to + delete. 
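# Examples
A minimal sketch that creates a workflow build version and then deletes it by the ARN returned from CreateWorkflow; the `@service` wrapper usage, the S3 URI, the BUILD type value, and the `workflowBuildVersionArn` response field are assumptions, not values taken from this patch:

```julia
using UUIDs
using AWS: @service
@service Imagebuilder

# Create a BUILD workflow version from a YAML document stored in S3 (placeholder URI),
# then delete that specific build version.
resp = Imagebuilder.create_workflow(
    string(uuid4()),     # clientToken for idempotency
    "my-build-workflow", # name
    "1.0.0",             # semanticVersion; Image Builder assigns the build node
    "BUILD",             # type (assumed enum value)
    Dict("uri" => "s3://my-bucket/workflows/build.yaml"),
)
Imagebuilder.delete_workflow(resp["workflowBuildVersionArn"])  # response field name assumed
```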
+ +""" +function delete_workflow( + workflowBuildVersionArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "DELETE", + "/DeleteWorkflow", + Dict{String,Any}("workflowBuildVersionArn" => workflowBuildVersionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_workflow( + workflowBuildVersionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "DELETE", + "/DeleteWorkflow", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("workflowBuildVersionArn" => workflowBuildVersionArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_component(component_build_version_arn) get_component(component_build_version_arn, params::Dict{String,<:Any}) @@ -1355,39 +1665,40 @@ function get_infrastructure_configuration( end """ - get_workflow_execution(workflow_execution_id) - get_workflow_execution(workflow_execution_id, params::Dict{String,<:Any}) + get_lifecycle_execution(lifecycle_execution_id) + get_lifecycle_execution(lifecycle_execution_id, params::Dict{String,<:Any}) -Get the runtime information that was logged for a specific runtime instance of the workflow. +Get the runtime information that was logged for a specific runtime instance of the +lifecycle policy. # Arguments -- `workflow_execution_id`: Use the unique identifier for a runtime instance of the workflow - to get runtime details. +- `lifecycle_execution_id`: Use the unique identifier for a runtime instance of the + lifecycle policy to get runtime details. """ -function get_workflow_execution( - workflowExecutionId; aws_config::AbstractAWSConfig=global_aws_config() +function get_lifecycle_execution( + lifecycleExecutionId; aws_config::AbstractAWSConfig=global_aws_config() ) return imagebuilder( "GET", - "/GetWorkflowExecution", - Dict{String,Any}("workflowExecutionId" => workflowExecutionId); + "/GetLifecycleExecution", + Dict{String,Any}("lifecycleExecutionId" => lifecycleExecutionId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_workflow_execution( - workflowExecutionId, +function get_lifecycle_execution( + lifecycleExecutionId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return imagebuilder( "GET", - "/GetWorkflowExecution", + "/GetLifecycleExecution", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("workflowExecutionId" => workflowExecutionId), + Dict{String,Any}("lifecycleExecutionId" => lifecycleExecutionId), params, ), ); @@ -1397,39 +1708,38 @@ function get_workflow_execution( end """ - get_workflow_step_execution(step_execution_id) - get_workflow_step_execution(step_execution_id, params::Dict{String,<:Any}) + get_lifecycle_policy(lifecycle_policy_arn) + get_lifecycle_policy(lifecycle_policy_arn, params::Dict{String,<:Any}) -Get the runtime information that was logged for a specific runtime instance of the workflow -step. +Get details for the specified image lifecycle policy. # Arguments -- `step_execution_id`: Use the unique identifier for a specific runtime instance of the - workflow step to get runtime details for that step. +- `lifecycle_policy_arn`: Specifies the Amazon Resource Name (ARN) of the image lifecycle + policy resource to get. 
""" -function get_workflow_step_execution( - stepExecutionId; aws_config::AbstractAWSConfig=global_aws_config() +function get_lifecycle_policy( + lifecyclePolicyArn; aws_config::AbstractAWSConfig=global_aws_config() ) return imagebuilder( "GET", - "/GetWorkflowStepExecution", - Dict{String,Any}("stepExecutionId" => stepExecutionId); + "/GetLifecyclePolicy", + Dict{String,Any}("lifecyclePolicyArn" => lifecyclePolicyArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_workflow_step_execution( - stepExecutionId, +function get_lifecycle_policy( + lifecyclePolicyArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return imagebuilder( "GET", - "/GetWorkflowStepExecution", + "/GetLifecyclePolicy", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("stepExecutionId" => stepExecutionId), params + _merge, Dict{String,Any}("lifecyclePolicyArn" => lifecyclePolicyArn), params ), ); aws_config=aws_config, @@ -1438,18 +1748,144 @@ function get_workflow_step_execution( end """ - import_component(client_token, format, name, platform, semantic_version, type) - import_component(client_token, format, name, platform, semantic_version, type, params::Dict{String,<:Any}) + get_workflow(workflow_build_version_arn) + get_workflow(workflow_build_version_arn, params::Dict{String,<:Any}) -Imports a component and transforms its data into a component document. +Get a workflow resource object. # Arguments -- `client_token`: The idempotency token of the component. -- `format`: The format of the resource that you want to import as a component. -- `name`: The name of the component. -- `platform`: The platform of the component. -- `semantic_version`: The semantic version of the component. This version follows the - semantic version syntax. The semantic version has four nodes: +- `workflow_build_version_arn`: The Amazon Resource Name (ARN) of the workflow resource + that you want to get. + +""" +function get_workflow( + workflowBuildVersionArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "GET", + "/GetWorkflow", + Dict{String,Any}("workflowBuildVersionArn" => workflowBuildVersionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_workflow( + workflowBuildVersionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "GET", + "/GetWorkflow", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("workflowBuildVersionArn" => workflowBuildVersionArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_workflow_execution(workflow_execution_id) + get_workflow_execution(workflow_execution_id, params::Dict{String,<:Any}) + +Get the runtime information that was logged for a specific runtime instance of the workflow. + +# Arguments +- `workflow_execution_id`: Use the unique identifier for a runtime instance of the workflow + to get runtime details. 
+ +""" +function get_workflow_execution( + workflowExecutionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "GET", + "/GetWorkflowExecution", + Dict{String,Any}("workflowExecutionId" => workflowExecutionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_workflow_execution( + workflowExecutionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "GET", + "/GetWorkflowExecution", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("workflowExecutionId" => workflowExecutionId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_workflow_step_execution(step_execution_id) + get_workflow_step_execution(step_execution_id, params::Dict{String,<:Any}) + +Get the runtime information that was logged for a specific runtime instance of the workflow +step. + +# Arguments +- `step_execution_id`: Use the unique identifier for a specific runtime instance of the + workflow step to get runtime details for that step. + +""" +function get_workflow_step_execution( + stepExecutionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "GET", + "/GetWorkflowStepExecution", + Dict{String,Any}("stepExecutionId" => stepExecutionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_workflow_step_execution( + stepExecutionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "GET", + "/GetWorkflowStepExecution", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("stepExecutionId" => stepExecutionId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_component(client_token, format, name, platform, semantic_version, type) + import_component(client_token, format, name, platform, semantic_version, type, params::Dict{String,<:Any}) + +Imports a component and transforms its data into a component document. + +# Arguments +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. +- `format`: The format of the resource that you want to import as a component. +- `name`: The name of the component. +- `platform`: The platform of the component. +- `semantic_version`: The semantic version of the component. This version follows the + semantic version syntax. The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them. Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when @@ -1462,7 +1898,7 @@ Imports a component and transforms its data into a component document. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"changeDescription"`: The change description of the component. This description indicates the change that has been made in this version, or what makes this version - different from other versions of this component. + different from other versions of the component. - `"data"`: The data of the component. Used to specify the data inline. Either data or uri can be used to specify the data within the component. - `"description"`: The description of the component. Describes the contents of the @@ -1636,7 +2072,7 @@ wildcards. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_component_build_versions( @@ -1689,7 +2125,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filters"`: Use the following filters to streamline results: description name platform supportedOsVersion type version - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. - `"owner"`: Filters results based on the type of owner for the component. By default, this request returns a list of components that your account owns. To see results for other types @@ -1724,7 +2160,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filters"`: Use the following filters to streamline results: containerType name parentImage platform - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. - `"owner"`: Returns container recipes belonging to the specified owner, that have been shared with you. You can omit this field to return container recipes belonging to your @@ -1760,7 +2196,7 @@ Returns a list of distribution configurations. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filters"`: You can filter on name to streamline results. - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_distribution_configurations(; @@ -1800,7 +2236,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filters"`: Use the following filters to streamline results: name osVersion platform type version - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_image_build_versions( @@ -1846,7 +2282,7 @@ Web Services Systems Manager Inventory at build time. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_image_packages( @@ -1894,7 +2330,7 @@ Returns a list of images created by the specified pipeline. Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"filters"`: Use the following filters to streamline results: name version - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_image_pipeline_images( @@ -1938,7 +2374,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys distributionConfigurationArn imageRecipeArn infrastructureConfigurationArn name status - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_image_pipelines(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1972,7 +2408,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filters"`: Use the following filters to streamline results: name parentImage platform - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. - `"owner"`: The owner defines which image recipes you want to list. By default, this request will only show image recipes owned by your account. You can use this field to @@ -2011,7 +2447,7 @@ vulnerabilityId # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filter"`: -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_image_scan_finding_aggregations(; @@ -2049,7 +2485,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys imagePipelineArn vulnerabilityId severity If you don't request a filter, then all findings in your account are listed. - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_image_scan_findings(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2086,7 +2522,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys platform type version - `"includeDeprecated"`: Includes deprecated images in the response list. - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. - `"owner"`: The owner defines which images you want to list. By default, this request will only show images owned by your account. You can use this field to specify if you want to @@ -2120,7 +2556,7 @@ Returns a list of infrastructure configurations. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filters"`: You can filter on name to streamline results. - `"maxResults"`: The maximum items to return in a request. 
-- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_infrastructure_configurations(; @@ -2145,6 +2581,135 @@ function list_infrastructure_configurations( ) end +""" + list_lifecycle_execution_resources(lifecycle_execution_id) + list_lifecycle_execution_resources(lifecycle_execution_id, params::Dict{String,<:Any}) + +List resources that the runtime instance of the image lifecycle identified for lifecycle +actions. + +# Arguments +- `lifecycle_execution_id`: Use the unique identifier for a runtime instance of the + lifecycle policy to get runtime details. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum items to return in a request. +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. +- `"parentResourceId"`: You can leave this empty to get a list of Image Builder resources + that were identified for lifecycle actions. To get a list of associated resources that are + impacted for an individual resource (the parent), specify its Amazon Resource Name (ARN). + Associated resources are produced from your image and distributed when you run a build, + such as AMIs or container images stored in ECR repositories. +""" +function list_lifecycle_execution_resources( + lifecycleExecutionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "POST", + "/ListLifecycleExecutionResources", + Dict{String,Any}("lifecycleExecutionId" => lifecycleExecutionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lifecycle_execution_resources( + lifecycleExecutionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "POST", + "/ListLifecycleExecutionResources", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("lifecycleExecutionId" => lifecycleExecutionId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_lifecycle_executions(resource_arn) + list_lifecycle_executions(resource_arn, params::Dict{String,<:Any}) + +Get the lifecycle runtime history for the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which to get a list of + lifecycle runtime instances. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum items to return in a request. +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. 
+""" +function list_lifecycle_executions( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "POST", + "/ListLifecycleExecutions", + Dict{String,Any}("resourceArn" => resourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lifecycle_executions( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "POST", + "/ListLifecycleExecutions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("resourceArn" => resourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_lifecycle_policies() + list_lifecycle_policies(params::Dict{String,<:Any}) + +Get a list of lifecycle policies in your Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: Streamline results based on one of the following values: Name, Status. +- `"maxResults"`: The maximum items to return in a request. +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. +""" +function list_lifecycle_policies(; aws_config::AbstractAWSConfig=global_aws_config()) + return imagebuilder( + "POST", + "/ListLifecyclePolicies"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lifecycle_policies( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "POST", + "/ListLifecyclePolicies", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -2180,6 +2745,84 @@ function list_tags_for_resource( ) end +""" + list_waiting_workflow_steps() + list_waiting_workflow_steps(params::Dict{String,<:Any}) + +Get a list of workflow steps that are waiting for action for workflows in your Amazon Web +Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum items to return in a request. +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. +""" +function list_waiting_workflow_steps(; aws_config::AbstractAWSConfig=global_aws_config()) + return imagebuilder( + "POST", + "/ListWaitingWorkflowSteps"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_waiting_workflow_steps( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "POST", + "/ListWaitingWorkflowSteps", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_workflow_build_versions(workflow_version_arn) + list_workflow_build_versions(workflow_version_arn, params::Dict{String,<:Any}) + +Returns a list of build versions for a specific workflow resource. + +# Arguments +- `workflow_version_arn`: The Amazon Resource Name (ARN) of the workflow resource for which + to get a list of build versions. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum items to return in a request. +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. 
+""" +function list_workflow_build_versions( + workflowVersionArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "POST", + "/ListWorkflowBuildVersions", + Dict{String,Any}("workflowVersionArn" => workflowVersionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workflow_build_versions( + workflowVersionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "POST", + "/ListWorkflowBuildVersions", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("workflowVersionArn" => workflowVersionArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_workflow_executions(image_build_version_arn) list_workflow_executions(image_build_version_arn, params::Dict{String,<:Any}) @@ -2194,7 +2837,7 @@ version. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_workflow_executions( @@ -2232,8 +2875,8 @@ end list_workflow_step_executions(workflow_execution_id) list_workflow_step_executions(workflow_execution_id, params::Dict{String,<:Any}) -Shows runtime data for each step in a runtime instance of the workflow that you specify in -the request. +Returns runtime data for each step in a runtime instance of the workflow that you specify +in the request. # Arguments - `workflow_execution_id`: The unique identifier that Image Builder assigned to keep track @@ -2242,7 +2885,7 @@ the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum items to return in a request. -- `"nextToken"`: A token to specify where to start paginating. This is the NextToken from a +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a previously truncated response. """ function list_workflow_step_executions( @@ -2276,6 +2919,39 @@ function list_workflow_step_executions( ) end +""" + list_workflows() + list_workflows(params::Dict{String,<:Any}) + +Lists workflow build versions based on filtering parameters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"byName"`: Specify all or part of the workflow name to streamline results. +- `"filters"`: Used to streamline search results. +- `"maxResults"`: The maximum items to return in a request. +- `"nextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. +- `"owner"`: Used to get a list of workflow build version filtered by the identity of the + creator. 
+""" +function list_workflows(; aws_config::AbstractAWSConfig=global_aws_config()) + return imagebuilder( + "POST", "/ListWorkflows"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_workflows( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "POST", + "/ListWorkflows", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_component_policy(component_arn, policy) put_component_policy(component_arn, policy, params::Dict{String,<:Any}) @@ -2468,6 +3144,73 @@ function put_image_recipe_policy( ) end +""" + send_workflow_step_action(action, client_token, image_build_version_arn, step_execution_id) + send_workflow_step_action(action, client_token, image_build_version_arn, step_execution_id, params::Dict{String,<:Any}) + +Pauses or resumes image creation when the associated workflow runs a WaitForAction step. + +# Arguments +- `action`: The action for the image creation process to take while a workflow + WaitForAction step waits for an asynchronous action to complete. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. +- `image_build_version_arn`: The Amazon Resource Name (ARN) of the image build version to + send action for. +- `step_execution_id`: Uniquely identifies the workflow step that sent the step action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"reason"`: The reason why this action is sent. +""" +function send_workflow_step_action( + action, + clientToken, + imageBuildVersionArn, + stepExecutionId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/SendWorkflowStepAction", + Dict{String,Any}( + "action" => action, + "clientToken" => clientToken, + "imageBuildVersionArn" => imageBuildVersionArn, + "stepExecutionId" => stepExecutionId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_workflow_step_action( + action, + clientToken, + imageBuildVersionArn, + stepExecutionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/SendWorkflowStepAction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "action" => action, + "clientToken" => clientToken, + "imageBuildVersionArn" => imageBuildVersionArn, + "stepExecutionId" => stepExecutionId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_image_pipeline_execution(client_token, image_pipeline_arn) start_image_pipeline_execution(client_token, image_pipeline_arn, params::Dict{String,<:Any}) @@ -2475,7 +3218,8 @@ end Manually triggers a pipeline to create an image. # Arguments -- `client_token`: The idempotency token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `image_pipeline_arn`: The Amazon Resource Name (ARN) of the image pipeline that you want to manually invoke. 
@@ -2516,6 +3260,69 @@ function start_image_pipeline_execution( ) end +""" + start_resource_state_update(client_token, resource_arn, state) + start_resource_state_update(client_token, resource_arn, state, params::Dict{String,<:Any}) + +Begin asynchronous resource state update for lifecycle changes to the specified image +resources. + +# Arguments +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. +- `resource_arn`: The ARN of the Image Builder resource that is updated. The state update + might also impact associated resources. +- `state`: Indicates the lifecycle action to take for this request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"exclusionRules"`: Skip action on the image resource and associated resources if + specified exclusion rules are met. +- `"executionRole"`: The name or Amazon Resource Name (ARN) of the IAM role that’s used + to update image state. +- `"includeResources"`: A list of image resources to update state for. +- `"updateAt"`: The timestamp that indicates when resources are updated by a lifecycle + action. +""" +function start_resource_state_update( + clientToken, resourceArn, state; aws_config::AbstractAWSConfig=global_aws_config() +) + return imagebuilder( + "PUT", + "/StartResourceStateUpdate", + Dict{String,Any}( + "clientToken" => clientToken, "resourceArn" => resourceArn, "state" => state + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_resource_state_update( + clientToken, + resourceArn, + state, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/StartResourceStateUpdate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "resourceArn" => resourceArn, + "state" => state, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -2596,7 +3403,8 @@ Updates a new distribution configuration. Distribution configurations define and the outputs of your pipeline. # Arguments -- `client_token`: The idempotency token of the distribution configuration. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `distribution_configuration_arn`: The Amazon Resource Name (ARN) of the distribution configuration that you want to update. - `distributions`: The distributions of the distribution configuration. @@ -2654,12 +3462,14 @@ end update_image_pipeline(client_token, image_pipeline_arn, infrastructure_configuration_arn, params::Dict{String,<:Any}) Updates an image pipeline. Image pipelines enable you to automate the creation and -distribution of images. UpdateImagePipeline does not support selective updates for the -pipeline. You must specify all of the required properties in the update request, not just -the properties that have changed. +distribution of images. You must specify exactly one recipe for your image, using either a +containerRecipeArn or an imageRecipeArn. UpdateImagePipeline does not support selective +updates for the pipeline. 
You must specify all of the required properties in the update +request, not just the properties that have changed. # Arguments -- `client_token`: The idempotency token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `image_pipeline_arn`: The Amazon Resource Name (ARN) of the image pipeline that you want to update. - `infrastructure_configuration_arn`: The Amazon Resource Name (ARN) of the infrastructure @@ -2676,12 +3486,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedImageMetadataEnabled"`: Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default. +- `"executionRole"`: The name or Amazon Resource Name (ARN) for the IAM role you create + that grants Image Builder access to perform workflow actions. - `"imageRecipeArn"`: The Amazon Resource Name (ARN) of the image recipe that will be used to configure images updated by this image pipeline. - `"imageScanningConfiguration"`: Contains settings for vulnerability scans. - `"imageTestsConfiguration"`: The image test configuration of the image pipeline. - `"schedule"`: The schedule of the image pipeline. - `"status"`: The status of the image pipeline. +- `"workflows"`: Contains the workflows to run for the pipeline. """ function update_image_pipeline( clientToken, @@ -2735,7 +3548,8 @@ Updates a new infrastructure configuration. An infrastructure configuration defi environment in which your image will be built and tested. # Arguments -- `client_token`: The idempotency token used to make this request idempotent. +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. - `infrastructure_configuration_arn`: The Amazon Resource Name (ARN) of the infrastructure configuration that you want to update. - `instance_profile_name`: The instance profile to associate with the instance used to @@ -2812,3 +3626,81 @@ function update_infrastructure_configuration( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_lifecycle_policy(client_token, execution_role, lifecycle_policy_arn, policy_details, resource_selection, resource_type) + update_lifecycle_policy(client_token, execution_role, lifecycle_policy_arn, policy_details, resource_selection, resource_type, params::Dict{String,<:Any}) + +Update the specified lifecycle policy. + +# Arguments +- `client_token`: Unique, case-sensitive identifier you provide to ensure idempotency of + the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. +- `execution_role`: The name or Amazon Resource Name (ARN) of the IAM role that Image + Builder uses to update the lifecycle policy. +- `lifecycle_policy_arn`: The Amazon Resource Name (ARN) of the lifecycle policy resource. +- `policy_details`: The configuration details for a lifecycle policy resource. +- `resource_selection`: Selection criteria for resources that the lifecycle policy applies + to. +- `resource_type`: The type of image resource that the lifecycle policy applies to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"description"`: Optional description for the lifecycle policy. +- `"status"`: Indicates whether the lifecycle policy resource is enabled. +""" +function update_lifecycle_policy( + clientToken, + executionRole, + lifecyclePolicyArn, + policyDetails, + resourceSelection, + resourceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/UpdateLifecyclePolicy", + Dict{String,Any}( + "clientToken" => clientToken, + "executionRole" => executionRole, + "lifecyclePolicyArn" => lifecyclePolicyArn, + "policyDetails" => policyDetails, + "resourceSelection" => resourceSelection, + "resourceType" => resourceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_lifecycle_policy( + clientToken, + executionRole, + lifecyclePolicyArn, + policyDetails, + resourceSelection, + resourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return imagebuilder( + "PUT", + "/UpdateLifecyclePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "executionRole" => executionRole, + "lifecyclePolicyArn" => lifecyclePolicyArn, + "policyDetails" => policyDetails, + "resourceSelection" => resourceSelection, + "resourceType" => resourceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/inspector2.jl b/src/services/inspector2.jl index 2db5d98077..017817898c 100644 --- a/src/services/inspector2.jl +++ b/src/services/inspector2.jl @@ -71,6 +71,82 @@ function batch_get_account_status( ) end +""" + batch_get_code_snippet(finding_arns) + batch_get_code_snippet(finding_arns, params::Dict{String,<:Any}) + +Retrieves code snippets from findings that Amazon Inspector detected code vulnerabilities +in. + +# Arguments +- `finding_arns`: An array of finding ARNs for the findings you want to retrieve code + snippets from. + +""" +function batch_get_code_snippet( + findingArns; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/codesnippet/batchget", + Dict{String,Any}("findingArns" => findingArns); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_code_snippet( + findingArns, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/codesnippet/batchget", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("findingArns" => findingArns), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_get_finding_details(finding_arns) + batch_get_finding_details(finding_arns, params::Dict{String,<:Any}) + +Gets vulnerability details for findings. + +# Arguments +- `finding_arns`: A list of finding ARNs. 
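+
+# Example
+A minimal usage sketch; the finding ARN below is a hypothetical placeholder:
+
+    batch_get_finding_details([
+        "arn:aws:inspector2:us-east-1:123456789012:finding/0123456789abcdef0123456789abcdef",
+    ])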
+ +""" +function batch_get_finding_details( + findingArns; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/findings/details/batch/get", + Dict{String,Any}("findingArns" => findingArns); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_finding_details( + findingArns, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/findings/details/batch/get", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("findingArns" => findingArns), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_get_free_trial_info(account_ids) batch_get_free_trial_info(account_ids, params::Dict{String,<:Any}) @@ -218,11 +294,112 @@ function cancel_findings_report( ) end +""" + cancel_sbom_export(report_id) + cancel_sbom_export(report_id, params::Dict{String,<:Any}) + +Cancels a software bill of materials (SBOM) report. + +# Arguments +- `report_id`: The report ID of the SBOM export to cancel. + +""" +function cancel_sbom_export(reportId; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/sbomexport/cancel", + Dict{String,Any}("reportId" => reportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_sbom_export( + reportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/sbomexport/cancel", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("reportId" => reportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_cis_scan_configuration(scan_name, schedule, security_level, targets) + create_cis_scan_configuration(scan_name, schedule, security_level, targets, params::Dict{String,<:Any}) + +Creates a CIS scan configuration. + +# Arguments +- `scan_name`: The scan name for the CIS scan configuration. +- `schedule`: The schedule for the CIS scan configuration. +- `security_level`: The security level for the CIS scan configuration. Security level + refers to the Benchmark levels that CIS assigns to a profile. +- `targets`: The targets for the CIS scan configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: The tags for the CIS scan configuration. +""" +function create_cis_scan_configuration( + scanName, + schedule, + securityLevel, + targets; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/cis/scan-configuration/create", + Dict{String,Any}( + "scanName" => scanName, + "schedule" => schedule, + "securityLevel" => securityLevel, + "targets" => targets, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_cis_scan_configuration( + scanName, + schedule, + securityLevel, + targets, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/cis/scan-configuration/create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "scanName" => scanName, + "schedule" => schedule, + "securityLevel" => securityLevel, + "targets" => targets, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_filter(action, filter_criteria, name) create_filter(action, filter_criteria, name, params::Dict{String,<:Any}) -Creates a filter resource using specified filter criteria. 
+Creates a filter resource using specified filter criteria. When the filter action is set to +SUPPRESS this action creates a suppression rule. # Arguments - `action`: Defines the action that is to be applied to the findings that match the filter. @@ -324,6 +501,96 @@ function create_findings_report( ) end +""" + create_sbom_export(report_format, s3_destination) + create_sbom_export(report_format, s3_destination, params::Dict{String,<:Any}) + +Creates a software bill of materials (SBOM) report. + +# Arguments +- `report_format`: The output format for the software bill of materials (SBOM) report. +- `s3_destination`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"resourceFilterCriteria"`: The resource filter criteria for the software bill of + materials (SBOM) report. +""" +function create_sbom_export( + reportFormat, s3Destination; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/sbomexport/create", + Dict{String,Any}("reportFormat" => reportFormat, "s3Destination" => s3Destination); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_sbom_export( + reportFormat, + s3Destination, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/sbomexport/create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "reportFormat" => reportFormat, "s3Destination" => s3Destination + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_cis_scan_configuration(scan_configuration_arn) + delete_cis_scan_configuration(scan_configuration_arn, params::Dict{String,<:Any}) + +Deletes a CIS scan configuration. + +# Arguments +- `scan_configuration_arn`: The ARN of the CIS scan configuration. + +""" +function delete_cis_scan_configuration( + scanConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan-configuration/delete", + Dict{String,Any}("scanConfigurationArn" => scanConfigurationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_cis_scan_configuration( + scanConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/cis/scan-configuration/delete", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("scanConfigurationArn" => scanConfigurationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_filter(arn) delete_filter(arn, params::Dict{String,<:Any}) @@ -584,6 +851,104 @@ function enable_delegated_admin_account( ) end +""" + get_cis_scan_report(scan_arn) + get_cis_scan_report(scan_arn, params::Dict{String,<:Any}) + +Retrieves a CIS scan report. + +# Arguments +- `scan_arn`: The scan ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"reportFormat"`: The format of the report. Valid values are PDF and CSV. If no value is + specified, the report format defaults to PDF. +- `"targetAccounts"`: The target accounts. 
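+
+# Example
+A minimal usage sketch; the scan ARN below is a hypothetical placeholder, and the report
+format uses one of the documented values (PDF or CSV):
+
+    get_cis_scan_report(
+        "arn:aws:inspector2:us-east-1:123456789012:owner/123456789012/cis-scan/example",
+        Dict{String,Any}("reportFormat" => "CSV"),
+    )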
+""" +function get_cis_scan_report(scanArn; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/cis/scan/report/get", + Dict{String,Any}("scanArn" => scanArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_cis_scan_report( + scanArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan/report/get", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("scanArn" => scanArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_cis_scan_result_details(account_id, scan_arn, target_resource_id) + get_cis_scan_result_details(account_id, scan_arn, target_resource_id, params::Dict{String,<:Any}) + +Retrieves CIS scan result details. + +# Arguments +- `account_id`: The account ID. +- `scan_arn`: The scan ARN. +- `target_resource_id`: The target resource ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterCriteria"`: The filter criteria. +- `"maxResults"`: The maximum number of CIS scan result details to be returned in a single + page of results. +- `"nextToken"`: The pagination token from a previous request that's used to retrieve the + next page of results. +- `"sortBy"`: The sort by order. +- `"sortOrder"`: The sort order. +""" +function get_cis_scan_result_details( + accountId, scanArn, targetResourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan-result/details/get", + Dict{String,Any}( + "accountId" => accountId, + "scanArn" => scanArn, + "targetResourceId" => targetResourceId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_cis_scan_result_details( + accountId, + scanArn, + targetResourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/cis/scan-result/details/get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "accountId" => accountId, + "scanArn" => scanArn, + "targetResourceId" => targetResourceId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_configuration() get_configuration(params::Dict{String,<:Any}) @@ -667,29 +1032,72 @@ function get_ec2_deep_inspection_configuration( end """ - get_findings_report_status() - get_findings_report_status(params::Dict{String,<:Any}) + get_encryption_key(resource_type, scan_type) + get_encryption_key(resource_type, scan_type, params::Dict{String,<:Any}) -Gets the status of a findings report. +Gets an encryption key. + +# Arguments +- `resource_type`: The resource type the key encrypts. +- `scan_type`: The scan type the key encrypts. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"reportId"`: The ID of the report to retrieve the status of. 
""" -function get_findings_report_status(; aws_config::AbstractAWSConfig=global_aws_config()) +function get_encryption_key( + resourceType, scanType; aws_config::AbstractAWSConfig=global_aws_config() +) return inspector2( - "POST", - "/reporting/status/get"; + "GET", + "/encryptionkey/get", + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_findings_report_status( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function get_encryption_key( + resourceType, + scanType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return inspector2( - "POST", - "/reporting/status/get", + "GET", + "/encryptionkey/get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_findings_report_status() + get_findings_report_status(params::Dict{String,<:Any}) + +Gets the status of a findings report. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"reportId"`: The ID of the report to retrieve the status of. +""" +function get_findings_report_status(; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/reporting/status/get"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_findings_report_status( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/reporting/status/get", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -732,6 +1140,41 @@ function get_member( ) end +""" + get_sbom_export(report_id) + get_sbom_export(report_id, params::Dict{String,<:Any}) + +Gets details of a software bill of materials (SBOM) report. + +# Arguments +- `report_id`: The report ID of the SBOM export to get details for. + +""" +function get_sbom_export(reportId; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/sbomexport/get", + Dict{String,Any}("reportId" => reportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_sbom_export( + reportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/sbomexport/get", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("reportId" => reportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_account_permissions() list_account_permissions(params::Dict{String,<:Any}) @@ -740,11 +1183,14 @@ Lists the permissions an account has to configure Amazon Inspector. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. 
For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the NextToken value returned from the previous request to + continue listing results after the first page. - `"service"`: The service scan type to check permissions for. """ function list_account_permissions(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -767,6 +1213,159 @@ function list_account_permissions( ) end +""" + list_cis_scan_configurations() + list_cis_scan_configurations(params::Dict{String,<:Any}) + +Lists CIS scan configurations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterCriteria"`: The CIS scan configuration filter criteria. +- `"maxResults"`: The maximum number of CIS scan configurations to be returned in a single + page of results. +- `"nextToken"`: The pagination token from a previous request that's used to retrieve the + next page of results. +- `"sortBy"`: The CIS scan configuration sort by order. +- `"sortOrder"`: The CIS scan configuration sort order order. +""" +function list_cis_scan_configurations(; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/cis/scan-configuration/list"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cis_scan_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan-configuration/list", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_cis_scan_results_aggregated_by_checks(scan_arn) + list_cis_scan_results_aggregated_by_checks(scan_arn, params::Dict{String,<:Any}) + +Lists scan results aggregated by checks. + +# Arguments +- `scan_arn`: The scan ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterCriteria"`: The filter criteria. +- `"maxResults"`: The maximum number of scan results aggregated by checks to be returned in + a single page of results. +- `"nextToken"`: The pagination token from a previous request that's used to retrieve the + next page of results. +- `"sortBy"`: The sort by order. +- `"sortOrder"`: The sort order. +""" +function list_cis_scan_results_aggregated_by_checks( + scanArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan-result/check/list", + Dict{String,Any}("scanArn" => scanArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cis_scan_results_aggregated_by_checks( + scanArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan-result/check/list", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("scanArn" => scanArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_cis_scan_results_aggregated_by_target_resource(scan_arn) + list_cis_scan_results_aggregated_by_target_resource(scan_arn, params::Dict{String,<:Any}) + +Lists scan results aggregated by a target resource. + +# Arguments +- `scan_arn`: The scan ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"filterCriteria"`: The filter criteria. +- `"maxResults"`: The maximum number of scan results aggregated by a target resource to be + returned in a single page of results. +- `"nextToken"`: The pagination token from a previous request that's used to retrieve the + next page of results. +- `"sortBy"`: The sort by order. +- `"sortOrder"`: The sort order. +""" +function list_cis_scan_results_aggregated_by_target_resource( + scanArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan-result/resource/list", + Dict{String,Any}("scanArn" => scanArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cis_scan_results_aggregated_by_target_resource( + scanArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan-result/resource/list", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("scanArn" => scanArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_cis_scans() + list_cis_scans(params::Dict{String,<:Any}) + +Returns a CIS scan list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"detailLevel"`: The detail applied to the CIS scan. +- `"filterCriteria"`: The CIS scan filter criteria. +- `"maxResults"`: The maximum number of results to be returned. +- `"nextToken"`: The pagination token from a previous request that's used to retrieve the + next page of results. +- `"sortBy"`: The CIS scans sort by order. +- `"sortOrder"`: The CIS scans sort order. +""" +function list_cis_scans(; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", "/cis/scan/list"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_cis_scans( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/cis/scan/list", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_coverage() list_coverage(params::Dict{String,<:Any}) @@ -777,11 +1376,14 @@ Lists coverage details for you environment. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filterCriteria"`: An object that contains details on the filters to apply to the coverage data for your environment. -- `"maxResults"`: The maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the nextToken value returned from the previous request to + continue listing results after the first page. 
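+
+# Example
+A minimal pagination sketch; the page size below is an arbitrary illustrative value. Pass
+the nextToken returned in one response back in the next call to continue listing:
+
+    list_coverage(Dict{String,Any}("maxResults" => 100))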
""" function list_coverage(; aws_config::AbstractAWSConfig=global_aws_config()) return inspector2( @@ -844,11 +1446,14 @@ Lists information about the Amazon Inspector delegated administrator of your org # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the nextToken value returned from the previous request to + continue listing results after the first page. """ function list_delegated_admin_accounts(; aws_config::AbstractAWSConfig=global_aws_config()) return inspector2( @@ -880,11 +1485,14 @@ Lists the filters associated with your account. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"action"`: The action the filter applies to matched findings. - `"arns"`: The Amazon resource number (ARN) of the filter. -- `"maxResults"`: The maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the nextToken value returned from the previous request to + continue listing results after the first page. """ function list_filters(; aws_config::AbstractAWSConfig=global_aws_config()) return inspector2( @@ -918,11 +1526,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for. - `"aggregationRequest"`: Details of the aggregation request that is used to filter your aggregation results. -- `"maxResults"`: The maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. 
For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the nextToken value returned from the previous request to + continue listing results after the first page. """ function list_finding_aggregations( aggregationType; aws_config::AbstractAWSConfig=global_aws_config() @@ -962,11 +1573,14 @@ Lists findings for your environment. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filterCriteria"`: Details on the filters to apply to your finding results. -- `"maxResults"`: The maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the nextToken value returned from the previous request to + continue listing results after the first page. - `"sortCriteria"`: Details on the sort criteria to apply to your finding results. """ function list_findings(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -995,11 +1609,14 @@ organization. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the nextToken value returned from the previous request to + continue listing results after the first page. - `"onlyAssociated"`: Specifies whether to list only currently associated members if True or to list all members within the organization if False. """ @@ -1063,11 +1680,14 @@ Lists the Amazon Inspector usage totals over the last 30 days. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"accountIds"`: The Amazon Web Services account IDs to retrieve usage totals for. -- `"maxResults"`: The maximum number of results to return in the response. 
+- `"maxResults"`: The maximum number of results the response can return. If your request + would return more than the maximum the response will return a nextToken value, use this + value when you call the action again to get the remaining results. - `"nextToken"`: A token to use for paginating results that are returned in the response. - Set the value of this parameter to null for the first request to a list action. For - subsequent calls, use the NextToken value returned from the previous request to continue - listing results after the first page. + Set the value of this parameter to null for the first request to a list action. If your + response returns more than the maxResults maximum value it will also return a nextToken + value. For subsequent calls, use the nextToken value returned from the previous request to + continue listing results after the first page. """ function list_usage_totals(; aws_config::AbstractAWSConfig=global_aws_config()) return inspector2( @@ -1086,6 +1706,50 @@ function list_usage_totals( ) end +""" + reset_encryption_key(resource_type, scan_type) + reset_encryption_key(resource_type, scan_type, params::Dict{String,<:Any}) + +Resets an encryption key. After the key is reset your resources will be encrypted by an +Amazon Web Services owned key. + +# Arguments +- `resource_type`: The resource type the key encrypts. +- `scan_type`: The scan type the key encrypts. + +""" +function reset_encryption_key( + resourceType, scanType; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "PUT", + "/encryptionkey/reset", + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reset_encryption_key( + resourceType, + scanType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "PUT", + "/encryptionkey/reset", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_vulnerabilities(filter_criteria) search_vulnerabilities(filter_criteria, params::Dict{String,<:Any}) @@ -1129,6 +1793,202 @@ function search_vulnerabilities( ) end +""" + send_cis_session_health(scan_job_id, session_token) + send_cis_session_health(scan_job_id, session_token, params::Dict{String,<:Any}) + + Sends a CIS session health. This API is used by the Amazon Inspector SSM plugin to +communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this +API to start a CIS scan session for the scan ID supplied by the service. + +# Arguments +- `scan_job_id`: A unique identifier for the scan job. +- `session_token`: The unique token that identifies the CIS session. 
+ +""" +function send_cis_session_health( + scanJobId, sessionToken; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "PUT", + "/cissession/health/send", + Dict{String,Any}("scanJobId" => scanJobId, "sessionToken" => sessionToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_cis_session_health( + scanJobId, + sessionToken, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "PUT", + "/cissession/health/send", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("scanJobId" => scanJobId, "sessionToken" => sessionToken), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + send_cis_session_telemetry(messages, scan_job_id, session_token) + send_cis_session_telemetry(messages, scan_job_id, session_token, params::Dict{String,<:Any}) + + Sends a CIS session telemetry. This API is used by the Amazon Inspector SSM plugin to +communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this +API to start a CIS scan session for the scan ID supplied by the service. + +# Arguments +- `messages`: The CIS session telemetry messages. +- `scan_job_id`: A unique identifier for the scan job. +- `session_token`: The unique token that identifies the CIS session. + +""" +function send_cis_session_telemetry( + messages, scanJobId, sessionToken; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "PUT", + "/cissession/telemetry/send", + Dict{String,Any}( + "messages" => messages, "scanJobId" => scanJobId, "sessionToken" => sessionToken + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_cis_session_telemetry( + messages, + scanJobId, + sessionToken, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "PUT", + "/cissession/telemetry/send", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "messages" => messages, + "scanJobId" => scanJobId, + "sessionToken" => sessionToken, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_cis_session(message, scan_job_id) + start_cis_session(message, scan_job_id, params::Dict{String,<:Any}) + + Starts a CIS session. This API is used by the Amazon Inspector SSM plugin to communicate +with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start +a CIS scan session for the scan ID supplied by the service. + +# Arguments +- `message`: The start CIS session message. +- `scan_job_id`: A unique identifier for the scan job. 
+ +""" +function start_cis_session( + message, scanJobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "PUT", + "/cissession/start", + Dict{String,Any}("message" => message, "scanJobId" => scanJobId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_cis_session( + message, + scanJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "PUT", + "/cissession/start", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("message" => message, "scanJobId" => scanJobId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_cis_session(message, scan_job_id, session_token) + stop_cis_session(message, scan_job_id, session_token, params::Dict{String,<:Any}) + + Stops a CIS session. This API is used by the Amazon Inspector SSM plugin to communicate +with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start +a CIS scan session for the scan ID supplied by the service. + +# Arguments +- `message`: The stop CIS session message. +- `scan_job_id`: A unique identifier for the scan job. +- `session_token`: The unique token that identifies the CIS session. + +""" +function stop_cis_session( + message, scanJobId, sessionToken; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "PUT", + "/cissession/stop", + Dict{String,Any}( + "message" => message, "scanJobId" => scanJobId, "sessionToken" => sessionToken + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_cis_session( + message, + scanJobId, + sessionToken, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "PUT", + "/cissession/stop", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "message" => message, + "scanJobId" => scanJobId, + "sessionToken" => sessionToken, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -1202,40 +2062,46 @@ function untag_resource( end """ - update_configuration(ecr_configuration) - update_configuration(ecr_configuration, params::Dict{String,<:Any}) + update_cis_scan_configuration(scan_configuration_arn) + update_cis_scan_configuration(scan_configuration_arn, params::Dict{String,<:Any}) -Updates setting configurations for your Amazon Inspector account. When you use this API as -an Amazon Inspector delegated administrator this updates the setting for all accounts you -manage. Member accounts in an organization cannot update this setting. +Updates a CIS scan configuration. # Arguments -- `ecr_configuration`: Specifies how the ECR automated re-scan will be updated for your - environment. +- `scan_configuration_arn`: The CIS scan configuration ARN. -""" -function update_configuration( - ecrConfiguration; aws_config::AbstractAWSConfig=global_aws_config() +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"scanName"`: The scan name for the CIS scan configuration. +- `"schedule"`: The schedule for the CIS scan configuration. +- `"securityLevel"`: The security level for the CIS scan configuration. Security level + refers to the Benchmark levels that CIS assigns to a profile. +- `"targets"`: The targets for the CIS scan configuration. 
+""" +function update_cis_scan_configuration( + scanConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() ) return inspector2( "POST", - "/configuration/update", - Dict{String,Any}("ecrConfiguration" => ecrConfiguration); + "/cis/scan-configuration/update", + Dict{String,Any}("scanConfigurationArn" => scanConfigurationArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function update_configuration( - ecrConfiguration, +function update_cis_scan_configuration( + scanConfigurationArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return inspector2( "POST", - "/configuration/update", + "/cis/scan-configuration/update", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("ecrConfiguration" => ecrConfiguration), params + _merge, + Dict{String,Any}("scanConfigurationArn" => scanConfigurationArn), + params, ), ); aws_config=aws_config, @@ -1243,6 +2109,41 @@ function update_configuration( ) end +""" + update_configuration() + update_configuration(params::Dict{String,<:Any}) + +Updates setting configurations for your Amazon Inspector account. When you use this API as +an Amazon Inspector delegated administrator this updates the setting for all accounts you +manage. Member accounts in an organization cannot update this setting. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ec2Configuration"`: Specifies how the Amazon EC2 automated scan will be updated for + your environment. +- `"ecrConfiguration"`: Specifies how the ECR automated re-scan will be updated for your + environment. +""" +function update_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/configuration/update"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/configuration/update", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_ec2_deep_inspection_configuration() update_ec2_deep_inspection_configuration(params::Dict{String,<:Any}) @@ -1281,6 +2182,58 @@ function update_ec2_deep_inspection_configuration( ) end +""" + update_encryption_key(kms_key_id, resource_type, scan_type) + update_encryption_key(kms_key_id, resource_type, scan_type, params::Dict{String,<:Any}) + +Updates an encryption key. A ResourceNotFoundException means that an Amazon Web Services +owned key is being used for encryption. + +# Arguments +- `kms_key_id`: A KMS key ID for the encryption key. +- `resource_type`: The resource type for the encryption key. +- `scan_type`: The scan type for the encryption key. 
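+
+# Example
+A minimal usage sketch; the KMS key ID is a hypothetical placeholder, and the resource and
+scan type strings are assumed enum values (not confirmed here):
+
+    update_encryption_key(
+        "1234abcd-12ab-34cd-56ef-1234567890ab",
+        "AWS_ECR_CONTAINER_IMAGE",
+        "PACKAGE",
+    )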
+ +""" +function update_encryption_key( + kmsKeyId, resourceType, scanType; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "PUT", + "/encryptionkey/update", + Dict{String,Any}( + "kmsKeyId" => kmsKeyId, "resourceType" => resourceType, "scanType" => scanType + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_encryption_key( + kmsKeyId, + resourceType, + scanType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "PUT", + "/encryptionkey/update", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "kmsKeyId" => kmsKeyId, + "resourceType" => resourceType, + "scanType" => scanType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_filter(filter_arn) update_filter(filter_arn, params::Dict{String,<:Any}) diff --git a/src/services/inspector_scan.jl b/src/services/inspector_scan.jl new file mode 100644 index 0000000000..cc818d1d04 --- /dev/null +++ b/src/services/inspector_scan.jl @@ -0,0 +1,42 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: inspector_scan +using AWS.Compat +using AWS.UUIDs + +""" + scan_sbom(sbom) + scan_sbom(sbom, params::Dict{String,<:Any}) + +Scans a provided CycloneDX 1.5 SBOM and reports on any vulnerabilities discovered in that +SBOM. You can generate compatible SBOMs for your resources using the Amazon Inspector SBOM +generator. + +# Arguments +- `sbom`: The JSON file for the SBOM you want to scan. The SBOM must be in CycloneDX 1.5 + format. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"outputFormat"`: The output format for the vulnerability report. +""" +function scan_sbom(sbom; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector_scan( + "POST", + "/scan/sbom", + Dict{String,Any}("sbom" => sbom); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function scan_sbom( + sbom, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector_scan( + "POST", + "/scan/sbom", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("sbom" => sbom), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/internetmonitor.jl b/src/services/internetmonitor.jl index a8371f117f..41826dae44 100644 --- a/src/services/internetmonitor.jl +++ b/src/services/internetmonitor.jl @@ -9,17 +9,19 @@ using AWS.UUIDs create_monitor(monitor_name, params::Dict{String,<:Any}) Creates a monitor in Amazon CloudWatch Internet Monitor. A monitor is built based on -information from the application resources that you add: Amazon Virtual Private Clouds -(VPCs), Amazon CloudFront distributions, and WorkSpaces directories. Internet Monitor then -publishes internet measurements from Amazon Web Services that are specific to the -city-networks, that is, the locations and ASNs (typically internet service providers or +information from the application resources that you add: VPCs, Network Load Balancers +(NLBs), Amazon CloudFront distributions, and Amazon WorkSpaces directories. Internet +Monitor then publishes internet measurements from Amazon Web Services that are specific to +the city-networks. That is, the locations and ASNs (typically internet service providers or ISPs), where clients access your application. 
For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide. When you create a monitor, -you set a maximum limit for the number of city-networks where client traffic is monitored. -The city-network maximum that you choose is the limit, but you only pay for the number of -city-networks that are actually monitored. You can change the maximum at any time by -updating your monitor. For more information, see Choosing a city-network maximum value in -the Amazon CloudWatch User Guide. +you choose the percentage of traffic that you want to monitor. You can also set a maximum +limit for the number of city-networks where client traffic is monitored, that caps the +total traffic that Internet Monitor monitors. A city-network maximum is the limit of +city-networks, but you only pay for the number of city-networks that are actually +monitored. You can update your monitor at any time to change the percentage of traffic to +monitor or the city-networks maximum. For more information, see Choosing a city-network +maximum value in the Amazon CloudWatch User Guide. # Arguments - `monitor_name`: The name of the monitor. @@ -29,23 +31,34 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ClientToken"`: A unique, case-sensitive string of up to 64 ASCII characters that you specify to make an idempotent API request. Don't reuse the same client token for other API requests. +- `"HealthEventsConfig"`: Defines the threshold percentages and other configuration + information for when Amazon CloudWatch Internet Monitor creates a health event. Internet + Monitor creates a health event when an internet issue that affects your application end + users has a health score percentage that is at or below a specific threshold, and, + sometimes, when other criteria are met. If you don't set a health event threshold, the + default value is 95%. For more information, see Change health event thresholds in the + Internet Monitor section of the CloudWatch User Guide. - `"InternetMeasurementsLogDelivery"`: Publish internet measurements for Internet Monitor to an Amazon S3 bucket in addition to CloudWatch Logs. - `"MaxCityNetworksToMonitor"`: The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application - resources from and the network or ASN, such as an internet service provider (ISP), that - clients access the resources through. This limit helps control billing costs. To learn - more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor - section of the CloudWatch User Guide. + resources from and the ASN or network provider, such as an internet service provider (ISP), + that clients access the resources through. Setting this limit can help control billing + costs. To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch + Internet Monitor section of the CloudWatch User Guide. - `"Resources"`: The resources to include in a monitor, which you provide as a set of - Amazon Resource Names (ARNs). You can add a combination of Amazon Virtual Private Clouds - (VPCs) and Amazon CloudFront distributions, or you can add Amazon WorkSpaces directories. - You can't add all three types of resources. If you add only VPC resources, at least one - VPC must have an Internet Gateway attached to it, to make sure that it has internet - connectivity. + Amazon Resource Names (ARNs). 
Resources can be VPCs, NLBs, Amazon CloudFront distributions, + or Amazon WorkSpaces directories. You can add a combination of VPCs and CloudFront + distributions, or you can add WorkSpaces directories, or you can add NLBs. You can't add + NLBs or WorkSpaces directories together with any other resources. If you add only Amazon + VPC resources, at least one VPC must have an Internet Gateway attached to it, to make sure + that it has internet connectivity. - `"Tags"`: The tags for a monitor. You can add a maximum of 50 tags in Internet Monitor. - `"TrafficPercentageToMonitor"`: The percentage of the internet-facing traffic for your - application that you want to monitor with this monitor. + application that you want to monitor with this monitor. If you set a city-networks maximum, + that limit overrides the traffic percentage that you set. To learn more, see Choosing an + application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor + section of the CloudWatch User Guide. """ function create_monitor(MonitorName; aws_config::AbstractAWSConfig=global_aws_config()) return internetmonitor( @@ -114,19 +127,25 @@ end get_health_event(event_id, monitor_name) get_health_event(event_id, monitor_name, params::Dict{String,<:Any}) -Gets information the Amazon CloudWatch Internet Monitor has created and stored about a +Gets information that Amazon CloudWatch Internet Monitor has created and stored about a health event for a specified monitor. This information includes the impacted locations, and -all of the information related to the event by location. The information returned includes -the performance, availability, and round-trip time impact, information about the network -providers, the event type, and so on. Information rolled up at the global traffic level is -also returned, including the impact type and total traffic impact. +all the information related to the event, by location. The information returned includes +the impact on performance, availability, and round-trip time, information about the network +providers (ASNs), the event type, and so on. Information rolled up at the global traffic +level is also returned, including the impact type and total traffic impact. # Arguments -- `event_id`: The internally generated identifier of a health event. Because EventID +- `event_id`: The internally-generated identifier of a health event. Because EventID contains the forward slash (“/”) character, you must URL-encode the EventID field in the request URL. - `monitor_name`: The name of the monitor. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LinkedAccountId"`: The account ID for an account that you've set up cross-account + sharing for in Amazon CloudWatch Internet Monitor. You configure cross-account sharing by + using Amazon CloudWatch Observability Access Manager. For more information, see Internet + Monitor cross-account observability in the Amazon CloudWatch Internet Monitor User Guide. """ function get_health_event( EventId, MonitorName; aws_config::AbstractAWSConfig=global_aws_config() @@ -153,6 +172,41 @@ function get_health_event( ) end +""" + get_internet_event(event_id) + get_internet_event(event_id, params::Dict{String,<:Any}) + +Gets information that Amazon CloudWatch Internet Monitor has generated about an internet +event. Internet Monitor displays information about recent global health events, called +internet events, on a global outages map that is available to all Amazon Web Services +customers. 
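The GetHealthEvent docstring above asks callers to URL-encode the event ID because it contains forward slashes. A minimal sketch, assuming `HTTP.escapeuri` (HTTP.jl is a dependency of AWS.jl, though you may want it in your own environment too) is acceptable for that encoding, and using a placeholder event ID and account ID:

    using AWS
    using HTTP: escapeuri
    @service Internetmonitor

    # Placeholder health-event ID; the raw ID contains "/" characters.
    event_id = escapeuri("AWS/outage/2024-06-25/example")

    Internetmonitor.get_health_event(
        event_id,
        "example-monitor",
        Dict{String,Any}("LinkedAccountId" => "111122223333"),
    )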
The information returned here includes the impacted location, when the event +started and (if the event is over) ended, the type of event (PERFORMANCE or AVAILABILITY), +and the status (ACTIVE or RESOLVED). + +# Arguments +- `event_id`: The EventId of the internet event to return information for. + +""" +function get_internet_event(EventId; aws_config::AbstractAWSConfig=global_aws_config()) + return internetmonitor( + "GET", + "/v20210603/InternetEvents/$(EventId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_internet_event( + EventId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return internetmonitor( + "GET", + "/v20210603/InternetEvents/$(EventId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_monitor(monitor_name) get_monitor(monitor_name, params::Dict{String,<:Any}) @@ -164,6 +218,12 @@ modified time, resources included in the monitor, and status information. # Arguments - `monitor_name`: The name of the monitor. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LinkedAccountId"`: The account ID for an account that you've set up cross-account + sharing for in Amazon CloudWatch Internet Monitor. You configure cross-account sharing by + using Amazon CloudWatch Observability Access Manager. For more information, see Internet + Monitor cross-account observability in the Amazon CloudWatch Internet Monitor User Guide. """ function get_monitor(MonitorName; aws_config::AbstractAWSConfig=global_aws_config()) return internetmonitor( @@ -187,13 +247,100 @@ function get_monitor( ) end +""" + get_query_results(monitor_name, query_id) + get_query_results(monitor_name, query_id, params::Dict{String,<:Any}) + +Return the data for a query with the Amazon CloudWatch Internet Monitor query interface. +Specify the query that you want to return results for by providing a QueryId and a monitor +name. For more information about using the query interface, including examples, see Using +the Amazon CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet +Monitor User Guide. + +# Arguments +- `monitor_name`: The name of the monitor to return data for. +- `query_id`: The ID of the query that you want to return data results for. A QueryId is an + internally-generated identifier for a specific query. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The number of query results that you want to return with this call. +- `"NextToken"`: The token for the next set of results. You receive this token from a + previous call. 
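The new GetInternetEvent operation above takes only an event ID. A minimal sketch, with a placeholder ID that would normally come from a ListInternetEvents result:

    using AWS
    @service Internetmonitor

    event = Internetmonitor.get_internet_event("example-internet-event-id")
    # The parsed response describes the impacted location, the event window,
    # the event type (PERFORMANCE or AVAILABILITY), and the status (ACTIVE or RESOLVED).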
+""" +function get_query_results( + MonitorName, QueryId; aws_config::AbstractAWSConfig=global_aws_config() +) + return internetmonitor( + "GET", + "/v20210603/Monitors/$(MonitorName)/Queries/$(QueryId)/Results"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_query_results( + MonitorName, + QueryId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return internetmonitor( + "GET", + "/v20210603/Monitors/$(MonitorName)/Queries/$(QueryId)/Results", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_query_status(monitor_name, query_id) + get_query_status(monitor_name, query_id, params::Dict{String,<:Any}) + +Returns the current status of a query for the Amazon CloudWatch Internet Monitor query +interface, for a specified query ID and monitor. When you run a query, check the status to +make sure that the query has SUCCEEDED before you review the results. QUEUED: The query +is scheduled to run. RUNNING: The query is in progress but not complete. SUCCEEDED: +The query completed sucessfully. FAILED: The query failed due to an error. CANCELED: +The query was canceled. + +# Arguments +- `monitor_name`: The name of the monitor. +- `query_id`: The ID of the query that you want to return the status for. A QueryId is an + internally-generated dentifier for a specific query. + +""" +function get_query_status( + MonitorName, QueryId; aws_config::AbstractAWSConfig=global_aws_config() +) + return internetmonitor( + "GET", + "/v20210603/Monitors/$(MonitorName)/Queries/$(QueryId)/Status"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_query_status( + MonitorName, + QueryId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return internetmonitor( + "GET", + "/v20210603/Monitors/$(MonitorName)/Queries/$(QueryId)/Status", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_health_events(monitor_name) list_health_events(monitor_name, params::Dict{String,<:Any}) -Lists all health events for a monitor in Amazon CloudWatch Internet Monitor. Returns all -information for health events including the client location information the network cause -and status, event start and end time, percentage of total traffic impacted, and status. +Lists all health events for a monitor in Amazon CloudWatch Internet Monitor. Returns +information for health events including the event start and end times, and the status. Health events that have start times during the time frame that is requested are not included in the list of health events. @@ -205,6 +352,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"EndTime"`: The time when a health event ended. If the health event is still ongoing, then the end time is not set. - `"EventStatus"`: The status of a health event. +- `"LinkedAccountId"`: The account ID for an account that you've set up cross-account + sharing for in Amazon CloudWatch Internet Monitor. You configure cross-account sharing by + using Amazon CloudWatch Observability Access Manager. For more information, see Internet + Monitor cross-account observability in the Amazon CloudWatch Internet Monitor User Guide. - `"MaxResults"`: The number of health event objects that you want to return with this call. - `"NextToken"`: The token for the next set of results. 
You receive this token from a @@ -233,6 +384,53 @@ function list_health_events( ) end +""" + list_internet_events() + list_internet_events(params::Dict{String,<:Any}) + +Lists internet events that cause performance or availability issues for client locations. +Amazon CloudWatch Internet Monitor displays information about recent global health events, +called internet events, on a global outages map that is available to all Amazon Web +Services customers. You can constrain the list of internet events returned by providing a +start time and end time to define a total time frame for events you want to list. Both +start time and end time specify the time when an event started. End time is optional. If +you don't include it, the default end time is the current time. You can also limit the +events returned to a specific status (ACTIVE or RESOLVED) or type (PERFORMANCE or +AVAILABILITY). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EndTime"`: The end time of the time window that you want to get a list of internet + events for. +- `"EventStatus"`: The status of an internet event. +- `"EventType"`: The type of network impairment. +- `"InternetEventMaxResults"`: The number of query results that you want to return with + this call. +- `"NextToken"`: The token for the next set of results. You receive this token from a + previous call. +- `"StartTime"`: The start time of the time window that you want to get a list of internet + events for. +""" +function list_internet_events(; aws_config::AbstractAWSConfig=global_aws_config()) + return internetmonitor( + "GET", + "/v20210603/InternetEvents"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_internet_events( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return internetmonitor( + "GET", + "/v20210603/InternetEvents", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_monitors() list_monitors(params::Dict{String,<:Any}) @@ -242,6 +440,11 @@ with the Amazon Resource Name (ARN) and name of each monitor. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncludeLinkedAccounts"`: A boolean option that you can set to TRUE to include monitors + for linked accounts in a list of monitors, when you've set up cross-account sharing in + Amazon CloudWatch Internet Monitor. You configure cross-account sharing by using Amazon + CloudWatch Observability Access Manager. For more information, see Internet Monitor + cross-account observability in the Amazon CloudWatch Internet Monitor User Guide. - `"MaxResults"`: The number of monitor objects that you want to return with this call. - `"MonitorStatus"`: The status of a monitor. This includes the status of the data processing for the monitor and the status of the monitor itself. For information about the @@ -301,6 +504,124 @@ function list_tags_for_resource( ) end +""" + start_query(end_time, monitor_name, query_type, start_time) + start_query(end_time, monitor_name, query_type, start_time, params::Dict{String,<:Any}) + +Start a query to return data for a specific query type for the Amazon CloudWatch Internet +Monitor query interface. Specify a time period for the data that you want returned by using +StartTime and EndTime. You filter the query results to return by providing parameters that +you specify with FilterParameters. 
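A short sketch of the ListInternetEvents filters described above, restricted to active performance events. The time-window bounds and the ISO 8601 string format are assumptions made for illustration.

    using AWS
    @service Internetmonitor

    Internetmonitor.list_internet_events(Dict{String,Any}(
        "EventStatus" => "ACTIVE",
        "EventType"   => "PERFORMANCE",
        "StartTime"   => "2024-06-01T00:00:00Z",
        "EndTime"     => "2024-06-26T00:00:00Z",
    ))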
For more information about using the query interface, +including examples, see Using the Amazon CloudWatch Internet Monitor query interface in the +Amazon CloudWatch Internet Monitor User Guide. + +# Arguments +- `end_time`: The timestamp that is the end of the period that you want to retrieve data + for with your query. +- `monitor_name`: The name of the monitor to query. +- `query_type`: The type of query to run. The following are the three types of queries that + you can run using the Internet Monitor query interface: MEASUREMENTS: Provides + availability score, performance score, total traffic, and round-trip times, at 5 minute + intervals. TOP_LOCATIONS: Provides availability score, performance score, total traffic, + and time to first byte (TTFB) information, for the top location and ASN combinations that + you're monitoring, by traffic volume. TOP_LOCATION_DETAILS: Provides TTFB for Amazon + CloudFront, your current configuration, and the best performing EC2 configuration, at 1 + hour intervals. For lists of the fields returned with each query type and more + information about how each type of query is performed, see Using the Amazon CloudWatch + Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide. +- `start_time`: The timestamp that is the beginning of the period that you want to retrieve + data for with your query. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"FilterParameters"`: The FilterParameters field that you use with Amazon CloudWatch + Internet Monitor queries is a string the defines how you want a query to be filtered. The + filter parameters that you can specify depend on the query type, since each query type + returns a different set of Internet Monitor data. For more information about specifying + filter parameters, see Using the Amazon CloudWatch Internet Monitor query interface in the + Amazon CloudWatch Internet Monitor User Guide. +- `"LinkedAccountId"`: The account ID for an account that you've set up cross-account + sharing for in Amazon CloudWatch Internet Monitor. You configure cross-account sharing by + using Amazon CloudWatch Observability Access Manager. For more information, see Internet + Monitor cross-account observability in the Amazon CloudWatch Internet Monitor User Guide. +""" +function start_query( + EndTime, + MonitorName, + QueryType, + StartTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return internetmonitor( + "POST", + "/v20210603/Monitors/$(MonitorName)/Queries", + Dict{String,Any}( + "EndTime" => EndTime, "QueryType" => QueryType, "StartTime" => StartTime + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_query( + EndTime, + MonitorName, + QueryType, + StartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return internetmonitor( + "POST", + "/v20210603/Monitors/$(MonitorName)/Queries", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EndTime" => EndTime, "QueryType" => QueryType, "StartTime" => StartTime + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_query(monitor_name, query_id) + stop_query(monitor_name, query_id, params::Dict{String,<:Any}) + +Stop a query that is progress for a specific monitor. + +# Arguments +- `monitor_name`: The name of the monitor. +- `query_id`: The ID of the query that you want to stop. 
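The query interface documented above is a start/poll/fetch workflow: StartQuery returns a query ID, GetQueryStatus reports QUEUED, RUNNING, SUCCEEDED, FAILED, or CANCELED, and GetQueryResults returns the data once the query has succeeded. A minimal sketch, assuming the parsed responses behave like Dicts with "QueryId" and "Status" keys and using placeholder timestamps:

    using AWS
    @service Internetmonitor

    monitor = "example-monitor"

    # Kick off a MEASUREMENTS query over a one-day window (EndTime, MonitorName,
    # QueryType, StartTime is the positional order of the generated wrapper).
    started  = Internetmonitor.start_query(
        "2024-06-25T00:00:00Z", monitor, "MEASUREMENTS", "2024-06-24T00:00:00Z"
    )
    query_id = started["QueryId"]   # assumed response key

    status() = Internetmonitor.get_query_status(monitor, query_id)["Status"]   # assumed response key
    while status() in ("QUEUED", "RUNNING")
        sleep(5)
    end

    results = Internetmonitor.get_query_results(monitor, query_id)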
A QueryId is an + internally-generated identifier for a specific query. + +""" +function stop_query(MonitorName, QueryId; aws_config::AbstractAWSConfig=global_aws_config()) + return internetmonitor( + "DELETE", + "/v20210603/Monitors/$(MonitorName)/Queries/$(QueryId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_query( + MonitorName, + QueryId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return internetmonitor( + "DELETE", + "/v20210603/Monitors/$(MonitorName)/Queries/$(QueryId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -382,9 +703,9 @@ end update_monitor(monitor_name) update_monitor(monitor_name, params::Dict{String,<:Any}) -Updates a monitor. You can update a monitor to change the maximum number of city-networks -(locations and ASNs or internet service providers), to add or remove resources, or to -change the status of the monitor. Note that you can't change the name of a monitor. The +Updates a monitor. You can update a monitor to change the percentage of traffic to monitor +or the maximum number of city-networks (locations and ASNs), to add or remove resources, or +to change the status of the monitor. Note that you can't change the name of a monitor. The city-network maximum that you choose is the limit, but you only pay for the number of city-networks that are actually monitored. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide. @@ -397,26 +718,36 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ClientToken"`: A unique, case-sensitive string of up to 64 ASCII characters that you specify to make an idempotent API request. You should not reuse the same client token for other API requests. +- `"HealthEventsConfig"`: The list of health score thresholds. A threshold percentage for + health scores, along with other configuration information, determines when Internet Monitor + creates a health event when there's an internet issue that affects your application end + users. For more information, see Change health event thresholds in the Internet Monitor + section of the CloudWatch User Guide. - `"InternetMeasurementsLogDelivery"`: Publish internet measurements for Internet Monitor to another location, such as an Amazon S3 bucket. The measurements are also published to Amazon CloudWatch Logs. - `"MaxCityNetworksToMonitor"`: The maximum number of city-networks to monitor for your - resources. A city-network is the location (city) where clients access your application - resources from and the network or ASN, such as an internet service provider, that clients - access the resources through. + application. A city-network is the location (city) where clients access your application + resources from and the ASN or network provider, such as an internet service provider (ISP), + that clients access the resources through. Setting this limit can help control billing + costs. - `"ResourcesToAdd"`: The resources to include in a monitor, which you provide as a set of - Amazon Resource Names (ARNs). You can add a combination of Amazon Virtual Private Clouds - (VPCs) and Amazon CloudFront distributions, or you can add Amazon WorkSpaces directories. - You can't add all three types of resources. 
If you add only VPC resources, at least one - VPC must have an Internet Gateway attached to it, to make sure that it has internet - connectivity. + Amazon Resource Names (ARNs). Resources can be VPCs, NLBs, Amazon CloudFront distributions, + or Amazon WorkSpaces directories. You can add a combination of VPCs and CloudFront + distributions, or you can add WorkSpaces directories, or you can add NLBs. You can't add + NLBs or WorkSpaces directories together with any other resources. If you add only Amazon + Virtual Private Clouds resources, at least one VPC must have an Internet Gateway attached + to it, to make sure that it has internet connectivity. - `"ResourcesToRemove"`: The resources to remove from a monitor, which you provide as a set of Amazon Resource Names (ARNs). - `"Status"`: The status for a monitor. The accepted values for Status with the UpdateMonitor API call are the following: ACTIVE and INACTIVE. The following values are not accepted: PENDING, and ERROR. - `"TrafficPercentageToMonitor"`: The percentage of the internet-facing traffic for your - application that you want to monitor with this monitor. + application that you want to monitor with this monitor. If you set a city-networks maximum, + that limit overrides the traffic percentage that you set. To learn more, see Choosing an + application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor + section of the CloudWatch User Guide. """ function update_monitor(MonitorName; aws_config::AbstractAWSConfig=global_aws_config()) return internetmonitor( diff --git a/src/services/iot.jl b/src/services/iot.jl index d2ab956d67..4997d5825a 100644 --- a/src/services/iot.jl +++ b/src/services/iot.jl @@ -138,7 +138,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format. aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/ The namespaceId feature - is in public preview. + is only supported by IoT Greengrass at this time. For more information, see Setting up IoT + Greengrass core devices. """ function associate_targets_with_job( jobId, targets; aws_config::AbstractAWSConfig=global_aws_config() @@ -828,11 +829,11 @@ end Creates an X.509 certificate using the specified certificate signing request. Requires permission to access the CreateCertificateFromCsr action. The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST -P-256 or NIST P-384 curves. For supported certificates, consult Certificate signing -algorithms supported by IoT. Reusing the same certificate signing request (CSR) results -in a distinct certificate. You can create multiple certificates in a batch by creating a -directory, copying multiple .csr files into that directory, and then specifying that -directory on the command line. The following commands show how to create a batch of +P-256, NIST P-384, or NIST P-521 curves. For supported certificates, consult Certificate +signing algorithms supported by IoT. Reusing the same certificate signing request (CSR) +results in a distinct certificate. You can create multiple certificates in a batch by +creating a directory, copying multiple .csr files into that directory, and then specifying +that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs. 
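A minimal sketch of the UpdateMonitor call documented above, swapping one CloudFront distribution for another and raising the sampled traffic share. The monitor name, ARNs, and percentage are placeholders.

    using AWS
    @service Internetmonitor

    Internetmonitor.update_monitor(
        "example-monitor",
        Dict{String,Any}(
            "ResourcesToAdd" => [
                "arn:aws:cloudfront::111122223333:distribution/E2NEWDISTEXAMPLE",
            ],
            "ResourcesToRemove" => [
                "arn:aws:cloudfront::111122223333:distribution/E1OLDDISTEXAMPLE",
            ],
            "TrafficPercentageToMonitor" => 10,
        ),
    )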
In the following commands, we assume that a set of CSRs are located inside of the directory my-csr-directory: On Linux and OS X, the command is: ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr @@ -887,6 +888,77 @@ function create_certificate_from_csr( ) end +""" + create_certificate_provider(account_default_for_operations, certificate_provider_name, lambda_function_arn) + create_certificate_provider(account_default_for_operations, certificate_provider_name, lambda_function_arn, params::Dict{String,<:Any}) + +Creates an Amazon Web Services IoT Core certificate provider. You can use Amazon Web +Services IoT Core certificate provider to customize how to sign a certificate signing +request (CSR) in IoT fleet provisioning. For more information, see Customizing certificate +signing using Amazon Web Services IoT Core certificate provider from Amazon Web Services +IoT Core Developer Guide. Requires permission to access the CreateCertificateProvider +action. After you create a certificate provider, the behavior of CreateCertificateFromCsr +API for fleet provisioning will change and all API calls to CreateCertificateFromCsr will +invoke the certificate provider to create the certificates. It can take up to a few minutes +for this behavior to change after a certificate provider is created. + +# Arguments +- `account_default_for_operations`: A list of the operations that the certificate provider + will use to generate certificates. Valid value: CreateCertificateFromCsr. +- `certificate_provider_name`: The name of the certificate provider. +- `lambda_function_arn`: The ARN of the Lambda function that defines the authentication + logic. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A string that you can optionally pass in the CreateCertificateProvider + request to make sure the request is idempotent. +- `"tags"`: Metadata which can be used to manage the certificate provider. +""" +function create_certificate_provider( + accountDefaultForOperations, + certificateProviderName, + lambdaFunctionArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iot( + "POST", + "/certificate-providers/$(certificateProviderName)", + Dict{String,Any}( + "accountDefaultForOperations" => accountDefaultForOperations, + "lambdaFunctionArn" => lambdaFunctionArn, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_certificate_provider( + accountDefaultForOperations, + certificateProviderName, + lambdaFunctionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iot( + "POST", + "/certificate-providers/$(certificateProviderName)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "accountDefaultForOperations" => accountDefaultForOperations, + "lambdaFunctionArn" => lambdaFunctionArn, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_custom_metric(client_request_token, metric_name, metric_type) create_custom_metric(client_request_token, metric_name, metric_type, params::Dict{String,<:Any}) @@ -1042,6 +1114,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"serverCertificateArns"`: The ARNs of the certificates that IoT passes to the device during the TLS handshake. Currently you can specify only one certificate ARN. 
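The new CreateCertificateProvider wrapper above takes the allowed operations, the provider name, and the Lambda function ARN in that order. A minimal sketch, with a hypothetical provider name and Lambda ARN:

    using AWS
    @service IoT

    # Register a certificate provider backed by a (hypothetical) Lambda function
    # that implements the CSR-signing logic. CreateCertificateFromCsr is the only
    # valid operation listed in the docstring above.
    IoT.create_certificate_provider(
        ["CreateCertificateFromCsr"],
        "example-cert-provider",
        "arn:aws:lambda:us-east-1:111122223333:function:example-csr-signer",
    )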
This value is not required for Amazon Web Services-managed domains. +- `"serverCertificateConfig"`: The server certificate configuration. - `"serviceType"`: The type of service delivered by the endpoint. Amazon Web Services IoT Core currently supports only the DATA service type. - `"tags"`: Metadata which can be used to manage the domain configuration. For URI Request @@ -1215,8 +1288,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"abortConfig"`: Allows you to create the criteria to abort a job. - `"description"`: A short text description of the job. - `"destinationPackageVersions"`: The package version Amazon Resource Names (ARNs) that are - installed on the device when the job successfully completes. Note:The following Length - Constraints relates to a single string. Up to five strings are allowed. + installed on the device when the job successfully completes. The package version must be in + either the Published or Deprecated state when the job deploys. For more information, see + Package version lifecycle. Note:The following Length Constraints relates to a single ARN. + Up to 25 package version ARNs are allowed. - `"document"`: The job document. Required if you don't specify a value for documentSource. - `"documentParameters"`: Parameters of an Amazon Web Services managed template that you can specify to create the job document. documentParameters can only be used when creating @@ -1234,7 +1309,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format. aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/ The namespaceId feature - is in public preview. + is only supported by IoT Greengrass at this time. For more information, see Setting up IoT + Greengrass core devices. - `"presignedUrlConfig"`: Configuration information for pre-signed S3 URLs. - `"schedulingConfig"`: The configuration that allows you to schedule a job for a future date and time in addition to specifying the end behavior for each job execution. @@ -1291,14 +1367,16 @@ Creates a job template. Requires permission to access the CreateJobTemplate acti Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"abortConfig"`: - `"destinationPackageVersions"`: The package version Amazon Resource Names (ARNs) that are - installed on the device when the job successfully completes. Note:The following Length - Constraints relates to a single string. Up to five strings are allowed. + installed on the device when the job successfully completes. The package version must be in + either the Published or Deprecated state when the job deploys. For more information, see + Package version lifecycle. Note:The following Length Constraints relates to a single ARN. + Up to 25 package version ARNs are allowed. - `"document"`: The job document. Required if you don't specify a value for documentSource. -- `"documentSource"`: An S3 link to the job document to use in the template. Required if - you don't specify a value for document. If the job document resides in an S3 bucket, you - must use a placeholder link when specifying the document. The placeholder link is of the - following form: {aws:iot:s3-presigned-url:https://s3.amazonaws.com/bucket/key} where - bucket is your bucket name and key is the object in the bucket to which you are linking. 
+- `"documentSource"`: An S3 link, or S3 object URL, to the job document. The link is an + Amazon S3 object URL and is required if you don't specify a value for document. For + example, --document-source + https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0 For more + information, see Methods for accessing a bucket. - `"jobArn"`: The ARN of the job to use as the basis for the job template. - `"jobExecutionsRetryConfig"`: Allows you to create the criteria to retry a job. - `"jobExecutionsRolloutConfig"`: @@ -1439,8 +1517,8 @@ access the CreateOTAUpdate action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"additionalParameters"`: A list of additional OTA update parameters which are name-value - pairs. +- `"additionalParameters"`: A list of additional OTA update parameters, which are + name-value pairs. They won't be sent to devices as a part of the Job document. - `"awsJobAbortConfig"`: The criteria that determine when and how a job abort takes place. - `"awsJobExecutionsRolloutConfig"`: Configuration for the rollout of OTA updates. - `"awsJobPresignedUrlConfig"`: Configuration information for pre-signed URLs. @@ -1504,7 +1582,7 @@ Creates an IoT software package that can be deployed to your fleet. Requires per access the CreatePackage and GetIndexingConfiguration actions. # Arguments -- `package_name`: The name of the new package. +- `package_name`: The name of the new software package. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1548,7 +1626,7 @@ Creates a new version for an existing IoT software package. Requires permission the CreatePackageVersion and GetIndexingConfiguration actions. # Arguments -- `package_name`: The name of the associated package. +- `package_name`: The name of the associated software package. - `version_name`: The name of the new package version. # Optional Parameters @@ -1978,6 +2056,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys sent to the console.) Alerts are generated when a device (thing) violates a behavior. - `"behaviors"`: Specifies the behaviors that, when violated by a device (thing), cause an alert. +- `"metricsExportConfig"`: Specifies the MQTT topic and role ARN required for metric export. - `"securityProfileDescription"`: A description of the security profile. - `"tags"`: Metadata that can be used to manage the security profile. """ @@ -2104,8 +2183,9 @@ end create_thing_group(thing_group_name, params::Dict{String,<:Any}) Create a thing group. This is a control plane operation. See Authorization for information -about authorizing control plane actions. Requires permission to access the -CreateThingGroup action. +about authorizing control plane actions. If the ThingGroup that you create has the exact +same attributes as an existing ThingGroup, you will get a 200 success response. Requires +permission to access the CreateThingGroup action. # Arguments - `thing_group_name`: The thing group name to create. @@ -2503,6 +2583,43 @@ function delete_certificate( ) end +""" + delete_certificate_provider(certificate_provider_name) + delete_certificate_provider(certificate_provider_name, params::Dict{String,<:Any}) + +Deletes a certificate provider. Requires permission to access the DeleteCertificateProvider +action. 
If you delete the certificate provider resource, the behavior of +CreateCertificateFromCsr will resume, and IoT will create certificates signed by IoT from a +certificate signing request (CSR). + +# Arguments +- `certificate_provider_name`: The name of the certificate provider. + +""" +function delete_certificate_provider( + certificateProviderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return iot( + "DELETE", + "/certificate-providers/$(certificateProviderName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_certificate_provider( + certificateProviderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iot( + "DELETE", + "/certificate-providers/$(certificateProviderName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_custom_metric(metric_name) delete_custom_metric(metric_name, params::Dict{String,<:Any}) @@ -2708,7 +2825,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format. aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/ The namespaceId feature - is in public preview. + is only supported by IoT Greengrass at this time. For more information, see Setting up IoT + Greengrass core devices. """ function delete_job(jobId; aws_config::AbstractAWSConfig=global_aws_config()) return iot( @@ -2753,7 +2871,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format. aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/ The namespaceId feature - is in public preview. + is only supported by IoT Greengrass at this time. For more information, see Setting up IoT + Greengrass core devices. """ function delete_job_execution( executionNumber, jobId, thingName; aws_config::AbstractAWSConfig=global_aws_config() @@ -2899,7 +3018,7 @@ deleted before deleting the software package. Requires permission to access the DeletePackageVersion action. # Arguments -- `package_name`: The name of the target package. +- `package_name`: The name of the target software package. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2937,11 +3056,11 @@ end delete_package_version(package_name, version_name, params::Dict{String,<:Any}) Deletes a specific version from a software package. Note: If a package version is -designated as default, you must remove the designation from the package using the +designated as default, you must remove the designation from the software package using the UpdatePackage action. # Arguments -- `package_name`: The name of the associated package. +- `package_name`: The name of the associated software package. - `version_name`: The name of the target package version. # Optional Parameters @@ -3874,6 +3993,41 @@ function describe_certificate( ) end +""" + describe_certificate_provider(certificate_provider_name) + describe_certificate_provider(certificate_provider_name, params::Dict{String,<:Any}) + +Describes a certificate provider. Requires permission to access the +DescribeCertificateProvider action. + +# Arguments +- `certificate_provider_name`: The name of the certificate provider. 
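The describe and delete operations for certificate providers pair naturally. A minimal sketch, assuming a provider named `example-cert-provider` already exists:

    using AWS
    @service IoT

    # Inspect an existing provider, then remove it. Once deleted,
    # CreateCertificateFromCsr reverts to IoT-signed certificates, as noted above.
    provider = IoT.describe_certificate_provider("example-cert-provider")
    IoT.delete_certificate_provider("example-cert-provider")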
+ +""" +function describe_certificate_provider( + certificateProviderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return iot( + "GET", + "/certificate-providers/$(certificateProviderName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_certificate_provider( + certificateProviderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iot( + "GET", + "/certificate-providers/$(certificateProviderName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_custom_metric(metric_name) describe_custom_metric(metric_name, params::Dict{String,<:Any}) @@ -4034,8 +4188,10 @@ end describe_endpoint() describe_endpoint(params::Dict{String,<:Any}) -Returns a unique endpoint specific to the Amazon Web Services account making the call. -Requires permission to access the DescribeEndpoint action. +Returns or creates a unique endpoint specific to the Amazon Web Services account making the +call. The first time DescribeEndpoint is called, an endpoint is created. All subsequent +calls to DescribeEndpoint return the same endpoint. Requires permission to access the +DescribeEndpoint action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4044,7 +4200,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys iot:CredentialProvider - Returns an IoT credentials provider API endpoint. iot:Jobs - Returns an IoT device management Jobs API endpoint. We strongly recommend that customers use the newer iot:Data-ATS endpoint type to avoid issues related to the - widespread distrust of Symantec certificate authorities. + widespread distrust of Symantec certificate authorities. ATS Signed Certificates are more + secure and are trusted by most popular browsers. """ function describe_endpoint(; aws_config::AbstractAWSConfig=global_aws_config()) return iot("GET", "/endpoint"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -5216,7 +5373,7 @@ Gets information about the specified software package. Requires permission to ac GetPackage action. # Arguments -- `package_name`: The name of the target package. +- `package_name`: The name of the target software package. """ function get_package(packageName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -5429,8 +5586,10 @@ end get_registration_code() get_registration_code(params::Dict{String,<:Any}) -Gets a registration code used to register a CA certificate with IoT. Requires permission to -access the GetRegistrationCode action. +Gets a registration code used to register a CA certificate with IoT. IoT will create a +registration code as part of this API call if the registration code doesn't exist or has +been deleted. If you already have a registration code, this API call will return the same +registration code. Requires permission to access the GetRegistrationCode action. """ function get_registration_code(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -5985,6 +6144,40 @@ function list_cacertificates( ) end +""" + list_certificate_providers() + list_certificate_providers(params::Dict{String,<:Any}) + +Lists all your certificate providers in your Amazon Web Services account. Requires +permission to access the ListCertificateProviders action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"isAscendingOrder"`: Returns the list of certificate providers in ascending alphabetical + order. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. +""" +function list_certificate_providers(; aws_config::AbstractAWSConfig=global_aws_config()) + return iot( + "GET", + "/certificate-providers/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_certificate_providers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return iot( + "GET", + "/certificate-providers/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_certificates() list_certificates(params::Dict{String,<:Any}) @@ -6344,7 +6537,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format. aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/ The namespaceId feature - is in public preview. + is only supported by IoT Greengrass at this time. For more information, see Setting up IoT + Greengrass core devices. - `"nextToken"`: The token to retrieve the next set of results. - `"status"`: An optional filter that lets you search for jobs that have the specified status. @@ -6414,7 +6608,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format. aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/ The namespaceId feature - is in public preview. + is only supported by IoT Greengrass at this time. For more information, see Setting up IoT + Greengrass core devices. - `"nextToken"`: The token to retrieve the next set of results. - `"status"`: An optional filter that lets you search for jobs that have the specified status. @@ -6642,7 +6837,7 @@ Lists the software package versions associated to the account. Requires permissi access the ListPackageVersions action. # Arguments -- `package_name`: The name of the target package. +- `package_name`: The name of the target software package. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -8221,7 +8416,10 @@ The query search index. Requires permission to access the SearchIndex action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"indexName"`: The search index name. -- `"maxResults"`: The maximum number of results to return at one time. +- `"maxResults"`: The maximum number of results to return per page at one time. This + maximum number cannot exceed 100. The response might contain fewer results but will never + contain more. You can use nextToken to retrieve the next set of results until nextToken + returns NULL. - `"nextToken"`: The token used to get the next set of results, or null if there are no additional results. - `"queryVersion"`: The query version. @@ -9234,6 +9432,47 @@ function update_certificate( ) end +""" + update_certificate_provider(certificate_provider_name) + update_certificate_provider(certificate_provider_name, params::Dict{String,<:Any}) + +Updates a certificate provider. Requires permission to access the UpdateCertificateProvider +action. 
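The SearchIndex notes above cap maxResults at 100 and describe following nextToken until it is no longer returned. A minimal pagination sketch, assuming the wrapper's required positional argument is the query string and that the parsed JSON body is returned as a Dict with "things" and "nextToken" keys:

    using AWS
    @service IoT

    things = Any[]
    params = Dict{String,Any}("maxResults" => 100)
    while true
        # Placeholder fleet-index query string.
        page = IoT.search_index("thingName:example*", params)
        append!(things, get(page, "things", Any[]))
        token = get(page, "nextToken", nothing)
        token === nothing && break
        params["nextToken"] = token
    end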
+ +# Arguments +- `certificate_provider_name`: The name of the certificate provider. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountDefaultForOperations"`: A list of the operations that the certificate provider + will use to generate certificates. Valid value: CreateCertificateFromCsr. +- `"lambdaFunctionArn"`: The Lambda function ARN that's associated with the certificate + provider. +""" +function update_certificate_provider( + certificateProviderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return iot( + "PUT", + "/certificate-providers/$(certificateProviderName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_certificate_provider( + certificateProviderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iot( + "PUT", + "/certificate-providers/$(certificateProviderName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_custom_metric(display_name, metric_name) update_custom_metric(display_name, metric_name, params::Dict{String,<:Any}) @@ -9337,6 +9576,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"domainConfigurationStatus"`: The status to which the domain configuration should be updated. - `"removeAuthorizerConfig"`: Removes the authorization configuration from a domain. +- `"serverCertificateConfig"`: The server certificate configuration. - `"tlsConfig"`: An object that specifies the TLS configuration for a domain. """ function update_domain_configuration( @@ -9548,7 +9788,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format. aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/ The namespaceId feature - is in public preview. + is only supported by IoT Greengrass at this time. For more information, see Setting up IoT + Greengrass core devices. - `"presignedUrlConfig"`: Configuration information for pre-signed S3 URLs. - `"timeoutConfig"`: Specifies the amount of time each device has to finish its execution of the job. The timer is started when the job execution status is set to IN_PROGRESS. If @@ -9617,11 +9858,11 @@ end update_package(package_name) update_package(package_name, params::Dict{String,<:Any}) -Updates the supported fields for a specific package. Requires permission to access the -UpdatePackage and GetIndexingConfiguration actions. +Updates the supported fields for a specific software package. Requires permission to access +the UpdatePackage and GetIndexingConfiguration actions. # Arguments -- `package_name`: The name of the target package. +- `package_name`: The name of the target software package. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -9665,7 +9906,7 @@ end update_package_configuration() update_package_configuration(params::Dict{String,<:Any}) -Updates the package configuration. Requires permission to access the +Updates the software package configuration. Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions. # Optional Parameters @@ -9715,9 +9956,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"action"`: The status that the package version should be assigned. 
For more information, see Package version lifecycle. - `"attributes"`: Metadata that can be used to define a package version’s configuration. - For example, the S3 file location, configuration options that are being sent to the device - or fleet. Note: Attributes can be updated only when the package version is in a draft - state. The combined size of all the attributes on a package version is limited to 3KB. + For example, the Amazon S3 file location, configuration options that are being sent to the + device or fleet. Note: Attributes can be updated only when the package version is in a + draft state. The combined size of all the attributes on a package version is limited to 3KB. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -9916,9 +10157,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys profile. If any alertTargets are defined in the current invocation, an exception occurs. - `"deleteBehaviors"`: If true, delete all behaviors defined for this security profile. If any behaviors are defined in the current invocation, an exception occurs. +- `"deleteMetricsExportConfig"`: Set the value as true to delete metrics export related + configurations. - `"expectedVersion"`: The expected version of the security profile. A new version is generated whenever the security profile is updated. If you specify a value that is different from the actual version, a VersionConflictException is thrown. +- `"metricsExportConfig"`: Specifies the MQTT topic and role ARN required for metric export. - `"securityProfileDescription"`: A description of the security profile. """ function update_security_profile( diff --git a/src/services/iot_roborunner.jl b/src/services/iot_roborunner.jl deleted file mode 100644 index 967f286858..0000000000 --- a/src/services/iot_roborunner.jl +++ /dev/null @@ -1,729 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: iot_roborunner -using AWS.Compat -using AWS.UUIDs - -""" - create_destination(name, site) - create_destination(name, site, params::Dict{String,<:Any}) - -Grants permission to create a destination - -# Arguments -- `name`: -- `site`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"additionalFixedProperties"`: -- `"clientToken"`: -- `"state"`: The state of the destination. Default used if not specified. -""" -function create_destination(name, site; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/createDestination", - Dict{String,Any}("name" => name, "site" => site, "clientToken" => string(uuid4())); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_destination( - name, - site, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return iot_roborunner( - "POST", - "/createDestination", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "name" => name, "site" => site, "clientToken" => string(uuid4()) - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_site(country_code, name) - create_site(country_code, name, params::Dict{String,<:Any}) - -Grants permission to create a site - -# Arguments -- `country_code`: -- `name`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"clientToken"`: -- `"description"`: -""" -function create_site(countryCode, name; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/createSite", - Dict{String,Any}( - "countryCode" => countryCode, "name" => name, "clientToken" => string(uuid4()) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_site( - countryCode, - name, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return iot_roborunner( - "POST", - "/createSite", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "countryCode" => countryCode, - "name" => name, - "clientToken" => string(uuid4()), - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_worker(fleet, name) - create_worker(fleet, name, params::Dict{String,<:Any}) - -Grants permission to create a worker - -# Arguments -- `fleet`: -- `name`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"additionalFixedProperties"`: -- `"additionalTransientProperties"`: -- `"clientToken"`: -- `"orientation"`: -- `"position"`: -- `"vendorProperties"`: -""" -function create_worker(fleet, name; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/createWorker", - Dict{String,Any}( - "fleet" => fleet, "name" => name, "clientToken" => string(uuid4()) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_worker( - fleet, - name, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return iot_roborunner( - "POST", - "/createWorker", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "fleet" => fleet, "name" => name, "clientToken" => string(uuid4()) - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_worker_fleet(name, site) - create_worker_fleet(name, site, params::Dict{String,<:Any}) - -Grants permission to create a worker fleet - -# Arguments -- `name`: -- `site`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"additionalFixedProperties"`: -- `"clientToken"`: -""" -function create_worker_fleet(name, site; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/createWorkerFleet", - Dict{String,Any}("name" => name, "site" => site, "clientToken" => string(uuid4())); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_worker_fleet( - name, - site, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return iot_roborunner( - "POST", - "/createWorkerFleet", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "name" => name, "site" => site, "clientToken" => string(uuid4()) - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_destination(id) - delete_destination(id, params::Dict{String,<:Any}) - -Grants permission to delete a destination - -# Arguments -- `id`: - -""" -function delete_destination(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/deleteDestination", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_destination( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/deleteDestination", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_site(id) - delete_site(id, params::Dict{String,<:Any}) - -Grants permission to delete a site - -# Arguments -- `id`: - -""" -function delete_site(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/deleteSite", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_site( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/deleteSite", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_worker(id) - delete_worker(id, params::Dict{String,<:Any}) - -Grants permission to delete a worker - -# Arguments -- `id`: - -""" -function delete_worker(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/deleteWorker", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_worker( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/deleteWorker", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_worker_fleet(id) - delete_worker_fleet(id, params::Dict{String,<:Any}) - -Grants permission to delete a worker fleet - -# Arguments -- `id`: - -""" -function delete_worker_fleet(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/deleteWorkerFleet", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_worker_fleet( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/deleteWorkerFleet", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => 
id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_destination(id) - get_destination(id, params::Dict{String,<:Any}) - -Grants permission to get a destination - -# Arguments -- `id`: - -""" -function get_destination(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", - "/getDestination", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_destination( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", - "/getDestination", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_site(id) - get_site(id, params::Dict{String,<:Any}) - -Grants permission to get a site - -# Arguments -- `id`: - -""" -function get_site(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", - "/getSite", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_site( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", - "/getSite", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_worker(id) - get_worker(id, params::Dict{String,<:Any}) - -Grants permission to get a worker - -# Arguments -- `id`: - -""" -function get_worker(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", - "/getWorker", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_worker( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", - "/getWorker", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - get_worker_fleet(id) - get_worker_fleet(id, params::Dict{String,<:Any}) - -Grants permission to get a worker fleet - -# Arguments -- `id`: - -""" -function get_worker_fleet(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", - "/getWorkerFleet", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function get_worker_fleet( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", - "/getWorkerFleet", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_destinations(site) - list_destinations(site, params::Dict{String,<:Any}) - -Grants permission to list destinations - -# Arguments -- `site`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"maxResults"`: -- `"nextToken"`: -- `"state"`: -""" -function list_destinations(site; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", - "/listDestinations", - Dict{String,Any}("site" => site); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_destinations( - site, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", - "/listDestinations", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("site" => site), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_sites() - list_sites(params::Dict{String,<:Any}) - -Grants permission to list sites - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: -- `"nextToken"`: -""" -function list_sites(; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", "/listSites"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_sites( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", "/listSites", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_worker_fleets(site) - list_worker_fleets(site, params::Dict{String,<:Any}) - -Grants permission to list worker fleets - -# Arguments -- `site`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: -- `"nextToken"`: -""" -function list_worker_fleets(site; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", - "/listWorkerFleets", - Dict{String,Any}("site" => site); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_worker_fleets( - site, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", - "/listWorkerFleets", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("site" => site), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_workers(site) - list_workers(site, params::Dict{String,<:Any}) - -Grants permission to list workers - -# Arguments -- `site`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"fleet"`: -- `"maxResults"`: -- `"nextToken"`: -""" -function list_workers(site; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "GET", - "/listWorkers", - Dict{String,Any}("site" => site); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_workers( - site, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "GET", - "/listWorkers", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("site" => site), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_destination(id) - update_destination(id, params::Dict{String,<:Any}) - -Grants permission to update a destination - -# Arguments -- `id`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"additionalFixedProperties"`: -- `"name"`: -- `"state"`: -""" -function update_destination(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/updateDestination", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_destination( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/updateDestination", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_site(id) - update_site(id, params::Dict{String,<:Any}) - -Grants permission to update a site - -# Arguments -- `id`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"countryCode"`: -- `"description"`: -- `"name"`: -""" -function update_site(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/updateSite", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_site( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/updateSite", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_worker(id) - update_worker(id, params::Dict{String,<:Any}) - -Grants permission to update a worker - -# Arguments -- `id`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"additionalFixedProperties"`: -- `"additionalTransientProperties"`: -- `"name"`: -- `"orientation"`: -- `"position"`: -- `"vendorProperties"`: -""" -function update_worker(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/updateWorker", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_worker( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/updateWorker", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_worker_fleet(id) - update_worker_fleet(id, params::Dict{String,<:Any}) - -Grants permission to update a worker fleet - -# Arguments -- `id`: - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"additionalFixedProperties"`: -- `"name"`: -""" -function update_worker_fleet(id; aws_config::AbstractAWSConfig=global_aws_config()) - return iot_roborunner( - "POST", - "/updateWorkerFleet", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_worker_fleet( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return iot_roborunner( - "POST", - "/updateWorkerFleet", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/iot_wireless.jl b/src/services/iot_wireless.jl index d8c0594b51..4e56a8d8eb 100644 --- a/src/services/iot_wireless.jl +++ b/src/services/iot_wireless.jl @@ -15,10 +15,13 @@ Associates a partner account with your AWS account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. +- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"Tags"`: The tags to attach to the specified resource. Tags are metadata that you can use to manage a resource. """ @@ -343,10 +346,13 @@ Creates a new destination that maps a device message to an AWS IoT rule. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. +- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"Description"`: The description of the new resource. - `"Tags"`: The tags to attach to the new destination. Tags are metadata that you can use to manage a resource. @@ -409,10 +415,13 @@ Creates a new device profile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. 
If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. +- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"LoRaWAN"`: The device profile information to use to create the device profile. - `"Name"`: The name of the new resource. - `"Sidewalk"`: The Sidewalk-related information for creating the Sidewalk device profile. @@ -518,10 +527,13 @@ Creates a multicast group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. +- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"Description"`: The description of the multicast group. - `"Name"`: - `"Tags"`: @@ -614,10 +626,13 @@ Creates a new service profile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. +- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"LoRaWAN"`: The service profile information to use to create the service profile. - `"Name"`: The name of the new resource. - `"Tags"`: The tags to attach to the new service profile. Tags are metadata that you can @@ -660,10 +675,13 @@ Provisions a wireless device. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. +- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"Description"`: The description of the new resource. - `"LoRaWAN"`: The device configuration information to use to create the wireless device. - `"Name"`: The name of the new resource. @@ -717,17 +735,24 @@ end create_wireless_gateway(lo_ra_wan) create_wireless_gateway(lo_ra_wan, params::Dict{String,<:Any}) -Provisions a wireless gateway. +Provisions a wireless gateway. When provisioning a wireless gateway, you might run into +duplication errors for the following reasons. If you specify a GatewayEui value that +already exists. If you used a ClientRequestToken with the same parameters within the last +10 minutes. To avoid this error, make sure that you use unique identifiers and parameters +for each request within the specified time period. # Arguments - `lo_ra_wan`: The gateway configuration information to use to create the wireless gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. +- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"Description"`: The description of the new resource. - `"Name"`: The name of the new resource. - `"Tags"`: The tags to attach to the new wireless gateway. Tags are metadata that you can @@ -822,10 +847,13 @@ Creates a gateway task definition. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: Each resource must have a unique client request token. If you try - to create a new resource with the same token as a resource that already exists, an - exception occurs. If you omit this value, AWS SDKs will automatically generate a unique - client request. 
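# Illustrative usage sketch, not a verified call: the hunks above revise the
# ClientRequestToken idempotency wording for the iot_wireless create_* operations and add
# the duplication-error note for create_wireless_gateway. The call below shows how an
# explicit token could be supplied through the optional params dict. It assumes AWS.jl's
# `@service` macro is used to load src/services/iot_wireless.jl (the macro spelling is an
# assumption), and the LoRaWAN gateway fields are placeholders.
using AWS: @service
using UUIDs: uuid4
@service IoT_Wireless

lorawan = Dict(
    "GatewayEui" => "a1b2c3d4567890ab",  # placeholder gateway EUI
    "RfRegion"   => "US915",
)

# Retrying with the same token and the same parameters is idempotent; reusing the token
# with different parameters within roughly 10 minutes returns an HTTP 409 conflict.
token = string(uuid4())
IoT_Wireless.create_wireless_gateway(
    lorawan,
    Dict("Name" => "dock-gateway-1", "ClientRequestToken" => token),
)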
+- `"ClientRequestToken"`: Each resource must have a unique client request token. The client + token is used to implement idempotency. It ensures that the request completes no more than + one time. If you retry a request with the same token and the same parameters, the request + will complete successfully. However, if you try to create a new resource using the same + token but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS + SDKs will automatically generate a unique client request. For more information about + idempotency, see Ensuring idempotency in Amazon EC2 API requests. - `"Name"`: The name of the new resource. - `"Tags"`: The tags to attach to the specified resource. Tags are metadata that you can use to manage a resource. @@ -1161,7 +1189,11 @@ end delete_wireless_gateway(id) delete_wireless_gateway(id, params::Dict{String,<:Any}) -Deletes a wireless gateway. +Deletes a wireless gateway. When deleting a wireless gateway, you might run into +duplication errors for the following reasons. If you specify a GatewayEui value that +already exists. If you used a ClientRequestToken with the same parameters within the last +10 minutes. To avoid this error, make sure that you use unique identifiers and parameters +for each request within the specified time period. # Arguments - `id`: The ID of the resource to delete. @@ -1671,6 +1703,56 @@ function get_log_levels_by_resource_types( ) end +""" + get_metric_configuration() + get_metric_configuration(params::Dict{String,<:Any}) + +Get the metric configuration status for this AWS account. + +""" +function get_metric_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return iot_wireless( + "GET", + "/metric-configuration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_metric_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return iot_wireless( + "GET", + "/metric-configuration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_metrics() + get_metrics(params::Dict{String,<:Any}) + +Get the summary metrics for this AWS account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"SummaryMetricQueries"`: The list of queries to retrieve the summary metrics. +""" +function get_metrics(; aws_config::AbstractAWSConfig=global_aws_config()) + return iot_wireless( + "POST", "/metrics"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_metrics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return iot_wireless( + "POST", "/metrics", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ get_multicast_group(id) get_multicast_group(id, params::Dict{String,<:Any}) @@ -3770,6 +3852,36 @@ function update_log_levels_by_resource_types( ) end +""" + update_metric_configuration() + update_metric_configuration(params::Dict{String,<:Any}) + +Update the summary metric configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"SummaryMetric"`: The value to be used to set summary metric configuration. 
+""" +function update_metric_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return iot_wireless( + "PUT", + "/metric-configuration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_metric_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return iot_wireless( + "PUT", + "/metric-configuration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_multicast_group(id) update_multicast_group(id, params::Dict{String,<:Any}) @@ -3820,8 +3932,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MulticastGroupsToAdd"`: Multicast group resources to add to the network analyzer configuration. Provide the MulticastGroupId of the resource to add in the input array. - `"MulticastGroupsToRemove"`: Multicast group resources to remove from the network - analyzer configuration. Provide the MulticastGroupId of the resource to remove in the input - array. + analyzer configuration. Provide the MulticastGroupId of the resources to remove in the + input array. - `"TraceContent"`: - `"WirelessDevicesToAdd"`: Wireless device resources to add to the network analyzer configuration. Provide the WirelessDeviceId of the resource to add in the input array. diff --git a/src/services/iotfleethub.jl b/src/services/iotfleethub.jl index a7073b27ef..41d432dcd0 100644 --- a/src/services/iotfleethub.jl +++ b/src/services/iotfleethub.jl @@ -8,13 +8,17 @@ using AWS.UUIDs create_application(application_name, role_arn) create_application(application_name, role_arn, params::Dict{String,<:Any}) -Creates a Fleet Hub for AWS IoT Device Management web application. Fleet Hub for AWS IoT -Device Management is in public preview and is subject to change. +Creates a Fleet Hub for IoT Device Management web application. When creating a Fleet Hub +application, you must create an organization instance of IAM Identity Center if you don't +already have one. The Fleet Hub application you create must also be in the same Amazon Web +Services Region of the organization instance of IAM Identity Center. For more information +see Enabling IAM Identity Center and Organization instances of IAM Identity Center. # Arguments - `application_name`: The name of the web application. - `role_arn`: The ARN of the role that the web application assumes when it interacts with - AWS IoT Core. The name of the role must be in the form AWSIotFleetHub_random_string . + Amazon Web Services IoT Core. The name of the role must be in the form + AWSIotFleetHub_random_string . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -69,8 +73,7 @@ end delete_application(application_id) delete_application(application_id, params::Dict{String,<:Any}) -Deletes a Fleet Hub for AWS IoT Device Management web application. Fleet Hub for AWS IoT -Device Management is in public preview and is subject to change. +Deletes a Fleet Hub for IoT Device Management web application. # Arguments - `application_id`: The unique Id of the web application. @@ -112,8 +115,7 @@ end describe_application(application_id) describe_application(application_id, params::Dict{String,<:Any}) -Gets information about a Fleet Hub for AWS IoT Device Management web application. Fleet -Hub for AWS IoT Device Management is in public preview and is subject to change. +Gets information about a Fleet Hub for IoT Device Management web application. 
# Arguments - `application_id`: The unique Id of the web application. @@ -147,9 +149,7 @@ end list_applications() list_applications(params::Dict{String,<:Any}) -Gets a list of Fleet Hub for AWS IoT Device Management web applications for the current -account. Fleet Hub for AWS IoT Device Management is in public preview and is subject to -change. +Gets a list of Fleet Hub for IoT Device Management web applications for the current account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -176,8 +176,7 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Lists the tags for the specified resource. Fleet Hub for AWS IoT Device Management is in -public preview and is subject to change. +Lists the tags for the specified resource. # Arguments - `resource_arn`: The ARN of the resource. @@ -212,8 +211,7 @@ end tag_resource(resource_arn, tags, params::Dict{String,<:Any}) Adds to or modifies the tags of the specified resource. Tags are metadata which can be used -to manage a resource. Fleet Hub for AWS IoT Device Management is in public preview and is -subject to change. +to manage a resource. # Arguments - `resource_arn`: The ARN of the resource. @@ -248,8 +246,7 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes the specified tags (metadata) from the resource. Fleet Hub for AWS IoT Device -Management is in public preview and is subject to change. +Removes the specified tags (metadata) from the resource. # Arguments - `resource_arn`: The ARN of the resource. @@ -286,8 +283,7 @@ end update_application(application_id) update_application(application_id, params::Dict{String,<:Any}) -Updates information about a Fleet Hub for a AWS IoT Device Management web application. -Fleet Hub for AWS IoT Device Management is in public preview and is subject to change. +Updates information about a Fleet Hub for IoT Device Management web application. # Arguments - `application_id`: The unique Id of the web application. diff --git a/src/services/iotfleetwise.jl b/src/services/iotfleetwise.jl index a36a5ff4e2..216bb60043 100644 --- a/src/services/iotfleetwise.jl +++ b/src/services/iotfleetwise.jl @@ -132,8 +132,8 @@ Web Services IoT FleetWise Developer Guide. - `collection_scheme`: The data collection scheme associated with the campaign. You can specify a scheme that collects data based on time or an event. - `name`: The name of the campaign to create. -- `signal_catalog_arn`: (Optional) The Amazon Resource Name (ARN) of the signal catalog to - associate with the campaign. +- `signal_catalog_arn`: The Amazon Resource Name (ARN) of the signal catalog to associate + with the campaign. - `target_arn`: The ARN of the vehicle or fleet to deploy a campaign to. # Optional Parameters @@ -144,9 +144,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dataDestinationConfigs"`: The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream. Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, - centralized data storage, data processing pipelines, and analytics. You can use Amazon - Timestream to access and analyze time series data, and Timestream to query vehicle data so - that you can identify trends and patterns. + centralized data storage, data processing pipelines, and analytics. 
Amazon Web Services IoT + FleetWise supports at-least-once file delivery to S3. Your vehicle data is stored on + multiple Amazon Web Services IoT FleetWise servers for redundancy and high availability. + You can use Amazon Timestream to access and analyze time series data, and Timestream to + query vehicle data so that you can identify trends and patterns. - `"dataExtraDimensions"`: (Optional) A list of vehicle attributes to associate with a campaign. Enrich the data with specified vehicle attributes. For example, add make and model to the campaign, and Amazon Web Services IoT FleetWise will associate the data with @@ -766,6 +768,30 @@ function get_decoder_manifest( ) end +""" + get_encryption_configuration() + get_encryption_configuration(params::Dict{String,<:Any}) + +Retrieves the encryption configuration for resources and data in Amazon Web Services IoT +FleetWise. + +""" +function get_encryption_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return iotfleetwise( + "GetEncryptionConfiguration"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_encryption_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return iotfleetwise( + "GetEncryptionConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_fleet(fleet_id) get_fleet(fleet_id, params::Dict{String,<:Any}) @@ -1365,6 +1391,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys returned in the response. To retrieve the next set of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value. +- `"signalNodeType"`: The type of node in the signal catalog. """ function list_signal_catalog_nodes(name; aws_config::AbstractAWSConfig=global_aws_config()) return iotfleetwise( @@ -1460,6 +1487,10 @@ Specify the nextToken parameter in the request to return more results. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attributeNames"`: The fully qualified names of the attributes. For example, the fully + qualified name of an attribute might be Vehicle.Body.Engine.Type. +- `"attributeValues"`: Static information about a vehicle attribute value in string format. + For example: \"1.3 L R2\" - `"maxResults"`: The maximum number of items to return, between 1 and 100, inclusive. - `"modelManifestArn"`: The Amazon Resource Name (ARN) of a vehicle model (model manifest). You can use this optional parameter to list only the vehicles created from a @@ -1522,6 +1553,48 @@ function list_vehicles_in_fleet( ) end +""" + put_encryption_configuration(encryption_type) + put_encryption_configuration(encryption_type, params::Dict{String,<:Any}) + +Creates or updates the encryption configuration. Amazon Web Services IoT FleetWise can +encrypt your data and resources using an Amazon Web Services managed key. Or, you can use a +KMS key that you own and manage. For more information, see Data encryption in the Amazon +Web Services IoT FleetWise Developer Guide. + +# Arguments +- `encryption_type`: The type of encryption. Choose KMS_BASED_ENCRYPTION to use a KMS key + or FLEETWISE_DEFAULT_ENCRYPTION to use an Amazon Web Services managed key. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"kmsKeyId"`: The ID of the KMS key that is used for encryption. 
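# Illustrative usage sketch for the encryption-configuration operations documented above:
# switch IoT FleetWise to a customer-managed KMS key, then read the configuration back.
# The key ID is a placeholder; assumes `@service IoTFleetWise` loads
# src/services/iotfleetwise.jl.
using AWS: @service
@service IoTFleetWise

IoTFleetWise.put_encryption_configuration(
    "KMS_BASED_ENCRYPTION",
    Dict("kmsKeyId" => "1234abcd-12ab-34cd-56ef-1234567890ab"),  # placeholder KMS key ID
)
# To fall back to the Amazon Web Services managed key, call it again with
# "FLEETWISE_DEFAULT_ENCRYPTION" and omit kmsKeyId.

# Read the resulting configuration (encryption type, key, and status) back.
IoTFleetWise.get_encryption_configuration()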
+""" +function put_encryption_configuration( + encryptionType; aws_config::AbstractAWSConfig=global_aws_config() +) + return iotfleetwise( + "PutEncryptionConfiguration", + Dict{String,Any}("encryptionType" => encryptionType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_encryption_configuration( + encryptionType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotfleetwise( + "PutEncryptionConfiguration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("encryptionType" => encryptionType), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_logging_options(cloud_watch_log_delivery) put_logging_options(cloud_watch_log_delivery, params::Dict{String,<:Any}) @@ -1575,22 +1648,14 @@ DeleteCampaign API operation. If you want to delete the Timestream inline policy service-linked role, such as to mitigate an overly permissive policy, you must first delete any existing campaigns. Then delete the service-linked role and register your account again to enable CloudWatch metrics. For more information, see DeleteServiceLinkedRole in the -Identity and Access Management API Reference. <p>Registers your Amazon Web Services -account, IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can -transfer your vehicle data to the Amazon Web Services Cloud. For more information, -including step-by-step procedures, see <a -href="https://docs.aws.amazon.com/iot-fleetwise/latest/developerguide/setting-up.html&q -uot;>Setting up Amazon Web Services IoT FleetWise</a>. </p> <note> -<p>An Amazon Web Services account is <b>not</b> the same thing as a -"user." An <a -href="https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction_identity-management -.html#intro-identity-users">Amazon Web Services user</a> is an identity that -you create using Identity and Access Management (IAM) and takes the form of either an <a -href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html">IAM -user</a> or an <a -href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html">IAM -role, both with credentials</a>. A single Amazon Web Services account can, and -typically does, contain many users and roles.</p> </note> +Identity and Access Management API Reference. Registers your Amazon Web Services account, +IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can transfer your +vehicle data to the Amazon Web Services Cloud. For more information, including step-by-step +procedures, see Setting up Amazon Web Services IoT FleetWise. An Amazon Web Services +account is not the same thing as a \"user.\" An Amazon Web Services user is an identity +that you create using Identity and Access Management (IAM) and takes the form of either an +IAM user or an IAM role, both with credentials. A single Amazon Web Services account can, +and typically does, contain many users and roles. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/iotsitewise.jl b/src/services/iotsitewise.jl index 2fa291914d..69d92c5fbf 100644 --- a/src/services/iotsitewise.jl +++ b/src/services/iotsitewise.jl @@ -13,11 +13,19 @@ parent asset's model. For more information, see Associating assets in the IoT Si Guide. # Arguments -- `asset_id`: The ID of the parent asset. -- `child_asset_id`: The ID of the child asset to be associated. -- `hierarchy_id`: The ID of a hierarchy in the parent asset's model. 
Hierarchies allow - different groupings of assets to be formed that all come from the same asset model. For - more information, see Asset hierarchies in the IoT SiteWise User Guide. +- `asset_id`: The ID of the parent asset. This can be either the actual ID in UUID format, + or else externalId: followed by the external ID, if it has one. For more information, see + Referencing objects with external IDs in the IoT SiteWise User Guide. +- `child_asset_id`: The ID of the child asset to be associated. This can be either the + actual ID in UUID format, or else externalId: followed by the external ID, if it has one. + For more information, see Referencing objects with external IDs in the IoT SiteWise User + Guide. +- `hierarchy_id`: The ID of a hierarchy in the parent asset's model. (This can be either + the actual ID in UUID format, or else externalId: followed by the external ID, if it has + one. For more information, see Referencing objects with external IDs in the IoT SiteWise + User Guide.) Hierarchies allow different groupings of assets to be formed that all come + from the same asset model. For more information, see Asset hierarchies in the IoT SiteWise + User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -74,8 +82,13 @@ Associates a time series (data stream) with an asset property. # Arguments - `alias`: The alias that identifies the time series. -- `asset_id`: The ID of the asset in which the asset property was created. -- `property_id`: The ID of the asset property. +- `asset_id`: The ID of the asset in which the asset property was created. This can be + either the actual ID in UUID format, or else externalId: followed by the external ID, if it + has one. For more information, see Referencing objects with external IDs in the IoT + SiteWise User Guide. +- `property_id`: The ID of the asset property. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -469,12 +482,22 @@ Creates an asset from an existing asset model. For more information, see Creatin the IoT SiteWise User Guide. # Arguments -- `asset_model_id`: The ID of the asset model from which to create the asset. +- `asset_model_id`: The ID of the asset model from which to create the asset. This can be + either the actual ID in UUID format, or else externalId: followed by the external ID, if it + has one. For more information, see Referencing objects with external IDs in the IoT + SiteWise User Guide. - `asset_name`: A friendly name for the asset. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"assetDescription"`: A description for the asset. +- `"assetExternalId"`: An external ID to assign to the asset. The external ID must be + unique within your Amazon Web Services account. For more information, see Using external + IDs in the IoT SiteWise User Guide. +- `"assetId"`: The ID to assign to the asset, if desired. IoT SiteWise automatically + generates a unique ID for you, so this parameter is never required. However, if you prefer + to supply your own ID instead, you can specify it here in UUID format. If you specify your + own ID, it must be globally unique. 
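# Illustrative usage sketch for the externalId: referencing convention introduced above:
# create an asset with a caller-chosen external ID, then associate it to a parent asset
# by referring to the assets and the hierarchy through their external IDs. All IDs are
# placeholders; assumes `@service IoTSiteWise` loads src/services/iotsitewise.jl.
using AWS: @service
@service IoTSiteWise

IoTSiteWise.create_asset(
    "externalId:pump-model",                 # asset model referenced by its external ID
    "Pump 42",
    Dict("assetExternalId" => "pump-42"),    # external ID assigned to the new asset
)

IoTSiteWise.associate_assets(
    "externalId:site-north",                 # parent asset
    "externalId:pump-42",                    # child asset created above
    "externalId:pumps-hierarchy",            # hierarchy in the parent asset's model
)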
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -529,26 +552,43 @@ Creates an asset model from specified property and hierarchy definitions. You cr from asset models. With asset models, you can easily create assets of the same type that have standardized definitions. Each asset created from a model inherits the asset model's property and hierarchy definitions. For more information, see Defining asset models in the -IoT SiteWise User Guide. +IoT SiteWise User Guide. You can create two types of asset models, ASSET_MODEL or +COMPONENT_MODEL. ASSET_MODEL – (default) An asset model that you can use to create +assets. Can't be included as a component in another asset model. COMPONENT_MODEL – A +reusable component that you can include in the composite models of other asset models. You +can't create assets directly from this type of asset model. # Arguments - `asset_model_name`: A unique, friendly name for the asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"assetModelCompositeModels"`: The composite asset models that are part of this asset - model. Composite asset models are asset models that contain specific properties. Each - composite model has a type that defines the properties that the composite model supports. - Use composite asset models to define alarms on this asset model. +- `"assetModelCompositeModels"`: The composite models that are part of this asset model. It + groups properties (such as attributes, measurements, transforms, and metrics) and child + composite models that model parts of your industrial equipment. Each composite model has a + type that defines the properties that the composite model supports. Use composite models to + define alarms on this asset model. When creating custom composite models, you need to use + CreateAssetModelCompositeModel. For more information, see <LINK>. - `"assetModelDescription"`: A description for the asset model. +- `"assetModelExternalId"`: An external ID to assign to the asset model. The external ID + must be unique within your Amazon Web Services account. For more information, see Using + external IDs in the IoT SiteWise User Guide. - `"assetModelHierarchies"`: The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see Asset hierarchies in the IoT SiteWise User Guide. You can specify up to 10 hierarchies per asset model. For more information, see Quotas in the IoT SiteWise User Guide. +- `"assetModelId"`: The ID to assign to the asset model, if desired. IoT SiteWise + automatically generates a unique ID for you, so this parameter is never required. However, + if you prefer to supply your own ID instead, you can specify it here in UUID format. If you + specify your own ID, it must be globally unique. - `"assetModelProperties"`: The property definitions of the asset model. For more information, see Asset properties in the IoT SiteWise User Guide. You can specify up to 200 properties per asset model. For more information, see Quotas in the IoT SiteWise User Guide. +- `"assetModelType"`: The type of asset model. ASSET_MODEL – (default) An asset model + that you can use to create assets. Can't be included as a component in another asset model. 
+ COMPONENT_MODEL – A reusable component that you can include in the composite models of + other asset models. You can't create assets directly from this type of asset model. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -590,14 +630,105 @@ function create_asset_model( ) end +""" + create_asset_model_composite_model(asset_model_composite_model_name, asset_model_composite_model_type, asset_model_id) + create_asset_model_composite_model(asset_model_composite_model_name, asset_model_composite_model_type, asset_model_id, params::Dict{String,<:Any}) + +Creates a custom composite model from specified property and hierarchy definitions. There +are two types of custom composite models, inline and component-model-based. Use +component-model-based custom composite models to define standard, reusable components. A +component-model-based custom composite model consists of a name, a description, and the ID +of the component model it references. A component-model-based custom composite model has no +properties of its own; its referenced component model provides its associated properties to +any created assets. For more information, see Custom composite models (Components) in the +IoT SiteWise User Guide. Use inline custom composite models to organize the properties of +an asset model. The properties of inline custom composite models are local to the asset +model where they are included and can't be used to create multiple assets. To create a +component-model-based model, specify the composedAssetModelId of an existing asset model +with assetModelType of COMPONENT_MODEL. To create an inline model, specify the +assetModelCompositeModelProperties and don't include an composedAssetModelId. + +# Arguments +- `asset_model_composite_model_name`: A unique, friendly name for the composite model. +- `asset_model_composite_model_type`: The composite model type. Valid values are AWS/ALARM, + CUSTOM, or AWS/L4E_ANOMALY. +- `asset_model_id`: The ID of the asset model this composite model is a part of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetModelCompositeModelDescription"`: A description for the composite model. +- `"assetModelCompositeModelExternalId"`: An external ID to assign to the composite model. + If the composite model is a derived composite model, or one nested inside a component + model, you can only set the external ID using UpdateAssetModelCompositeModel and specifying + the derived ID of the model or property from the created model it's a part of. +- `"assetModelCompositeModelId"`: The ID of the composite model. IoT SiteWise automatically + generates a unique ID for you, so this parameter is never required. However, if you prefer + to supply your own ID instead, you can specify it here in UUID format. If you specify your + own ID, it must be globally unique. +- `"assetModelCompositeModelProperties"`: The property definitions of the composite model. + For more information, see <LINK>. You can specify up to 200 properties per composite + model. For more information, see Quotas in the IoT SiteWise User Guide. +- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the + idempotency of the request. Don't reuse this client token if a new idempotent request is + required. +- `"composedAssetModelId"`: The ID of a composite model on this asset. 
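# Illustrative usage sketch for the component workflow described above: create a reusable
# COMPONENT_MODEL, then include it in another asset model as a component-model-based
# custom composite model by passing its ID as composedAssetModelId. The UUIDs are
# placeholders; assumes `@service IoTSiteWise` loads src/services/iotsitewise.jl.
using AWS: @service
@service IoTSiteWise

# Step 1: a reusable component model; assets cannot be created from it directly.
IoTSiteWise.create_asset_model(
    "MotorComponent",
    Dict("assetModelType" => "COMPONENT_MODEL"),
)

# Step 2: reference the component model (ID taken from the step 1 response) from the
# composite models of an existing ASSET_MODEL.
IoTSiteWise.create_asset_model_composite_model(
    "Motor",                                     # composite model name
    "CUSTOM",                                    # composite model type
    "11111111-2222-3333-4444-555555555555",      # placeholder: parent asset model ID
    Dict("composedAssetModelId" => "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
)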
+- `"parentAssetModelCompositeModelId"`: The ID of the parent composite model in this asset + model relationship. +""" +function create_asset_model_composite_model( + assetModelCompositeModelName, + assetModelCompositeModelType, + assetModelId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "POST", + "/asset-models/$(assetModelId)/composite-models", + Dict{String,Any}( + "assetModelCompositeModelName" => assetModelCompositeModelName, + "assetModelCompositeModelType" => assetModelCompositeModelType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_asset_model_composite_model( + assetModelCompositeModelName, + assetModelCompositeModelType, + assetModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "POST", + "/asset-models/$(assetModelId)/composite-models", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "assetModelCompositeModelName" => assetModelCompositeModelName, + "assetModelCompositeModelType" => assetModelCompositeModelType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_bulk_import_job(error_report_location, files, job_configuration, job_name, job_role_arn) create_bulk_import_job(error_report_location, files, job_configuration, job_name, job_role_arn, params::Dict{String,<:Any}) Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see -Create a bulk import job (CLI) in the Amazon Simple Storage Service User Guide. You must -enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. For -more information about how to configure storage settings, see PutStorageConfiguration. +Create a bulk import job (CLI) in the Amazon Simple Storage Service User Guide. Before you +create a bulk import job, you must enable IoT SiteWise warm tier or IoT SiteWise cold tier. +For more information about how to configure storage settings, see PutStorageConfiguration. +Bulk import is designed to store historical data to IoT SiteWise. It does not trigger +computations or notifications on IoT SiteWise warm or cold tier storage. # Arguments - `error_report_location`: The Amazon S3 destination where errors associated with the job @@ -608,6 +739,13 @@ more information about how to configure storage settings, see PutStorageConfigur - `job_name`: The unique name that helps identify the job request. - `job_role_arn`: The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"adaptiveIngestion"`: If set to true, ingest new data into IoT SiteWise storage. + Measurements with notifications, metrics and transforms are computed. If set to false, + historical data is ingested into IoT SiteWise as is. +- `"deleteFilesAfterImport"`: If set to true, your data files is deleted from S3, after + ingestion into IoT SiteWise storage. """ function create_bulk_import_job( errorReportLocation, @@ -812,13 +950,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys use the IoT Events managed Lambda function to manage your emails, you must verify the sender email address in Amazon SES. - `"portalAuthMode"`: The service to use to authenticate users to the portal. 
Choose from - the following options: SSO – The portal uses IAM Identity Center (successor to Single - Sign-On) to authenticate users and manage user permissions. Before you can create a portal - that uses IAM Identity Center, you must enable IAM Identity Center. For more information, - see Enabling IAM Identity Center in the IoT SiteWise User Guide. This option is only - available in Amazon Web Services Regions other than the China Regions. IAM – The - portal uses Identity and Access Management to authenticate users and manage user - permissions. You can't change this value after you create a portal. Default: SSO + the following options: SSO – The portal uses IAM Identity Center to authenticate users + and manage user permissions. Before you can create a portal that uses IAM Identity Center, + you must enable IAM Identity Center. For more information, see Enabling IAM Identity Center + in the IoT SiteWise User Guide. This option is only available in Amazon Web Services + Regions other than the China Regions. IAM – The portal uses Identity and Access + Management to authenticate users and manage user permissions. You can't change this value + after you create a portal. Default: SSO - `"portalDescription"`: A description for the portal. - `"portalLogoImageFile"`: A logo image to display in the portal. Upload a square, high-resolution image. The image is displayed on a dark background. @@ -980,11 +1118,13 @@ end delete_asset(asset_id, params::Dict{String,<:Any}) Deletes an asset. This action can't be undone. For more information, see Deleting assets -and models in the IoT SiteWise User Guide. You can't delete an asset that's associated to +and models in the IoT SiteWise User Guide. You can't delete an asset that's associated to another asset. For more information, see DisassociateAssets. # Arguments -- `asset_id`: The ID of the asset to delete. +- `asset_id`: The ID of the asset to delete. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1026,7 +1166,9 @@ the asset model that you want to delete. For more information, see Deleting asse models in the IoT SiteWise User Guide. # Arguments -- `asset_model_id`: The ID of the asset model to delete. +- `asset_model_id`: The ID of the asset model to delete. This can be either the actual ID + in UUID format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1059,6 +1201,56 @@ function delete_asset_model( ) end +""" + delete_asset_model_composite_model(asset_model_composite_model_id, asset_model_id) + delete_asset_model_composite_model(asset_model_composite_model_id, asset_model_id, params::Dict{String,<:Any}) + +Deletes a composite model. This action can't be undone. You must delete all assets created +from a composite model before you can delete the model. Also, you can't delete a composite +model if a parent asset model exists that contains a property formula expression that +depends on the asset model that you want to delete. For more information, see Deleting +assets and models in the IoT SiteWise User Guide. 
+ +# Arguments +- `asset_model_composite_model_id`: The ID of a composite model on this asset model. +- `asset_model_id`: The ID of the asset model, in UUID format. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the + idempotency of the request. Don't reuse this client token if a new idempotent request is + required. +""" +function delete_asset_model_composite_model( + assetModelCompositeModelId, + assetModelId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "DELETE", + "/asset-models/$(assetModelId)/composite-models/$(assetModelCompositeModelId)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_asset_model_composite_model( + assetModelCompositeModelId, + assetModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "DELETE", + "/asset-models/$(assetModelId)/composite-models/$(assetModelCompositeModelId)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_dashboard(dashboard_id) delete_dashboard(dashboard_id, params::Dict{String,<:Any}) @@ -1227,11 +1419,16 @@ the asset property. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"alias"`: The alias that identifies the time series. -- `"assetId"`: The ID of the asset in which the asset property was created. +- `"assetId"`: The ID of the asset in which the asset property was created. This can be + either the actual ID in UUID format, or else externalId: followed by the external ID, if it + has one. For more information, see Referencing objects with external IDs in the IoT + SiteWise User Guide. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. -- `"propertyId"`: The ID of the asset property. +- `"propertyId"`: The ID of the asset property. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. """ function delete_time_series(; aws_config::AbstractAWSConfig=global_aws_config()) return iotsitewise( @@ -1291,6 +1488,38 @@ function describe_access_policy( ) end +""" + describe_action(action_id) + describe_action(action_id, params::Dict{String,<:Any}) + +Retrieves information about an action. + +# Arguments +- `action_id`: The ID of the action. + +""" +function describe_action(actionId; aws_config::AbstractAWSConfig=global_aws_config()) + return iotsitewise( + "GET", + "/actions/$(actionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_action( + actionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "GET", + "/actions/$(actionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_asset(asset_id) describe_asset(asset_id, params::Dict{String,<:Any}) @@ -1298,7 +1527,9 @@ end Retrieves information about an asset. # Arguments -- `asset_id`: The ID of the asset. 
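# Illustrative usage sketch for the new describe_action and
# delete_asset_model_composite_model operations documented above. The IDs are
# placeholders; assumes `@service IoTSiteWise` loads src/services/iotsitewise.jl.
using AWS: @service
@service IoTSiteWise

# Look up a previously returned action by its ID (GET /actions/{actionId}).
IoTSiteWise.describe_action("2e3d4f5a-6789-0abc-def1-234567890abc")

# Remove a composite model from an asset model; any assets created from it must be
# deleted first, as noted above.
IoTSiteWise.delete_asset_model_composite_model(
    "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",      # composite model ID
    "11111111-2222-3333-4444-555555555555",      # asset model ID, in UUID format
)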
+- `asset_id`: The ID of the asset. This can be either the actual ID in UUID format, or else + externalId: followed by the external ID, if it has one. For more information, see + Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1321,6 +1552,49 @@ function describe_asset( ) end +""" + describe_asset_composite_model(asset_composite_model_id, asset_id) + describe_asset_composite_model(asset_composite_model_id, asset_id, params::Dict{String,<:Any}) + +Retrieves information about an asset composite model (also known as an asset component). An +AssetCompositeModel is an instance of an AssetModelCompositeModel. If you want to see +information about the model this is based on, call DescribeAssetModelCompositeModel. + +# Arguments +- `asset_composite_model_id`: The ID of a composite model on this asset. This can be either + the actual ID in UUID format, or else externalId: followed by the external ID, if it has + one. For more information, see Referencing objects with external IDs in the IoT SiteWise + User Guide. +- `asset_id`: The ID of the asset. This can be either the actual ID in UUID format, or else + externalId: followed by the external ID, if it has one. For more information, see + Referencing objects with external IDs in the IoT SiteWise User Guide. + +""" +function describe_asset_composite_model( + assetCompositeModelId, assetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return iotsitewise( + "GET", + "/assets/$(assetId)/composite-models/$(assetCompositeModelId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_asset_composite_model( + assetCompositeModelId, + assetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "GET", + "/assets/$(assetId)/composite-models/$(assetCompositeModelId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_asset_model(asset_model_id) describe_asset_model(asset_model_id, params::Dict{String,<:Any}) @@ -1328,7 +1602,9 @@ end Retrieves information about an asset model. # Arguments -- `asset_model_id`: The ID of the asset model. +- `asset_model_id`: The ID of the asset model. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1359,6 +1635,51 @@ function describe_asset_model( ) end +""" + describe_asset_model_composite_model(asset_model_composite_model_id, asset_model_id) + describe_asset_model_composite_model(asset_model_composite_model_id, asset_model_id, params::Dict{String,<:Any}) + +Retrieves information about an asset model composite model (also known as an asset model +component). For more information, see Custom composite models (Components) in the IoT +SiteWise User Guide. + +# Arguments +- `asset_model_composite_model_id`: The ID of a composite model on this asset model. This + can be either the actual ID in UUID format, or else externalId: followed by the external + ID, if it has one. For more information, see Referencing objects with external IDs in the + IoT SiteWise User Guide. +- `asset_model_id`: The ID of the asset model. 
This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. + +""" +function describe_asset_model_composite_model( + assetModelCompositeModelId, + assetModelId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "GET", + "/asset-models/$(assetModelId)/composite-models/$(assetModelCompositeModelId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_asset_model_composite_model( + assetModelCompositeModelId, + assetModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "GET", + "/asset-models/$(assetModelId)/composite-models/$(assetModelCompositeModelId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_asset_property(asset_id, property_id) describe_asset_property(asset_id, property_id, params::Dict{String,<:Any}) @@ -1370,8 +1691,12 @@ includes the new default value. This operation doesn't return the value of the property. To get the value of an asset property, use GetAssetPropertyValue. # Arguments -- `asset_id`: The ID of the asset. -- `property_id`: The ID of the asset property. +- `asset_id`: The ID of the asset. This can be either the actual ID in UUID format, or else + externalId: followed by the external ID, if it has one. For more information, see + Referencing objects with external IDs in the IoT SiteWise User Guide. +- `property_id`: The ID of the asset property. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. """ function describe_asset_property( @@ -1689,8 +2014,13 @@ that identifies the asset property. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"alias"`: The alias that identifies the time series. -- `"assetId"`: The ID of the asset in which the asset property was created. -- `"propertyId"`: The ID of the asset property. +- `"assetId"`: The ID of the asset in which the asset property was created. This can be + either the actual ID in UUID format, or else externalId: followed by the external ID, if it + has one. For more information, see Referencing objects with external IDs in the IoT + SiteWise User Guide. +- `"propertyId"`: The ID of the asset property. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. """ function describe_time_series(; aws_config::AbstractAWSConfig=global_aws_config()) return iotsitewise( @@ -1720,12 +2050,20 @@ Disassociates a child asset from the given parent asset through a hierarchy defi parent asset's model. # Arguments -- `asset_id`: The ID of the parent asset from which to disassociate the child asset. -- `child_asset_id`: The ID of the child asset to disassociate. -- `hierarchy_id`: The ID of a hierarchy in the parent asset's model. Hierarchies allow - different groupings of assets to be formed that all come from the same asset model. You can - use the hierarchy ID to identify the correct asset to disassociate. For more information, - see Asset hierarchies in the IoT SiteWise User Guide. 
+- `asset_id`: The ID of the parent asset from which to disassociate the child asset. This + can be either the actual ID in UUID format, or else externalId: followed by the external + ID, if it has one. For more information, see Referencing objects with external IDs in the + IoT SiteWise User Guide. +- `child_asset_id`: The ID of the child asset to disassociate. This can be either the + actual ID in UUID format, or else externalId: followed by the external ID, if it has one. + For more information, see Referencing objects with external IDs in the IoT SiteWise User + Guide. +- `hierarchy_id`: The ID of a hierarchy in the parent asset's model. (This can be either + the actual ID in UUID format, or else externalId: followed by the external ID, if it has + one. For more information, see Referencing objects with external IDs in the IoT SiteWise + User Guide.) Hierarchies allow different groupings of assets to be formed that all come + from the same asset model. You can use the hierarchy ID to identify the correct asset to + disassociate. For more information, see Asset hierarchies in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1782,8 +2120,13 @@ Disassociates a time series (data stream) from an asset property. # Arguments - `alias`: The alias that identifies the time series. -- `asset_id`: The ID of the asset in which the asset property was created. -- `property_id`: The ID of the asset property. +- `asset_id`: The ID of the asset in which the asset property was created. This can be + either the actual ID in UUID format, or else externalId: followed by the external ID, if it + has one. For more information, see Referencing objects with external IDs in the IoT + SiteWise User Guide. +- `property_id`: The ID of the asset property. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1834,6 +2177,107 @@ function disassociate_time_series_from_asset_property( ) end +""" + execute_action(action_definition_id, action_payload, target_resource) + execute_action(action_definition_id, action_payload, target_resource, params::Dict{String,<:Any}) + +Executes an action on a target resource. + +# Arguments +- `action_definition_id`: The ID of the action definition. +- `action_payload`: The JSON payload of the action. +- `target_resource`: The resource the action will be taken on. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the + idempotency of the request. Don't reuse this client token if a new idempotent request is + required. 
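+
+# Example
+A minimal usage sketch, not taken from the service documentation: it assumes the module has
+been loaded with AWS.jl's usual `@service IoTSiteWise` pattern, and the IDs plus the payload
+and target-resource shapes shown here are placeholders/assumptions rather than values defined
+in this file. The wrapper fills in `clientToken` automatically.
+```julia
+using AWS: @service
+@service IoTSiteWise
+
+# Execute a (hypothetical) action definition against a target asset.
+IoTSiteWise.execute_action(
+    "my-action-definition-id",                                   # placeholder action definition ID
+    Dict("stringValue" => "{\"command\": \"start\"}"),           # assumed payload shape (JSON string)
+    Dict("assetId" => "11111111-2222-3333-4444-555555555555"),   # assumed target-resource shape
+)
+```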
+""" +function execute_action( + actionDefinitionId, + actionPayload, + targetResource; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "POST", + "/actions", + Dict{String,Any}( + "actionDefinitionId" => actionDefinitionId, + "actionPayload" => actionPayload, + "targetResource" => targetResource, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_action( + actionDefinitionId, + actionPayload, + targetResource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "POST", + "/actions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "actionDefinitionId" => actionDefinitionId, + "actionPayload" => actionPayload, + "targetResource" => targetResource, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + execute_query(query_statement) + execute_query(query_statement, params::Dict{String,<:Any}) + +Run SQL queries to retrieve metadata and time-series data from asset models, assets, +measurements, metrics, transforms, and aggregates. + +# Arguments +- `query_statement`: The IoT SiteWise query statement. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return at one time. The default is 25. +- `"nextToken"`: The string that specifies the next page of results. +""" +function execute_query(queryStatement; aws_config::AbstractAWSConfig=global_aws_config()) + return iotsitewise( + "POST", + "/queries/execution", + Dict{String,Any}("queryStatement" => queryStatement); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_query( + queryStatement, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "POST", + "/queries/execution", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("queryStatement" => queryStatement), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_asset_property_aggregates(aggregate_types, end_date, resolution, start_date) get_asset_property_aggregates(aggregate_types, end_date, resolution, start_date, params::Dict{String,<:Any}) @@ -1854,16 +2298,16 @@ asset property's alias, see UpdateAssetProperty. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"assetId"`: The ID of the asset. +- `"assetId"`: The ID of the asset, in UUID format. - `"maxResults"`: The maximum number of results to return for each paginated request. A result set is returned in the two cases, whichever occurs first. The size of the result set is equal to 1 MB. The number of data points in the result set is equal to the value - of maxResults. The maximum value of maxResults is 250. + of maxResults. The maximum value of maxResults is 2500. - `"nextToken"`: The token to be used for the next set of paginated results. - `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide. -- `"propertyId"`: The ID of the asset property. +- `"propertyId"`: The ID of the asset property, in UUID format. - `"qualities"`: The quality by which to filter asset data. 
- `"timeOrdering"`: The chronological sorting order of the requested information. Default: ASCENDING @@ -1928,11 +2372,11 @@ asset property's alias, see UpdateAssetProperty. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"assetId"`: The ID of the asset. +- `"assetId"`: The ID of the asset, in UUID format. - `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide. -- `"propertyId"`: The ID of the asset property. +- `"propertyId"`: The ID of the asset property, in UUID format. """ function get_asset_property_value(; aws_config::AbstractAWSConfig=global_aws_config()) return iotsitewise( @@ -1964,7 +2408,7 @@ UpdateAssetProperty. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"assetId"`: The ID of the asset. +- `"assetId"`: The ID of the asset, in UUID format. - `"endDate"`: The inclusive end of the range from which to query historical data, expressed in seconds in Unix epoch time. - `"maxResults"`: The maximum number of results to return for each paginated request. A @@ -1975,7 +2419,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide. -- `"propertyId"`: The ID of the asset property. +- `"propertyId"`: The ID of the asset property, in UUID format. - `"qualities"`: The quality by which to filter asset data. - `"startDate"`: The exclusive start of the range from which to query historical data, expressed in seconds in Unix epoch time. @@ -2040,7 +2484,7 @@ asset property's alias, see UpdateAssetProperty. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"assetId"`: The ID of the asset. +- `"assetId"`: The ID of the asset, in UUID format. - `"endTimeOffsetInNanos"`: The nanosecond offset converted from endTimeInSeconds. - `"intervalWindowInSeconds"`: The query interval for the window, in seconds. IoT SiteWise computes each interpolated value by using data points from the timestamp of each interval, @@ -2062,7 +2506,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide. -- `"propertyId"`: The ID of the asset property. +- `"propertyId"`: The ID of the asset property, in UUID format. - `"startTimeOffsetInNanos"`: The nanosecond offset converted from startTimeInSeconds. """ function get_interpolated_asset_property_values( @@ -2158,6 +2602,100 @@ function list_access_policies( ) end +""" + list_actions(target_resource_id, target_resource_type) + list_actions(target_resource_id, target_resource_type, params::Dict{String,<:Any}) + +Retrieves a paginated list of actions for a specific target resource. + +# Arguments +- `target_resource_id`: The ID of the target resource. +- `target_resource_type`: The type of resource. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return for each paginated request. +- `"nextToken"`: The token to be used for the next set of paginated results. +""" +function list_actions( + targetResourceId, targetResourceType; aws_config::AbstractAWSConfig=global_aws_config() +) + return iotsitewise( + "GET", + "/actions", + Dict{String,Any}( + "targetResourceId" => targetResourceId, + "targetResourceType" => targetResourceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_actions( + targetResourceId, + targetResourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "GET", + "/actions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "targetResourceId" => targetResourceId, + "targetResourceType" => targetResourceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_asset_model_composite_models(asset_model_id) + list_asset_model_composite_models(asset_model_id, params::Dict{String,<:Any}) + +Retrieves a paginated list of composite models associated with the asset model + +# Arguments +- `asset_model_id`: The ID of the asset model. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return for each paginated request. + Default: 50 +- `"nextToken"`: The token to be used for the next set of paginated results. +""" +function list_asset_model_composite_models( + assetModelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return iotsitewise( + "GET", + "/asset-models/$(assetModelId)/composite-models"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_asset_model_composite_models( + assetModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "GET", + "/asset-models/$(assetModelId)/composite-models", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_asset_model_properties(asset_model_id) list_asset_model_properties(asset_model_id, params::Dict{String,<:Any}) @@ -2167,7 +2705,9 @@ properties associated with the model before you finish listing all the propertie to start all over again. # Arguments -- `asset_model_id`: The ID of the asset model. +- `asset_model_id`: The ID of the asset model. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2211,6 +2751,10 @@ Retrieves a paginated list of summaries of all asset models. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetModelTypes"`: The type of asset model. ASSET_MODEL – (default) An asset model + that you can use to create assets. Can't be included as a component in another asset model. 
+ COMPONENT_MODEL – A reusable component that you can include in the composite models of + other asset models. You can't create assets directly from this type of asset model. - `"maxResults"`: The maximum number of results to return for each paginated request. Default: 50 - `"nextToken"`: The token to be used for the next set of paginated results. @@ -2241,7 +2785,9 @@ associated with the model before you finish listing all the properties, you need all over again. # Arguments -- `asset_id`: The ID of the asset. +- `asset_id`: The ID of the asset. This can be either the actual ID in UUID format, or else + externalId: followed by the external ID, if it has one. For more information, see + Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2281,7 +2827,9 @@ Retrieves a paginated list of asset relationships for an asset. You can use this to identify an asset's root asset and all associated assets between that asset and its root. # Arguments -- `asset_id`: The ID of the asset. +- `asset_id`: The ID of the asset. This can be either the actual ID in UUID format, or else + externalId: followed by the external ID, if it has one. For more information, see + Referencing objects with external IDs in the IoT SiteWise User Guide. - `traversal_type`: The type of traversal to use to identify asset relationships. Choose the following option: PATH_TO_ROOT – Identify the asset's parent assets up to the root asset. The asset that you specify in assetId is the first result in the list of @@ -2333,7 +2881,9 @@ assets for each asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"assetModelId"`: The ID of the asset model by which to filter the list of assets. This - parameter is required if you choose ALL for filter. + parameter is required if you choose ALL for filter. This can be either the actual ID in + UUID format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. - `"filter"`: The filter for the requested list of assets. Choose one of the following options: ALL – The list includes all assets for a given asset model ID. The assetModelId parameter is required if you filter by ALL. TOP_LEVEL – The list includes @@ -2364,14 +2914,19 @@ following: List child assets associated to a parent asset by a hierarchy that specify. List an asset's parent asset. # Arguments -- `asset_id`: The ID of the asset to query. +- `asset_id`: The ID of the asset to query. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"hierarchyId"`: The ID of the hierarchy by which child assets are associated to the - asset. To find a hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This - parameter is required if you choose CHILD for traversalDirection. For more information, see - Asset hierarchies in the IoT SiteWise User Guide. + asset. (This can be either the actual ID in UUID format, or else externalId: followed by + the external ID, if it has one. For more information, see Referencing objects with external + IDs in the IoT SiteWise User Guide.) 
To find a hierarchy ID, use the DescribeAsset or + DescribeAssetModel operations. This parameter is required if you choose CHILD for + traversalDirection. For more information, see Asset hierarchies in the IoT SiteWise User + Guide. - `"maxResults"`: The maximum number of results to return for each paginated request. Default: 50 - `"nextToken"`: The token to be used for the next set of paginated results. @@ -2426,6 +2981,48 @@ function list_bulk_import_jobs( ) end +""" + list_composition_relationships(asset_model_id) + list_composition_relationships(asset_model_id, params::Dict{String,<:Any}) + +Retrieves a paginated list of composition relationships for an asset model of type +COMPONENT_MODEL. + +# Arguments +- `asset_model_id`: The ID of the asset model. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return for each paginated request. + Default: 50 +- `"nextToken"`: The token to be used for the next set of paginated results. +""" +function list_composition_relationships( + assetModelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return iotsitewise( + "GET", + "/asset-models/$(assetModelId)/composition-relationships"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_composition_relationships( + assetModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "GET", + "/asset-models/$(assetModelId)/composition-relationships", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_dashboards(project_id) list_dashboards(project_id, params::Dict{String,<:Any}) @@ -2643,7 +3240,10 @@ Retrieves a paginated list of time series (data streams). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"aliasPrefix"`: The alias prefix of the time series. -- `"assetId"`: The ID of the asset in which the asset property was created. +- `"assetId"`: The ID of the asset in which the asset property was created. This can be + either the actual ID in UUID format, or else externalId: followed by the external ID, if it + has one. For more information, see Referencing objects with external IDs in the IoT + SiteWise User Guide. - `"maxResults"`: The maximum number of results to return for each paginated request. - `"nextToken"`: The token to be used for the next set of paginated results. - `"timeSeriesType"`: The type of the time series. The time series type can be one of the @@ -2771,6 +3371,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"multiLayerStorage"`: Identifies a storage destination. If you specified MULTI_LAYER_STORAGE for the storage type, you must specify a MultiLayerStorage object. - `"retentionPeriod"`: +- `"warmTier"`: A service managed storage tier optimized for analytical queries. It stores + periodically uploaded, buffered and historical data ingested with the CreaeBulkImportJob + API. +- `"warmTierRetentionPeriod"`: Set this period to specify how long your data is stored in + the warm tier before it is deleted. You can set this only if cold tier is enabled. 
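+
+# Example
+A minimal sketch, assuming the service has been loaded with AWS.jl's `@service IoTSiteWise`
+pattern; the `"ENABLED"` warm-tier value is an assumption about the API's enum and is not
+defined in this file.
+```julia
+using AWS: @service
+@service IoTSiteWise
+
+# Keep the SiteWise-managed hot tier and turn on the warm tier.
+IoTSiteWise.put_storage_configuration(
+    "SITEWISE_DEFAULT_STORAGE",          # storage_type positional argument
+    Dict("warmTier" => "ENABLED"),       # optional parameters passed as params::Dict
+)
+```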
""" function put_storage_configuration( storageType; aws_config::AbstractAWSConfig=global_aws_config() @@ -2963,12 +3568,17 @@ Updates an asset's name. For more information, see Updating assets and models in SiteWise User Guide. # Arguments -- `asset_id`: The ID of the asset to update. +- `asset_id`: The ID of the asset to update. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. - `asset_name`: A friendly name for the asset. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"assetDescription"`: A description for the asset. +- `"assetExternalId"`: An external ID to assign to the asset. The asset must not already + have an external ID. The external ID must be unique within your Amazon Web Services + account. For more information, see Using external IDs in the IoT SiteWise User Guide. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -3021,16 +3631,24 @@ IoT SiteWise disassociates every asset associated with that hierarchy. You can't type or data type of an existing property. # Arguments -- `asset_model_id`: The ID of the asset model to update. +- `asset_model_id`: The ID of the asset model to update. This can be either the actual ID + in UUID format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. - `asset_model_name`: A unique, friendly name for the asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"assetModelCompositeModels"`: The composite asset models that are part of this asset - model. Composite asset models are asset models that contain specific properties. Each - composite model has a type that defines the properties that the composite model supports. - Use composite asset models to define alarms on this asset model. +- `"assetModelCompositeModels"`: The composite models that are part of this asset model. It + groups properties (such as attributes, measurements, transforms, and metrics) and child + composite models that model parts of your industrial equipment. Each composite model has a + type that defines the properties that the composite model supports. Use composite models to + define alarms on this asset model. When creating custom composite models, you need to use + CreateAssetModelCompositeModel. For more information, see <LINK>. - `"assetModelDescription"`: A description for the asset model. +- `"assetModelExternalId"`: An external ID to assign to the asset model. The asset model + must not already have an external ID. The external ID must be unique within your Amazon Web + Services account. For more information, see Using external IDs in the IoT SiteWise User + Guide. - `"assetModelHierarchies"`: The updated hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. 
For more information, see Asset hierarchies in the IoT SiteWise User @@ -3079,6 +3697,81 @@ function update_asset_model( ) end +""" + update_asset_model_composite_model(asset_model_composite_model_id, asset_model_composite_model_name, asset_model_id) + update_asset_model_composite_model(asset_model_composite_model_id, asset_model_composite_model_name, asset_model_id, params::Dict{String,<:Any}) + +Updates a composite model and all of the assets that were created from the model. Each +asset created from the model inherits the updated asset model's property and hierarchy +definitions. For more information, see Updating assets and models in the IoT SiteWise User +Guide. If you remove a property from a composite asset model, IoT SiteWise deletes all +previous data for that property. You can’t change the type or data type of an existing +property. To replace an existing composite asset model property with a new one with the +same name, do the following: Submit an UpdateAssetModelCompositeModel request with the +entire existing property removed. Submit a second UpdateAssetModelCompositeModel request +that includes the new property. The new asset property will have the same name as the +previous one and IoT SiteWise will generate a new unique id. + +# Arguments +- `asset_model_composite_model_id`: The ID of a composite model on this asset model. +- `asset_model_composite_model_name`: A unique, friendly name for the composite model. +- `asset_model_id`: The ID of the asset model, in UUID format. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetModelCompositeModelDescription"`: A description for the composite model. +- `"assetModelCompositeModelExternalId"`: An external ID to assign to the asset model. You + can only set the external ID of the asset model if it wasn't set when it was created, or + you're setting it to the exact same thing as when it was created. +- `"assetModelCompositeModelProperties"`: The property definitions of the composite model. + For more information, see <LINK>. You can specify up to 200 properties per composite + model. For more information, see Quotas in the IoT SiteWise User Guide. +- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the + idempotency of the request. Don't reuse this client token if a new idempotent request is + required. 
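+
+# Example
+A minimal sketch of renaming a composite model, assuming the service has been loaded with
+AWS.jl's `@service IoTSiteWise` pattern; both UUIDs are placeholders. The wrapper supplies a
+random `clientToken` automatically.
+```julia
+using AWS: @service
+@service IoTSiteWise
+
+IoTSiteWise.update_asset_model_composite_model(
+    "11111111-2222-3333-4444-555555555555",   # composite model ID (placeholder)
+    "MotorComponentV2",                        # new friendly name
+    "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",    # asset model ID (placeholder)
+)
+```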
+""" +function update_asset_model_composite_model( + assetModelCompositeModelId, + assetModelCompositeModelName, + assetModelId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "PUT", + "/asset-models/$(assetModelId)/composite-models/$(assetModelCompositeModelId)", + Dict{String,Any}( + "assetModelCompositeModelName" => assetModelCompositeModelName, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_asset_model_composite_model( + assetModelCompositeModelId, + assetModelCompositeModelName, + assetModelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iotsitewise( + "PUT", + "/asset-models/$(assetModelId)/composite-models/$(assetModelCompositeModelId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "assetModelCompositeModelName" => assetModelCompositeModelName, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_asset_property(asset_id, property_id) update_asset_property(asset_id, property_id, params::Dict{String,<:Any}) @@ -3089,8 +3782,12 @@ notification state, you must include the existing values in the UpdateAssetPrope request. For more information, see DescribeAssetProperty. # Arguments -- `asset_id`: The ID of the asset to be updated. -- `property_id`: The ID of the asset property to be updated. +- `asset_id`: The ID of the asset to be updated. This can be either the actual ID in UUID + format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. +- `property_id`: The ID of the asset property to be updated. This can be either the actual + ID in UUID format, or else externalId: followed by the external ID, if it has one. For more + information, see Referencing objects with external IDs in the IoT SiteWise User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/iottwinmaker.jl b/src/services/iottwinmaker.jl index f2741e89d7..c842e1732d 100644 --- a/src/services/iottwinmaker.jl +++ b/src/services/iottwinmaker.jl @@ -42,6 +42,40 @@ function batch_put_property_values( ) end +""" + cancel_metadata_transfer_job(metadata_transfer_job_id) + cancel_metadata_transfer_job(metadata_transfer_job_id, params::Dict{String,<:Any}) + +Cancels the metadata transfer job. + +# Arguments +- `metadata_transfer_job_id`: The metadata transfer job Id. + +""" +function cancel_metadata_transfer_job( + metadataTransferJobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return iottwinmaker( + "PUT", + "/metadata-transfer-jobs/$(metadataTransferJobId)/cancel"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_metadata_transfer_job( + metadataTransferJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iottwinmaker( + "PUT", + "/metadata-transfer-jobs/$(metadataTransferJobId)/cancel", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_component_type(component_type_id, workspace_id) create_component_type(component_type_id, workspace_id, params::Dict{String,<:Any}) @@ -55,6 +89,9 @@ Creates a component type. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"componentTypeName"`: A friendly name for the component type. +- `"compositeComponentTypes"`: This is an object that maps strings to + compositeComponentTypes of the componentType. CompositeComponentType is referenced by + componentTypeId. - `"description"`: The description of the component type. - `"extendsFrom"`: Specifies the parent component type to extend. - `"functions"`: An object that maps strings to the functions in the component type. Each @@ -105,6 +142,9 @@ Creates an entity. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"components"`: An object that maps strings to the components in the entity. Each string in the mapping must be unique to this object. +- `"compositeComponents"`: This is an object that maps strings to compositeComponent + updates in the request. Each key of the map represents the componentPath of the + compositeComponent. - `"description"`: The description of the entity. - `"entityId"`: The ID of the entity. - `"parentEntityId"`: The ID of the entity's parent entity. @@ -138,6 +178,53 @@ function create_entity( ) end +""" + create_metadata_transfer_job(destination, sources) + create_metadata_transfer_job(destination, sources, params::Dict{String,<:Any}) + +Creates a new metadata transfer job. + +# Arguments +- `destination`: The metadata transfer job destination. +- `sources`: The metadata transfer job sources. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The metadata transfer job description. +- `"metadataTransferJobId"`: The metadata transfer job Id. +""" +function create_metadata_transfer_job( + destination, sources; aws_config::AbstractAWSConfig=global_aws_config() +) + return iottwinmaker( + "POST", + "/metadata-transfer-jobs", + Dict{String,Any}("destination" => destination, "sources" => sources); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_metadata_transfer_job( + destination, + sources, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iottwinmaker( + "POST", + "/metadata-transfer-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("destination" => destination, "sources" => sources), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_scene(content_location, scene_id, workspace_id) create_scene(content_location, scene_id, workspace_id, params::Dict{String,<:Any}) @@ -238,36 +325,31 @@ function create_sync_job( end """ - create_workspace(role, s3_location, workspace_id) - create_workspace(role, s3_location, workspace_id, params::Dict{String,<:Any}) + create_workspace(workspace_id) + create_workspace(workspace_id, params::Dict{String,<:Any}) Creates a workplace. # Arguments -- `role`: The ARN of the execution role associated with the workspace. -- `s3_location`: The ARN of the S3 bucket where resources associated with the workspace are - stored. - `workspace_id`: The ID of the workspace. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: The description of the workspace. +- `"role"`: The ARN of the execution role associated with the workspace. +- `"s3Location"`: The ARN of the S3 bucket where resources associated with the workspace + are stored. 
- `"tags"`: Metadata that you can use to manage the workspace """ -function create_workspace( - role, s3Location, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() -) +function create_workspace(workspaceId; aws_config::AbstractAWSConfig=global_aws_config()) return iottwinmaker( "POST", - "/workspaces/$(workspaceId)", - Dict{String,Any}("role" => role, "s3Location" => s3Location); + "/workspaces/$(workspaceId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_workspace( - role, - s3Location, workspaceId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -275,11 +357,7 @@ function create_workspace( return iottwinmaker( "POST", "/workspaces/$(workspaceId)", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("role" => role, "s3Location" => s3Location), params - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -470,7 +548,8 @@ end execute_query(query_statement, workspace_id, params::Dict{String,<:Any}) Run queries to access information from your knowledge graph of entities within individual -workspaces. +workspaces. The ExecuteQuery action only works with Amazon Web Services Java SDK2. +ExecuteQuery will not work with any Amazon Web Services Java SDK version < 2.x. # Arguments - `query_statement`: The query statement. @@ -478,8 +557,7 @@ workspaces. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to return at one time. The default is 25. - Valid Range: Minimum value of 1. Maximum value of 250. +- `"maxResults"`: The maximum number of results to return at one time. The default is 50. - `"nextToken"`: The string that specifies the next page of results. """ function execute_query( @@ -588,6 +666,40 @@ function get_entity( ) end +""" + get_metadata_transfer_job(metadata_transfer_job_id) + get_metadata_transfer_job(metadata_transfer_job_id, params::Dict{String,<:Any}) + +Gets a nmetadata transfer job. + +# Arguments +- `metadata_transfer_job_id`: The metadata transfer job Id. + +""" +function get_metadata_transfer_job( + metadataTransferJobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return iottwinmaker( + "GET", + "/metadata-transfer-jobs/$(metadataTransferJobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_metadata_transfer_job( + metadataTransferJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iottwinmaker( + "GET", + "/metadata-transfer-jobs/$(metadataTransferJobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_pricing_plan() get_pricing_plan(params::Dict{String,<:Any}) @@ -626,6 +738,8 @@ specify a value for either componentName, componentTypeId, entityId, or workspac # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"componentName"`: The name of the component whose property values the operation returns. +- `"componentPath"`: This string specifies the path to the composite component, starting + from the top-level component. - `"componentTypeId"`: The ID of the component type whose property values the operation returns. - `"entityId"`: The ID of the entity whose property values the operation returns. @@ -681,6 +795,8 @@ quries, specify a value for componentTypeId. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"componentName"`: The name of the component. +- `"componentPath"`: This string specifies the path to the composite component, starting + from the top-level component. - `"componentTypeId"`: The ID of the component type. - `"endDateTime"`: The date and time of the latest property value to return. - `"endTime"`: The ISO8601 DateTime of the latest property value to return. For more @@ -868,6 +984,49 @@ function list_component_types( ) end +""" + list_components(entity_id, workspace_id) + list_components(entity_id, workspace_id, params::Dict{String,<:Any}) + +This API lists the components of an entity. + +# Arguments +- `entity_id`: The ID for the entity whose metadata (component/properties) is returned by + the operation. +- `workspace_id`: The workspace ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"componentPath"`: This string specifies the path to the composite component, starting + from the top-level component. +- `"maxResults"`: The maximum number of results returned at one time. The default is 25. +- `"nextToken"`: The string that specifies the next page of results. +""" +function list_components( + entityId, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return iottwinmaker( + "POST", + "/workspaces/$(workspaceId)/entities/$(entityId)/components-list"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_components( + entityId, + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iottwinmaker( + "POST", + "/workspaces/$(workspaceId)/entities/$(entityId)/components-list", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_entities(workspace_id) list_entities(workspace_id, params::Dict{String,<:Any}) @@ -907,6 +1066,104 @@ function list_entities( ) end +""" + list_metadata_transfer_jobs(destination_type, source_type) + list_metadata_transfer_jobs(destination_type, source_type, params::Dict{String,<:Any}) + +Lists the metadata transfer jobs. + +# Arguments +- `destination_type`: The metadata transfer job's destination type. +- `source_type`: The metadata transfer job's source type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: An object that filters metadata transfer jobs. +- `"maxResults"`: The maximum number of results to return at one time. +- `"nextToken"`: The string that specifies the next page of results. +""" +function list_metadata_transfer_jobs( + destinationType, sourceType; aws_config::AbstractAWSConfig=global_aws_config() +) + return iottwinmaker( + "POST", + "/metadata-transfer-jobs-list", + Dict{String,Any}("destinationType" => destinationType, "sourceType" => sourceType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_metadata_transfer_jobs( + destinationType, + sourceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iottwinmaker( + "POST", + "/metadata-transfer-jobs-list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "destinationType" => destinationType, "sourceType" => sourceType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_properties(entity_id, workspace_id) + list_properties(entity_id, workspace_id, params::Dict{String,<:Any}) + +This API lists the properties of a component. 
+ +# Arguments +- `entity_id`: The ID for the entity whose metadata (component/properties) is returned by + the operation. +- `workspace_id`: The workspace ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"componentName"`: The name of the component whose properties are returned by the + operation. +- `"componentPath"`: This string specifies the path to the composite component, starting + from the top-level component. +- `"maxResults"`: The maximum number of results returned at one time. The default is 25. +- `"nextToken"`: The string that specifies the next page of results. +""" +function list_properties( + entityId, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return iottwinmaker( + "POST", + "/workspaces/$(workspaceId)/properties-list", + Dict{String,Any}("entityId" => entityId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_properties( + entityId, + workspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iottwinmaker( + "POST", + "/workspaces/$(workspaceId)/properties-list", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("entityId" => entityId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_scenes(workspace_id) list_scenes(workspace_id, params::Dict{String,<:Any}) @@ -1193,6 +1450,9 @@ Updates information in a component type. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"componentTypeName"`: The component type name. +- `"compositeComponentTypes"`: This is an object that maps strings to + compositeComponentTypes of the componentType. CompositeComponentType is referenced by + componentTypeId. - `"description"`: The description of the component type. - `"extendsFrom"`: Specifies the component type that this component type extends. - `"functions"`: An object that maps strings to the functions in the component type. Each @@ -1242,6 +1502,9 @@ Updates an entity. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"componentUpdates"`: An object that maps strings to the component updates in the request. Each string in the mapping must be unique to this object. +- `"compositeComponentUpdates"`: This is an object that maps strings to compositeComponent + updates in the request. Each key of the map represents the componentPath of the + compositeComponent. - `"description"`: The description of the entity. - `"entityName"`: The name of the entity. - `"parentEntityUpdate"`: An object that describes the update request for a parent entity. @@ -1365,6 +1628,8 @@ Updates a workspace. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: The description of the workspace. - `"role"`: The ARN of the execution role associated with the workspace. +- `"s3Location"`: The ARN of the S3 bucket where resources associated with the workspace + are stored. 
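+
+# Example
+A minimal sketch, assuming the service module has been loaded with AWS.jl's
+`@service IoTTwinMaker` pattern; the workspace ID and ARN are placeholders.
+```julia
+using AWS: @service
+@service IoTTwinMaker
+
+# Point the workspace at a different resource bucket and update its description.
+IoTTwinMaker.update_workspace(
+    "MyWorkspace",
+    Dict(
+        "description" => "Workspace with relocated resource bucket",
+        "s3Location"  => "arn:aws:s3:::example-twinmaker-bucket",
+    ),
+)
+```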
""" function update_workspace(workspaceId; aws_config::AbstractAWSConfig=global_aws_config()) return iottwinmaker( diff --git a/src/services/ivs.jl b/src/services/ivs.jl index eefc45caa5..87bd5258d9 100644 --- a/src/services/ivs.jl +++ b/src/services/ivs.jl @@ -66,6 +66,44 @@ function batch_get_stream_key( ) end +""" + batch_start_viewer_session_revocation(viewer_sessions) + batch_start_viewer_session_revocation(viewer_sessions, params::Dict{String,<:Any}) + +Performs StartViewerSessionRevocation on multiple channel ARN and viewer ID pairs +simultaneously. + +# Arguments +- `viewer_sessions`: Array of viewer sessions, one per channel-ARN and viewer-ID pair. + +""" +function batch_start_viewer_session_revocation( + viewerSessions; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/BatchStartViewerSessionRevocation", + Dict{String,Any}("viewerSessions" => viewerSessions); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_start_viewer_session_revocation( + viewerSessions, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs( + "POST", + "/BatchStartViewerSessionRevocation", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("viewerSessions" => viewerSessions), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_channel() create_channel(params::Dict{String,<:Any}) @@ -76,52 +114,28 @@ Creates a new channel and an associated stream key to start streaming. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authorized"`: Whether the channel is private (enabled for playback authorization). Default: false. -- `"insecureIngest"`: Whether the channel allows insecure RTMP ingest. Default: false. +- `"insecureIngest"`: Whether the channel allows insecure RTMP and SRT ingest. Default: + false. - `"latencyMode"`: Channel latency mode. Use NORMAL to broadcast and deliver live video up - to Full HD. Use LOW for near-real-time interaction with viewers. (Note: In the Amazon IVS - console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.) Default: LOW. + to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW. - `"name"`: Channel name. +- `"playbackRestrictionPolicyArn"`: Playback-restriction-policy ARN. A valid ARN value here + both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no + playback restriction policy is applied). - `"preset"`: Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\"). -- `"recordingConfigurationArn"`: Recording-configuration ARN. Default: \"\" (empty string, - recording is disabled). +- `"recordingConfigurationArn"`: Recording-configuration ARN. A valid ARN value here both + specifies the ARN and enables recording. Default: \"\" (empty string, recording is + disabled). - `"tags"`: Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there. - `"type"`: Channel type, which determines the allowable resolution and bitrate. 
If you exceed the allowable input resolution or bitrate, the stream probably will disconnect - immediately. Some types generate multiple qualities (renditions) from the original input; - this automatically gives viewers the best experience for their devices and network - conditions. Some types provide transcoded video; transcoding allows higher playback quality - across a range of download speeds. Default: STANDARD. Valid values: BASIC: Video is - transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s - video-quality choice is limited to the original input. Input resolution can be up to 1080p - and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p - and 1080p. Original audio is passed through. STANDARD: Video is transcoded: multiple - qualities are generated from the original input, to automatically give viewers the best - experience for their devices and network conditions. Transcoding allows higher playback - quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be - up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio - is passed through. This is the default when you create a channel. ADVANCED_SD: Video is - transcoded; multiple qualities are generated from the original input, to automatically give - viewers the best experience for their devices and network conditions. Input resolution can - be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). - You can select an optional transcode preset (see below). Audio for all renditions is - transcoded, and an audio-only rendition is available. ADVANCED_HD: Video is transcoded; - multiple qualities are generated from the original input, to automatically give viewers the - best experience for their devices and network conditions. Input resolution can be up to - 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can - select an optional transcode preset (see below). Audio for all renditions is transcoded, - and an audio-only rendition is available. Optional transcode presets (available for the - ADVANCED types) allow you to trade off available download bandwidth and video quality, to - optimize the viewing experience. There are two presets: Constrained bandwidth delivery - uses a lower bitrate for each quality level. Use it if you have low download bandwidth - and/or simple video content (e.g., talking heads) Higher bandwidth delivery uses a - higher bitrate for each quality level. Use it if you have high download bandwidth and/or - complex video content (e.g., flashes and quick scene changes). + immediately. Default: STANDARD. For details, see Channel Types. """ function create_channel(; aws_config::AbstractAWSConfig=global_aws_config()) return ivs( @@ -140,6 +154,52 @@ function create_channel( ) end +""" + create_playback_restriction_policy() + create_playback_restriction_policy(params::Dict{String,<:Any}) + +Creates a new playback restriction policy, for constraining playback by countries and/or +origins. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"allowedCountries"`: A list of country codes that control geoblocking restriction. + Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries + (an empty array). +- `"allowedOrigins"`: A list of origin sites that control CORS restriction. 
Allowed values + are the same as valid values of the Origin header defined at + https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an + empty array). +- `"enableStrictOriginEnforcement"`: Whether channel playback is constrained by origin + site. Default: false. +- `"name"`: Playback-restriction-policy name. The value does not need to be unique. +- `"tags"`: Array of 1-50 maps, each of the form string:string (key:value). See Tagging + Amazon Web Services Resources for more information, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific + constraints beyond what is documented there. +""" +function create_playback_restriction_policy(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/CreatePlaybackRestrictionPolicy"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_playback_restriction_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/CreatePlaybackRestrictionPolicy", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_recording_configuration(destination_configuration) create_recording_configuration(destination_configuration, params::Dict{String,<:Any}) @@ -164,6 +224,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"recordingReconnectWindowSeconds"`: If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0. +- `"renditionConfiguration"`: Object that describes which renditions should be recorded for + a stream. - `"tags"`: Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific @@ -316,6 +378,39 @@ function delete_playback_key_pair( ) end +""" + delete_playback_restriction_policy(arn) + delete_playback_restriction_policy(arn, params::Dict{String,<:Any}) + +Deletes the specified playback restriction policy. + +# Arguments +- `arn`: ARN of the playback restriction policy to be deleted. + +""" +function delete_playback_restriction_policy( + arn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/DeletePlaybackRestrictionPolicy", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_playback_restriction_policy( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/DeletePlaybackRestrictionPolicy", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_recording_configuration(arn) delete_recording_configuration(arn, params::Dict{String,<:Any}) @@ -449,6 +544,39 @@ function get_playback_key_pair( ) end +""" + get_playback_restriction_policy(arn) + get_playback_restriction_policy(arn, params::Dict{String,<:Any}) + +Gets the specified playback restriction policy. + +# Arguments +- `arn`: ARN of the playback restriction policy to be returned. 
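# Example
An illustrative call only, not part of the generated API definition. The ARN is a placeholder, and valid credentials are assumed to be available to `global_aws_config()`:

    # Placeholder ARN; use one returned by create_playback_restriction_policy.
    policy = get_playback_restriction_policy(
        "arn:aws:ivs:us-west-2:123456789012:playback-restriction-policy/abcd1234efgh"
    )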
+ +""" +function get_playback_restriction_policy( + arn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/GetPlaybackRestrictionPolicy", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_playback_restriction_policy( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/GetPlaybackRestrictionPolicy", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_recording_configuration(arn) get_recording_configuration(arn, params::Dict{String,<:Any}) @@ -647,6 +775,8 @@ together. If you try to use both filters, you will get an error (409 ConflictExc # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filterByName"`: Filters the channel list to match the specified name. +- `"filterByPlaybackRestrictionPolicyArn"`: Filters the channel list to match the specified + policy. - `"filterByRecordingConfigurationArn"`: Filters the channel list to match the specified recording-configuration ARN. - `"maxResults"`: Maximum number of channels to return. Default: 100. @@ -704,6 +834,40 @@ function list_playback_key_pairs( ) end +""" + list_playback_restriction_policies() + list_playback_restriction_policies(params::Dict{String,<:Any}) + +Gets summary information about playback restriction policies. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of policies to return. Default: 1. +- `"nextToken"`: The first policy to retrieve. This is used for pagination; see the + nextToken response field. +""" +function list_playback_restriction_policies(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/ListPlaybackRestrictionPolicies"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_playback_restriction_policies( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/ListPlaybackRestrictionPolicies", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_recording_configurations() list_recording_configurations(params::Dict{String,<:Any}) @@ -932,6 +1096,58 @@ function put_metadata( ) end +""" + start_viewer_session_revocation(channel_arn, viewer_id) + start_viewer_session_revocation(channel_arn, viewer_id, params::Dict{String,<:Any}) + +Starts the process of revoking the viewer session associated with a specified channel ARN +and viewer ID. Optionally, you can provide a version to revoke viewer sessions less than +and including that version. For instructions on associating a viewer ID with a viewer +session, see Setting Up Private Channels. + +# Arguments +- `channel_arn`: The ARN of the channel associated with the viewer session to revoke. +- `viewer_id`: The ID of the viewer associated with the viewer session to revoke. Do not + use this field for personally identifying, confidential, or sensitive information. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"viewerSessionVersionsLessThanOrEqualTo"`: An optional filter on which versions of the + viewer session to revoke. All versions less than or equal to the specified version will be + revoked. Default: 0. 
+""" +function start_viewer_session_revocation( + channelArn, viewerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/StartViewerSessionRevocation", + Dict{String,Any}("channelArn" => channelArn, "viewerId" => viewerId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_viewer_session_revocation( + channelArn, + viewerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs( + "POST", + "/StartViewerSessionRevocation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("channelArn" => channelArn, "viewerId" => viewerId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_stream(channel_arn) stop_stream(channel_arn, params::Dict{String,<:Any}) @@ -1063,49 +1279,24 @@ ongoing stream, update the channel, and restart the stream for the changes to ta # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authorized"`: Whether the channel is private (enabled for playback authorization). -- `"insecureIngest"`: Whether the channel allows insecure RTMP ingest. Default: false. +- `"insecureIngest"`: Whether the channel allows insecure RTMP and SRT ingest. Default: + false. - `"latencyMode"`: Channel latency mode. Use NORMAL to broadcast and deliver live video up - to Full HD. Use LOW for near-real-time interaction with viewers. (Note: In the Amazon IVS - console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.) + to Full HD. Use LOW for near-real-time interaction with viewers. - `"name"`: Channel name. +- `"playbackRestrictionPolicyArn"`: Playback-restriction-policy ARN. A valid ARN value here + both specifies the ARN and enables playback restriction. If this is set to an empty string, + playback restriction policy is disabled. - `"preset"`: Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\"). -- `"recordingConfigurationArn"`: Recording-configuration ARN. If this is set to an empty - string, recording is disabled. A value other than an empty string indicates that recording - is enabled +- `"recordingConfigurationArn"`: Recording-configuration ARN. A valid ARN value here both + specifies the ARN and enables recording. If this is set to an empty string, recording is + disabled. - `"type"`: Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect - immediately. Some types generate multiple qualities (renditions) from the original input; - this automatically gives viewers the best experience for their devices and network - conditions. Some types provide transcoded video; transcoding allows higher playback quality - across a range of download speeds. Default: STANDARD. Valid values: BASIC: Video is - transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s - video-quality choice is limited to the original input. Input resolution can be up to 1080p - and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p - and 1080p. Original audio is passed through. 
STANDARD: Video is transcoded: multiple - qualities are generated from the original input, to automatically give viewers the best - experience for their devices and network conditions. Transcoding allows higher playback - quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be - up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio - is passed through. This is the default when you create a channel. ADVANCED_SD: Video is - transcoded; multiple qualities are generated from the original input, to automatically give - viewers the best experience for their devices and network conditions. Input resolution can - be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). - You can select an optional transcode preset (see below). Audio for all renditions is - transcoded, and an audio-only rendition is available. ADVANCED_HD: Video is transcoded; - multiple qualities are generated from the original input, to automatically give viewers the - best experience for their devices and network conditions. Input resolution can be up to - 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can - select an optional transcode preset (see below). Audio for all renditions is transcoded, - and an audio-only rendition is available. Optional transcode presets (available for the - ADVANCED types) allow you to trade off available download bandwidth and video quality, to - optimize the viewing experience. There are two presets: Constrained bandwidth delivery - uses a lower bitrate for each quality level. Use it if you have low download bandwidth - and/or simple video content (e.g., talking heads) Higher bandwidth delivery uses a - higher bitrate for each quality level. Use it if you have high download bandwidth and/or - complex video content (e.g., flashes and quick scene changes). + immediately. Default: STANDARD. For details, see Channel Types. """ function update_channel(arn; aws_config::AbstractAWSConfig=global_aws_config()) return ivs( @@ -1127,3 +1318,48 @@ function update_channel( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_playback_restriction_policy(arn) + update_playback_restriction_policy(arn, params::Dict{String,<:Any}) + +Updates a specified playback restriction policy. + +# Arguments +- `arn`: ARN of the playback-restriction-policy to be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"allowedCountries"`: A list of country codes that control geoblocking restriction. + Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries + (an empty array). +- `"allowedOrigins"`: A list of origin sites that control CORS restriction. Allowed values + are the same as valid values of the Origin header defined at + https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an + empty array). +- `"enableStrictOriginEnforcement"`: Whether channel playback is constrained by origin + site. Default: false. +- `"name"`: Playback-restriction-policy name. The value does not need to be unique. 
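# Example
A minimal usage sketch, not part of the generated API definition. The ARN and country codes are placeholders, and valid credentials are assumed to be available to `global_aws_config()`:

    # Restrict playback to two countries and enforce the origin check (placeholder ARN).
    update_playback_restriction_policy(
        "arn:aws:ivs:us-west-2:123456789012:playback-restriction-policy/abcd1234efgh",
        Dict("allowedCountries" => ["US", "CA"], "enableStrictOriginEnforcement" => true),
    )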
+""" +function update_playback_restriction_policy( + arn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/UpdatePlaybackRestrictionPolicy", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_playback_restriction_policy( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/UpdatePlaybackRestrictionPolicy", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/ivs_realtime.jl b/src/services/ivs_realtime.jl index d0d7a0637c..8785c3e1d2 100644 --- a/src/services/ivs_realtime.jl +++ b/src/services/ivs_realtime.jl @@ -4,6 +4,42 @@ using AWS.AWSServices: ivs_realtime using AWS.Compat using AWS.UUIDs +""" + create_encoder_configuration() + create_encoder_configuration(params::Dict{String,<:Any}) + +Creates an EncoderConfiguration object. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"name"`: Optional name to identify the resource. +- `"tags"`: Tags attached to the resource. Array of maps, each of the form string:string + (key:value). See Tagging AWS Resources for details, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags + beyond what is documented there. +- `"video"`: Video configuration. Default: video resolution 1280x720, bitrate 2500 kbps, 30 + fps. +""" +function create_encoder_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/CreateEncoderConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_encoder_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/CreateEncoderConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_participant_token(stage_arn) create_participant_token(stage_arn, params::Dict{String,<:Any}) @@ -64,6 +100,8 @@ Creates a new stage (and optionally participant tokens). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoParticipantRecordingConfiguration"`: Auto participant recording configuration + object attached to the stage. - `"name"`: Optional name that can be specified for the stage being created. - `"participantTokenConfigurations"`: Array of participant token configuration objects to attach to the new stage. @@ -89,6 +127,82 @@ function create_stage( ) end +""" + create_storage_configuration(s3) + create_storage_configuration(s3, params::Dict{String,<:Any}) + +Creates a new storage configuration, used to enable recording to Amazon S3. When a +StorageConfiguration is created, IVS will modify the S3 bucketPolicy of the provided +bucket. This will ensure that IVS has sufficient permissions to write content to the +provided bucket. + +# Arguments +- `s3`: A complex type that contains a storage configuration for where recorded video will + be stored. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"name"`: Storage configuration name. The value does not need to be unique. +- `"tags"`: Tags attached to the resource. Array of maps, each of the form string:string + (key:value). 
See Tagging AWS Resources for details, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags + beyond what is documented there. +""" +function create_storage_configuration(s3; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/CreateStorageConfiguration", + Dict{String,Any}("s3" => s3); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_storage_configuration( + s3, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/CreateStorageConfiguration", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("s3" => s3), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_encoder_configuration(arn) + delete_encoder_configuration(arn, params::Dict{String,<:Any}) + +Deletes an EncoderConfiguration resource. Ensures that no Compositions are using this +template; otherwise, returns an error. + +# Arguments +- `arn`: ARN of the EncoderConfiguration. + +""" +function delete_encoder_configuration( + arn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeleteEncoderConfiguration", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_encoder_configuration( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeleteEncoderConfiguration", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_stage(arn) delete_stage(arn, params::Dict{String,<:Any}) @@ -120,6 +234,42 @@ function delete_stage( ) end +""" + delete_storage_configuration(arn) + delete_storage_configuration(arn, params::Dict{String,<:Any}) + +Deletes the storage configuration for the specified ARN. If you try to delete a storage +configuration that is used by a Composition, you will get an error (409 ConflictException). +To avoid this, for all Compositions that reference the storage configuration, first use +StopComposition and wait for it to complete, then use DeleteStorageConfiguration. + +# Arguments +- `arn`: ARN of the storage configuration to be deleted. + +""" +function delete_storage_configuration( + arn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeleteStorageConfiguration", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_storage_configuration( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeleteStorageConfiguration", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disconnect_participant(participant_id, stage_arn) disconnect_participant(participant_id, stage_arn, params::Dict{String,<:Any}) @@ -168,6 +318,68 @@ function disconnect_participant( ) end +""" + get_composition(arn) + get_composition(arn, params::Dict{String,<:Any}) + +Get information about the specified Composition resource. + +# Arguments +- `arn`: ARN of the Composition resource. 
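# Example
An illustrative call only, not part of the generated API definition. The Composition ARN is a placeholder, and credentials are assumed to be configured for `global_aws_config()`:

    # Placeholder ARN; use one returned by start_composition or list_compositions.
    composition = get_composition(
        "arn:aws:ivs:us-west-2:123456789012:composition/abcdABCD1234"
    )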
+ +""" +function get_composition(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/GetComposition", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_composition( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/GetComposition", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_encoder_configuration(arn) + get_encoder_configuration(arn, params::Dict{String,<:Any}) + +Gets information about the specified EncoderConfiguration resource. + +# Arguments +- `arn`: ARN of the EncoderConfiguration resource. + +""" +function get_encoder_configuration(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/GetEncoderConfiguration", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_encoder_configuration( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/GetEncoderConfiguration", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_participant(participant_id, session_id, stage_arn) get_participant(participant_id, session_id, stage_arn, params::Dict{String,<:Any}) @@ -296,6 +508,103 @@ function get_stage_session( ) end +""" + get_storage_configuration(arn) + get_storage_configuration(arn, params::Dict{String,<:Any}) + +Gets the storage configuration for the specified ARN. + +# Arguments +- `arn`: ARN of the storage configuration to be retrieved. + +""" +function get_storage_configuration(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/GetStorageConfiguration", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_storage_configuration( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/GetStorageConfiguration", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_compositions() + list_compositions(params::Dict{String,<:Any}) + +Gets summary information about all Compositions in your account, in the AWS region where +the API request is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filterByEncoderConfigurationArn"`: Filters the Composition list to match the specified + EncoderConfiguration attached to at least one of its output. +- `"filterByStageArn"`: Filters the Composition list to match the specified Stage ARN. +- `"maxResults"`: Maximum number of results to return. Default: 100. +- `"nextToken"`: The first Composition to retrieve. This is used for pagination; see the + nextToken response field. 
+""" +function list_compositions(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", "/ListCompositions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_compositions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ListCompositions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_encoder_configurations() + list_encoder_configurations(params::Dict{String,<:Any}) + +Gets summary information about all EncoderConfigurations in your account, in the AWS region +where the API request is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of results to return. Default: 100. +- `"nextToken"`: The first encoder configuration to retrieve. This is used for pagination; + see the nextToken response field. +""" +function list_encoder_configurations(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/ListEncoderConfigurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_encoder_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ListEncoderConfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_participant_events(participant_id, session_id, stage_arn) list_participant_events(participant_id, session_id, stage_arn, params::Dict{String,<:Any}) @@ -311,8 +620,8 @@ Lists events for a specified participant that occurred during a specified stage # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: Maximum number of results to return. Default: 50. -- `"nextToken"`: The first participant to retrieve. This is used for pagination; see the - nextToken response field. +- `"nextToken"`: The first participant event to retrieve. This is used for pagination; see + the nextToken response field. """ function list_participant_events( participantId, sessionId, stageArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -368,15 +677,18 @@ Lists all participants in a specified stage session. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filterByPublished"`: Filters the response list to only show participants who published - during the stage session. Only one of filterByUserId, filterByPublished, or filterByState - can be provided per request. + during the stage session. Only one of filterByUserId, filterByPublished, filterByState, or + filterByRecordingState can be provided per request. +- `"filterByRecordingState"`: Filters the response list to only show participants with the + specified recording state. Only one of filterByUserId, filterByPublished, filterByState, or + filterByRecordingState can be provided per request. - `"filterByState"`: Filters the response list to only show participants in the specified - state. Only one of filterByUserId, filterByPublished, or filterByState can be provided per - request. + state. Only one of filterByUserId, filterByPublished, filterByState, or + filterByRecordingState can be provided per request. - `"filterByUserId"`: Filters the response list to match the specified user ID. 
Only one of - filterByUserId, filterByPublished, or filterByState can be provided per request. A userId - is a customer-assigned name to help identify the token; this can be used to link a - participant to a user in the customer’s own systems. + filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided + per request. A userId is a customer-assigned name to help identify the token; this can be + used to link a participant to a user in the customer’s own systems. - `"maxResults"`: Maximum number of results to return. Default: 50. - `"nextToken"`: The first participant to retrieve. This is used for pagination; see the nextToken response field. @@ -425,7 +737,7 @@ Gets all sessions for a specified stage. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: Maximum number of results to return. Default: 50. -- `"nextToken"`: The first stage to retrieve. This is used for pagination; see the +- `"nextToken"`: The first stage session to retrieve. This is used for pagination; see the nextToken response field. """ function list_stage_sessions(stageArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -483,6 +795,40 @@ function list_stages( ) end +""" + list_storage_configurations() + list_storage_configurations(params::Dict{String,<:Any}) + +Gets summary information about all storage configurations in your account, in the AWS +region where the API request is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of storage configurations to return. Default: your service + quota or 100, whichever is smaller. +- `"nextToken"`: The first storage configuration to retrieve. This is used for pagination; + see the nextToken response field. +""" +function list_storage_configurations(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/ListStorageConfigurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_storage_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ListStorageConfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -517,6 +863,103 @@ function list_tags_for_resource( ) end +""" + start_composition(destinations, stage_arn) + start_composition(destinations, stage_arn, params::Dict{String,<:Any}) + +Starts a Composition from a stage based on the configuration provided in the request. A +Composition is an ephemeral resource that exists after this endpoint returns successfully. +Composition stops and the resource is deleted: When StopComposition is called. After a +1-minute timeout, when all participants are disconnected from the stage. After a 1-minute +timeout, if there are no participants in the stage when StartComposition is called. When +broadcasting to the IVS channel fails and all retries are exhausted. When broadcasting is +disconnected and all attempts to reconnect are exhausted. + +# Arguments +- `destinations`: Array of destination configuration. +- `stage_arn`: ARN of the stage to be used for compositing. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"idempotencyToken"`: Idempotency token. 
+- `"layout"`: Layout object to configure composition parameters. +- `"tags"`: Tags attached to the resource. Array of maps, each of the form string:string + (key:value). See Tagging AWS Resources for details, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags + beyond what is documented there. +""" +function start_composition( + destinations, stageArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/StartComposition", + Dict{String,Any}( + "destinations" => destinations, + "stageArn" => stageArn, + "idempotencyToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_composition( + destinations, + stageArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs_realtime( + "POST", + "/StartComposition", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "destinations" => destinations, + "stageArn" => stageArn, + "idempotencyToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_composition(arn) + stop_composition(arn, params::Dict{String,<:Any}) + +Stops and deletes a Composition resource. Any broadcast from the Composition resource is +stopped. + +# Arguments +- `arn`: ARN of the Composition. + +""" +function stop_composition(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/StopComposition", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_composition( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/StopComposition", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -606,6 +1049,9 @@ Updates a stage’s configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoParticipantRecordingConfiguration"`: Auto-participant-recording configuration + object to attach to the stage. Auto-participant-recording configuration cannot be updated + while recording is active. - `"name"`: Name of the stage to be updated. """ function update_stage(arn; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/kafka.jl b/src/services/kafka.jl index 79bbe07693..3d7d9b5b2e 100644 --- a/src/services/kafka.jl +++ b/src/services/kafka.jl @@ -282,6 +282,74 @@ function create_configuration( ) end +""" + create_replicator(kafka_clusters, replication_info_list, replicator_name, service_execution_role_arn) + create_replicator(kafka_clusters, replication_info_list, replicator_name, service_execution_role_arn, params::Dict{String,<:Any}) + +Creates the replicator. + +# Arguments +- `kafka_clusters`: Kafka Clusters to use in setting up sources / targets for replication. +- `replication_info_list`: A list of replication configurations, where each configuration + targets a given source cluster to target cluster replication flow. +- `replicator_name`: The name of the replicator. Alpha-numeric characters with '-' are + allowed. 
+- `service_execution_role_arn`: The ARN of the IAM role used by the replicator to access + resources in the customer's account (e.g source and target clusters) + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A summary description of the replicator. +- `"tags"`: List of tags to attach to created Replicator. +""" +function create_replicator( + kafkaClusters, + replicationInfoList, + replicatorName, + serviceExecutionRoleArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "POST", + "/replication/v1/replicators", + Dict{String,Any}( + "kafkaClusters" => kafkaClusters, + "replicationInfoList" => replicationInfoList, + "replicatorName" => replicatorName, + "serviceExecutionRoleArn" => serviceExecutionRoleArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_replicator( + kafkaClusters, + replicationInfoList, + replicatorName, + serviceExecutionRoleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "POST", + "/replication/v1/replicators", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "kafkaClusters" => kafkaClusters, + "replicationInfoList" => replicationInfoList, + "replicatorName" => replicatorName, + "serviceExecutionRoleArn" => serviceExecutionRoleArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_vpc_connection(authentication, client_subnets, security_groups, target_cluster_arn, vpc_id) create_vpc_connection(authentication, client_subnets, security_groups, target_cluster_arn, vpc_id, params::Dict{String,<:Any}) @@ -469,6 +537,41 @@ function delete_configuration( ) end +""" + delete_replicator(replicator_arn) + delete_replicator(replicator_arn, params::Dict{String,<:Any}) + +Deletes a replicator. + +# Arguments +- `replicator_arn`: The Amazon Resource Name (ARN) of the replicator to be deleted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"currentVersion"`: The current version of the replicator. +""" +function delete_replicator(replicatorArn; aws_config::AbstractAWSConfig=global_aws_config()) + return kafka( + "DELETE", + "/replication/v1/replicators/$(replicatorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_replicator( + replicatorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "DELETE", + "/replication/v1/replicators/$(replicatorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_vpc_connection(arn) delete_vpc_connection(arn, params::Dict{String,<:Any}) @@ -575,6 +678,41 @@ function describe_cluster_operation( ) end +""" + describe_cluster_operation_v2(cluster_operation_arn) + describe_cluster_operation_v2(cluster_operation_arn, params::Dict{String,<:Any}) + + + Returns a description of the cluster operation specified by the ARN. + +# Arguments +- `cluster_operation_arn`: ARN of the cluster operation to describe. 
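# Example
An illustrative call only, not part of the generated API definition. The operation ARN is a placeholder, and credentials are assumed to be configured for `global_aws_config()`:

    # Placeholder cluster-operation ARN, e.g. as returned from a cluster update call.
    operation = describe_cluster_operation_v2(
        "arn:aws:kafka:us-west-2:123456789012:cluster-operation/demo-cluster/abcd-1234/xyz"
    )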
+ +""" +function describe_cluster_operation_v2( + clusterOperationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafka( + "GET", + "/api/v2/operations/$(clusterOperationArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_cluster_operation_v2( + clusterOperationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "GET", + "/api/v2/operations/$(clusterOperationArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_cluster_v2(cluster_arn) describe_cluster_v2(cluster_arn, params::Dict{String,<:Any}) @@ -685,6 +823,40 @@ function describe_configuration_revision( ) end +""" + describe_replicator(replicator_arn) + describe_replicator(replicator_arn, params::Dict{String,<:Any}) + +Describes a replicator. + +# Arguments +- `replicator_arn`: The Amazon Resource Name (ARN) of the replicator to be described. + +""" +function describe_replicator( + replicatorArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafka( + "GET", + "/replication/v1/replicators/$(replicatorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_replicator( + replicatorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "GET", + "/replication/v1/replicators/$(replicatorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_vpc_connection(arn) describe_vpc_connection(arn, params::Dict{String,<:Any}) @@ -918,6 +1090,46 @@ function list_cluster_operations( ) end +""" + list_cluster_operations_v2(cluster_arn) + list_cluster_operations_v2(cluster_arn, params::Dict{String,<:Any}) + + + Returns a list of all the operations that have been performed on the specified +MSK cluster. + +# Arguments +- `cluster_arn`: The arn of the cluster whose operations are being requested. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maxResults of the query. +- `"nextToken"`: The nextToken of the query. +""" +function list_cluster_operations_v2( + clusterArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafka( + "GET", + "/api/v2/clusters/$(clusterArn)/operations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cluster_operations_v2( + clusterArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "GET", + "/api/v2/clusters/$(clusterArn)/operations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_clusters() list_clusters(params::Dict{String,<:Any}) @@ -1157,6 +1369,40 @@ function list_nodes( ) end +""" + list_replicators() + list_replicators(params::Dict{String,<:Any}) + +Lists the replicators. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If there are + more results, the response includes a NextToken parameter. +- `"nextToken"`: If the response of ListReplicators is truncated, it returns a NextToken in + the response. This NextToken should be sent in the subsequent request to ListReplicators. +- `"replicatorNameFilter"`: Returns replicators starting with given name. 
+""" +function list_replicators(; aws_config::AbstractAWSConfig=global_aws_config()) + return kafka( + "GET", + "/replication/v1/replicators"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_replicators( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafka( + "GET", + "/replication/v1/replicators", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_scram_secrets(cluster_arn) list_scram_secrets(cluster_arn, params::Dict{String,<:Any}) @@ -1972,6 +2218,69 @@ function update_monitoring( ) end +""" + update_replication_info(current_version, replicator_arn, source_kafka_cluster_arn, target_kafka_cluster_arn) + update_replication_info(current_version, replicator_arn, source_kafka_cluster_arn, target_kafka_cluster_arn, params::Dict{String,<:Any}) + +Updates replication info of a replicator. + +# Arguments +- `current_version`: Current replicator version. +- `replicator_arn`: The Amazon Resource Name (ARN) of the replicator to be updated. +- `source_kafka_cluster_arn`: The ARN of the source Kafka cluster. +- `target_kafka_cluster_arn`: The ARN of the target Kafka cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"consumerGroupReplication"`: Updated consumer group replication information. +- `"topicReplication"`: Updated topic replication information. +""" +function update_replication_info( + currentVersion, + replicatorArn, + sourceKafkaClusterArn, + targetKafkaClusterArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "PUT", + "/replication/v1/replicators/$(replicatorArn)/replication-info", + Dict{String,Any}( + "currentVersion" => currentVersion, + "sourceKafkaClusterArn" => sourceKafkaClusterArn, + "targetKafkaClusterArn" => targetKafkaClusterArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_replication_info( + currentVersion, + replicatorArn, + sourceKafkaClusterArn, + targetKafkaClusterArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "PUT", + "/replication/v1/replicators/$(replicatorArn)/replication-info", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "currentVersion" => currentVersion, + "sourceKafkaClusterArn" => sourceKafkaClusterArn, + "targetKafkaClusterArn" => targetKafkaClusterArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_security(cluster_arn, current_version) update_security(cluster_arn, current_version, params::Dict{String,<:Any}) diff --git a/src/services/kafkaconnect.jl b/src/services/kafkaconnect.jl index 0079b31707..610d051f90 100644 --- a/src/services/kafkaconnect.jl +++ b/src/services/kafkaconnect.jl @@ -23,7 +23,11 @@ Creates a connector using the specified properties. Kafka cluster. - `kafka_connect_version`: The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins. -- `plugins`: Specifies which plugins to use for the connector. +- `plugins`: Amazon MSK Connect does not currently support specifying multiple plugins as + a list. To use more than one plugin for your connector, you can create a single custom + plugin using a ZIP file that bundles multiple plugins together. Specifies which plugin to + use for the connector. You must specify a single-element list containing one customPlugin + object. 
- `service_execution_role_arn`: The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a @@ -33,6 +37,7 @@ Creates a connector using the specified properties. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"connectorDescription"`: A summary description of the connector. - `"logDelivery"`: Details about log delivery. +- `"tags"`: The tags you want to attach to the connector. - `"workerConfiguration"`: Specifies which worker configuration to use with the connector. """ function create_connector( @@ -117,6 +122,7 @@ Creates a custom plugin using the specified properties. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: A summary description of the custom plugin. +- `"tags"`: The tags you want to attach to the custom plugin. """ function create_custom_plugin( contentType, location, name; aws_config::AbstractAWSConfig=global_aws_config() @@ -168,6 +174,7 @@ Creates a worker configuration using the specified properties. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: A summary description of the worker configuration. +- `"tags"`: The tags you want to attach to the worker configuration. """ function create_worker_configuration( name, propertiesFileContent; aws_config::AbstractAWSConfig=global_aws_config() @@ -273,6 +280,41 @@ function delete_custom_plugin( ) end +""" + delete_worker_configuration(worker_configuration_arn) + delete_worker_configuration(worker_configuration_arn, params::Dict{String,<:Any}) + +Deletes the specified worker configuration. + +# Arguments +- `worker_configuration_arn`: The Amazon Resource Name (ARN) of the worker configuration + that you want to delete. + +""" +function delete_worker_configuration( + workerConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafkaconnect( + "DELETE", + "/v1/worker-configurations/$(workerConfigurationArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_worker_configuration( + workerConfigurationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafkaconnect( + "DELETE", + "/v1/worker-configurations/$(workerConfigurationArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_connector(connector_arn) describe_connector(connector_arn, params::Dict{String,<:Any}) @@ -418,6 +460,7 @@ Returns a list of all of the custom plugins in this account and Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of custom plugins to list in one response. +- `"namePrefix"`: Lists custom plugin names that start with the specified text string. - `"nextToken"`: If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off. @@ -439,6 +482,41 @@ function list_custom_plugins( ) end +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists all the tags attached to the specified resource. 
+ +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which you want to list + all attached tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafkaconnect( + "GET", + "/v1/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafkaconnect( + "GET", + "/v1/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_worker_configurations() list_worker_configurations(params::Dict{String,<:Any}) @@ -448,6 +526,8 @@ Returns a list of all of the worker configurations in this account and Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of worker configurations to list in one response. +- `"namePrefix"`: Lists worker configuration names that start with the specified text + string. - `"nextToken"`: If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off. @@ -472,6 +552,80 @@ function list_worker_configurations( ) end +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Attaches tags to the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to which you want to + attach tags. +- `tags`: The tags that you want to attach to the resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return kafkaconnect( + "POST", + "/v1/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafkaconnect( + "POST", + "/v1/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags from the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource from which you want to + remove tags. +- `tag_keys`: The keys of the tags that you want to remove from the resource. 
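# Example
An illustrative call only, not part of the generated API definition. The connector ARN and tag keys are placeholders, and credentials are assumed to be configured for `global_aws_config()`:

    # Remove two tags from a connector (placeholder ARN and keys).
    untag_resource(
        "arn:aws:kafkaconnect:us-west-2:123456789012:connector/demo-connector/abcd-1234",
        ["environment", "team"],
    )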
+ +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafkaconnect( + "DELETE", + "/v1/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafkaconnect( + "DELETE", + "/v1/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_connector(capacity, connector_arn, current_version) update_connector(capacity, connector_arn, current_version, params::Dict{String,<:Any}) diff --git a/src/services/kendra.jl b/src/services/kendra.jl index 4f8173e822..c22b1aa8a2 100644 --- a/src/services/kendra.jl +++ b/src/services/kendra.jl @@ -107,7 +107,10 @@ end Removes one or more documents from an index. The documents must have been added with the BatchPutDocument API. The documents are deleted asynchronously. You can see the progress of the deletion by using Amazon Web Services CloudWatch. Any error messages related to the -processing of the batch are sent to you CloudWatch log. +processing of the batch are sent to your Amazon Web Services CloudWatch log. You can also +use the BatchGetDocumentStatus API to monitor the progress of deleting your documents. +Deleting documents from an index using BatchDeleteDocument could take up to an hour or +more, depending on the number of documents you want to delete. # Arguments - `document_id_list`: One or more identifiers for documents to delete from the index. @@ -256,8 +259,9 @@ ingest your text and unstructured text into an index, add custom attributes to t documents, and to attach an access control list to the documents added to the index. The documents are indexed asynchronously. You can see the progress of the batch using Amazon Web Services CloudWatch. Any error messages related to processing the batch are sent to -your Amazon Web Services CloudWatch log. For an example of ingesting inline documents using -Python and Java SDKs, see Adding files directly to an index. +your Amazon Web Services CloudWatch log. You can also use the BatchGetDocumentStatus API to +monitor the progress of indexing your documents. For an example of ingesting inline +documents using Python and Java SDKs, see Adding files directly to an index. # Arguments - `documents`: One or more documents to add to the index. Documents have the following file @@ -532,8 +536,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Description"`: A description for your Amazon Kendra experience. - `"RoleArn"`: The Amazon Resource Name (ARN) of an IAM role with permission to access Query API, GetQuerySuggestions API, and other required APIs. The role also must include - permission to access IAM Identity Center (successor to Single Sign-On) that stores your - user and group information. For more information, see IAM access roles for Amazon Kendra. + permission to access IAM Identity Center that stores your user and group information. For + more information, see IAM access roles for Amazon Kendra. """ function create_experience(IndexId, Name; aws_config::AbstractAWSConfig=global_aws_config()) return kendra( @@ -591,9 +595,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Description"`: A description for the FAQ. 
- `"FileFormat"`: The format of the FAQ input file. You can choose between a basic CSV format, a CSV format that includes customs attributes in a header, and a JSON format that - includes custom attributes. The format must match the format of the file stored in the S3 - bucket identified in the S3Path parameter. For more information, see Adding questions and - answers. + includes custom attributes. The default format is CSV. The format must match the format of + the file stored in the S3 bucket identified in the S3Path parameter. For more information, + see Adding questions and answers. - `"LanguageCode"`: The code for a language. This allows you to support a language for the FAQ document. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English. @@ -720,7 +724,7 @@ end Creates an Amazon Kendra index. Index creation is an asynchronous API. To determine if index creation has completed, check the Status field returned from a call to DescribeIndex. -The Status field is set to ACTIVE when the index is ready to use. Once the index is active +The Status field is set to ACTIVE when the index is ready to use. Once the index is active, you can index your documents using the BatchPutDocument API or using one of the supported data sources. For an example of creating an index and data source using the Python SDK, see Getting started with Python SDK. For an example of creating an index and data source using @@ -755,8 +759,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys control to filter search results on user context. All documents with no access control and all documents accessible to the user will be searchable and displayable. - `"UserGroupResolutionConfiguration"`: Gets users and groups from IAM Identity Center - (successor to Single Sign-On) identity source. To configure this, see - UserGroupResolutionConfiguration. + identity source. To configure this, see UserGroupResolutionConfiguration. This is useful + for user context filtering, where search results are filtered based on the user or their + group access to documents. - `"UserTokenConfigurations"`: The user token configuration. """ function create_index(Name, RoleArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -989,7 +994,9 @@ end Deletes an Amazon Kendra data source connector. An exception is not thrown if the data source is already being deleted. While the data source is being deleted, the Status field returned by a call to the DescribeDataSource API is set to DELETING. For more information, -see Deleting Data Sources. +see Deleting Data Sources. Deleting an entire data source or re-syncing your index after +deleting specific documents from a data source could take up to an hour or more, depending +on the number of documents you want to delete. # Arguments - `id`: The identifier of the data source connector you want to delete. @@ -1095,8 +1102,8 @@ end delete_index(id) delete_index(id, params::Dict{String,<:Any}) -Deletes an existing Amazon Kendra index. An exception is not thrown if the index is already -being deleted. While the index is being deleted, the Status field returned by a call to the +Deletes an Amazon Kendra index. An exception is not thrown if the index is already being +deleted. While the index is being deleted, the Status field returned by a call to the DescribeIndex API is set to DELETING. 
# Arguments @@ -1234,7 +1241,7 @@ end delete_thesaurus(id, index_id) delete_thesaurus(id, index_id, params::Dict{String,<:Any}) -Deletes an existing Amazon Kendra thesaurus. +Deletes an Amazon Kendra thesaurus. # Arguments - `id`: The identifier of the thesaurus you want to delete. @@ -1466,7 +1473,7 @@ end describe_index(id) describe_index(id, params::Dict{String,<:Any}) -Gets information about an existing Amazon Kendra index. +Gets information about an Amazon Kendra index. # Arguments - `id`: The identifier of the index you want to get information on. @@ -1619,7 +1626,7 @@ end describe_thesaurus(id, index_id) describe_thesaurus(id, index_id, params::Dict{String,<:Any}) -Gets information about an existing Amazon Kendra thesaurus. +Gets information about an Amazon Kendra thesaurus. # Arguments - `id`: The identifier of the thesaurus you want to get information on. @@ -2473,50 +2480,60 @@ end query(index_id) query(index_id, params::Dict{String,<:Any}) -Searches an active index. Use this API to search your documents using query. The Query API -enables to do faceted search and to filter results based on document attributes. It also -enables you to provide user context that Amazon Kendra uses to enforce document access -control in the search results. Amazon Kendra searches your index for text content and -question and answer (FAQ) content. By default the response contains three types of results. - Relevant passages Matching FAQs Relevant documents You can specify that the query -return only one type of result using the QueryResultTypeFilter parameter. Each query -returns the 100 most relevant results. - -# Arguments -- `index_id`: The identifier of the index to search. The identifier is returned in the - response from the CreateIndex API. +Searches an index given an input query. If you are working with large language models +(LLMs) or implementing retrieval augmented generation (RAG) systems, you can use Amazon +Kendra's Retrieve API, which can return longer semantically relevant passages. We recommend +using the Retrieve API instead of filing a service limit increase to increase the Query API +document excerpt length. You can configure boosting or relevance tuning at the query level +to override boosting at the index level, filter based on document fields/attributes and +faceted search, and filter based on the user or their group access to documents. You can +also include certain fields in the response that might provide useful additional +information. A query response contains three types of results. Relevant suggested +answers. The answers can be either a text excerpt or table excerpt. The answer can be +highlighted in the excerpt. Matching FAQs or questions-answer from your FAQ file. +Relevant documents. This result type includes an excerpt of the document with the document +title. The searched terms can be highlighted in the excerpt. You can specify that the +query return only one type of result using the QueryResultTypeFilter parameter. Each query +returns the 100 most relevant results. If you filter result type to only question-answers, +a maximum of four results are returned. If you filter result type to only answers, a +maximum of three results are returned. + +# Arguments +- `index_id`: The identifier of the index for the search. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AttributeFilter"`: Enables filtered searches based on document attributes. 
You can only +- `"AttributeFilter"`: Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters - parameters contain a list of other filters. The AttributeFilter parameter enables you to + parameters contain a list of other filters. The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results. +- `"CollapseConfiguration"`: Provides configuration to determine how to group results by + document attribute value, and how to display them (collapsed or expanded) under a + designated primary document for each group. - `"DocumentRelevanceOverrideConfigurations"`: Overrides relevance tuning configurations of - fields or attributes set at the index level. If you use this API to override the relevance + fields/attributes set at the index level. If you use this API to override the relevance tuning configured at the index level, but there is no relevance tuning configured at the index level, then Amazon Kendra does not apply any relevance tuning. If there is relevance - tuning configured at the index level, but you do not use this API to override any relevance - tuning in the index, then Amazon Kendra uses the relevance tuning that is configured at the - index level. If there is relevance tuning configured for fields at the index level, but you - use this API to override only some of these fields, then for the fields you did not - override, the importance is set to 1. -- `"Facets"`: An array of documents attributes. Amazon Kendra returns a count for each - attribute key specified. This helps your users narrow their search. + tuning configured for fields at the index level, and you use this API to override only some + of these fields, then for the fields you did not override, the importance is set to 1. +- `"Facets"`: An array of documents fields/attributes for faceted search. Amazon Kendra + returns a count for each field key specified. This helps your users narrow their search. - `"PageNumber"`: Query results are returned in pages the size of the PageSize parameter. By default, Amazon Kendra returns the first page of results. Use this parameter to get result pages after the first one. - `"PageSize"`: Sets the number of results that are returned in each page of results. The default page size is 10. The maximum number of results returned is 100. If you ask for more than 100 results, only 100 are returned. -- `"QueryResultTypeFilter"`: Sets the type of query. Only results for the specified query - type are returned. +- `"QueryResultTypeFilter"`: Sets the type of query result or response. Only results for + the specified type are returned. - `"QueryText"`: The input query text for the search. Amazon Kendra truncates queries at 30 token words, which excludes punctuation and stop words. Truncation still applies if you use - Boolean or more advanced, complex queries. -- `"RequestedDocumentAttributes"`: An array of document attributes to include in the - response. You can limit the response to include certain document attributes. By default all + Boolean or more advanced, complex queries. For example, Timeoff AND October AND Category:HR + is counted as 3 tokens: timeoff, october, hr. For more information, see Searching with + advanced query syntax in the Amazon Kendra Developer Guide. +- `"RequestedDocumentAttributes"`: An array of document fields/attributes to include in the + response. 
You can limit the response to include certain document fields. By default, all document attributes are included in the response. - `"SortingConfiguration"`: Provides information that determines how the results of the query are sorted. You can set the field that Amazon Kendra should sort the results on, and @@ -2524,6 +2541,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys of ties in sorting the results, the results are sorted by relevance. If you don't provide sorting configuration, the results are sorted by the relevance that Amazon Kendra determines for the result. +- `"SortingConfigurations"`: Provides configuration information to determine how the + results of a query are sorted. You can set up to 3 fields that Amazon Kendra should sort the + results on, and specify whether the results should be sorted in ascending or descending + order. The sort field quota can be increased. If you don't provide a sorting configuration, + the results are sorted by the relevance that Amazon Kendra determines for the result. In + the case of ties in sorting the results, the results are sorted by relevance. - `"SpellCorrectionConfiguration"`: Enables suggested spell corrections for queries. - `"UserContext"`: The user context token or user and group information. - `"VisitorId"`: Provides an identifier for a specific user. The VisitorId should be a @@ -2549,12 +2572,97 @@ function query( ) end +""" + retrieve(index_id, query_text) + retrieve(index_id, query_text, params::Dict{String,<:Any}) + +Retrieves relevant passages or text excerpts given an input query. This API is similar to +the Query API. However, by default, the Query API only returns excerpt passages of up to +100 token words. With the Retrieve API, you can retrieve longer passages of up to 200 token +words and up to 100 semantically relevant passages. This doesn't include question-answer or +FAQ type responses from your index. The passages are text excerpts that can be semantically +extracted from multiple documents and multiple parts of the same document. If in extreme +cases your documents produce zero passages using the Retrieve API, you can alternatively +use the Query API and its types of responses. You can also do the following: Override +boosting at the index level Filter based on document fields or attributes Filter based +on the user or their group access to documents View the confidence score bucket for a +retrieved passage result. The confidence bucket provides a relative ranking that indicates +how confident Amazon Kendra is that the response is relevant to the query. Confidence +score buckets are currently available only for English. You can also include certain +fields in the response that might provide useful additional information. The Retrieve API +shares the number of query capacity units that you set for your index. For more information +on what's included in a single capacity unit and the default base capacity for an index, +see Adjusting capacity. + +# Arguments +- `index_id`: The identifier of the index to retrieve relevant passages for the search. +- `query_text`: The input query text to retrieve relevant passages for the search. Amazon + Kendra truncates queries at 30 token words, which excludes punctuation and stop words. + Truncation still applies if you use Boolean or more advanced, complex queries. For example, + Timeoff AND October AND Category:HR is counted as 3 tokens: timeoff, october, hr.
For more + information, see Searching with advanced query syntax in the Amazon Kendra Developer Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttributeFilter"`: Filters search results by document fields/attributes. You can only + provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters + parameters contain a list of other filters. The AttributeFilter parameter means you can + create a set of filtering rules that a document must satisfy to be included in the query + results. +- `"DocumentRelevanceOverrideConfigurations"`: Overrides relevance tuning configurations of + fields/attributes set at the index level. If you use this API to override the relevance + tuning configured at the index level, but there is no relevance tuning configured at the + index level, then Amazon Kendra does not apply any relevance tuning. If there is relevance + tuning configured for fields at the index level, and you use this API to override only some + of these fields, then for the fields you did not override, the importance is set to 1. +- `"PageNumber"`: Retrieved relevant passages are returned in pages the size of the + PageSize parameter. By default, Amazon Kendra returns the first page of results. Use this + parameter to get result pages after the first one. +- `"PageSize"`: Sets the number of retrieved relevant passages that are returned in each + page of results. The default page size is 10. The maximum number of results returned is + 100. If you ask for more than 100 results, only 100 are returned. +- `"RequestedDocumentAttributes"`: A list of document fields/attributes to include in the + response. You can limit the response to include certain document fields. By default, all + document fields are included in the response. +- `"UserContext"`: The user context token or user and group information. +""" +function retrieve(IndexId, QueryText; aws_config::AbstractAWSConfig=global_aws_config()) + return kendra( + "Retrieve", + Dict{String,Any}("IndexId" => IndexId, "QueryText" => QueryText); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function retrieve( + IndexId, + QueryText, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kendra( + "Retrieve", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IndexId" => IndexId, "QueryText" => QueryText), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_data_source_sync_job(id, index_id) start_data_source_sync_job(id, index_id, params::Dict{String,<:Any}) Starts a synchronization job for a data source connector. If a synchronization job is -already in progress, Amazon Kendra returns a ResourceInUseException exception. +already in progress, Amazon Kendra returns a ResourceInUseException exception. Re-syncing +your data source with your index after modifying, adding, or deleting documents from your +data source repository could take up to an hour or more, depending on the number of +documents to sync. # Arguments - `id`: The identifier of the data source connector to synchronize. @@ -2823,7 +2931,7 @@ end update_data_source(id, index_id) update_data_source(id, index_id, params::Dict{String,<:Any}) -Updates an existing Amazon Kendra data source connector. +Updates an Amazon Kendra data source connector. # Arguments - `id`: The identifier of the data source connector you want to update.
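For orientation, a minimal usage sketch of the Retrieve operation added above, called through AWS.jl's high-level interface. This is illustrative only, not generated code: the index ID and query text are hypothetical, and it assumes the usual `@service` pattern with credentials resolved from the default AWS configuration.

using AWS
@service Kendra

# Hypothetical index identifier; use the ID returned by CreateIndex.
index_id = "0a1b2c3d-4e5f-6789-abcd-ef0123456789"

# Retrieve returns longer, semantically relevant passages (useful for RAG workflows).
passages = Kendra.retrieve(index_id, "How do I rotate access keys?")

# The same question through the Query API; optional parameters are passed as a Dict,
# here restricting results to answer-type responses.
answers = Kendra.query(
    index_id,
    Dict(
        "QueryText" => "How do I rotate access keys?",
        "QueryResultTypeFilter" => "ANSWER",
    ),
)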
@@ -2985,7 +3093,7 @@ end update_index(id) update_index(id, params::Dict{String,<:Any}) -Updates an existing Amazon Kendra index. +Updates an Amazon Kendra index. # Arguments - `id`: The identifier of the index you want to update. @@ -3000,13 +3108,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DocumentMetadataConfigurationUpdates"`: The document metadata configuration you want to update for the index. Document metadata are fields or attributes associated with your documents. For example, the company department name associated with each document. -- `"Name"`: The name of the index you want to update. +- `"Name"`: A new name for the index. - `"RoleArn"`: An Identity and Access Management (IAM) role that gives Amazon Kendra permission to access Amazon CloudWatch logs and metrics. - `"UserContextPolicy"`: The user context policy. -- `"UserGroupResolutionConfiguration"`: Enables fetching access levels of groups and users - from an IAM Identity Center (successor to Single Sign-On) identity source. To configure - this, see UserGroupResolutionConfiguration. +- `"UserGroupResolutionConfiguration"`: Gets users and groups from IAM Identity Center + identity source. To configure this, see UserGroupResolutionConfiguration. This is useful + for user context filtering, where search results are filtered based on the user or their + group access to documents. - `"UserTokenConfigurations"`: The user token configuration. """ function update_index(Id; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/keyspaces.jl b/src/services/keyspaces.jl index d14a9dca60..7c220d8604 100644 --- a/src/services/keyspaces.jl +++ b/src/services/keyspaces.jl @@ -82,6 +82,13 @@ Creating tables in the Amazon Keyspaces Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoScalingSpecification"`: The optional auto scaling settings for a table in + provisioned capacity mode. Specifies if the service can manage throughput capacity + automatically on your behalf. Auto scaling helps you provision throughput capacity for + variable workloads efficiently by increasing and decreasing your table's read and write + capacity automatically in response to application traffic. For more information, see + Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon + Keyspaces Developer Guide. By default, auto scaling is disabled for a table. - `"capacitySpecification"`: Specifies the read/write throughput capacity mode for the table. The options are: throughputMode:PAY_PER_REQUEST and throughputMode:PROVISIONED - Provisioned capacity mode requires readCapacityUnits and @@ -106,6 +113,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys table. The options are: status=ENABLED status=DISABLED If it's not specified, the default is status=DISABLED. For more information, see Point-in-time recovery in the Amazon Keyspaces Developer Guide. +- `"replicaSpecifications"`: The optional Amazon Web Services Region specific settings of a + multi-Region table. These settings overwrite the general settings of the table for the + specified Region. For a multi-Region table in provisioned capacity mode, you can configure + the table's read capacity differently for each Region's replica. The write capacity, + however, remains synchronized between all replicas to ensure that there's enough capacity + to replicate writes across all Regions. 
To define the read capacity for a table replica in + a specific Region, you can do so by configuring the following parameters. region: The + Region where these settings are applied. (Required) readCapacityUnits: The provisioned + read capacity units. (Optional) readCapacityAutoScaling: The read capacity auto scaling + settings for the table. (Optional) - `"tags"`: A list of key-value pair tags to be attached to the resource. For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer Guide. @@ -313,6 +330,57 @@ function get_table( ) end +""" + get_table_auto_scaling_settings(keyspace_name, table_name) + get_table_auto_scaling_settings(keyspace_name, table_name, params::Dict{String,<:Any}) + +Returns auto scaling related settings of the specified table in JSON format. If the table +is a multi-Region table, the Amazon Web Services Region specific auto scaling settings of +the table are included. Amazon Keyspaces auto scaling helps you provision throughput +capacity for variable workloads efficiently by increasing and decreasing your table's read +and write capacity automatically in response to application traffic. For more information, +see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the +Amazon Keyspaces Developer Guide. GetTableAutoScalingSettings can't be used as an action +in an IAM policy. To define permissions for GetTableAutoScalingSettings, you must allow +the following two actions in the IAM policy statement's Action element: +application-autoscaling:DescribeScalableTargets +application-autoscaling:DescribeScalingPolicies + +# Arguments +- `keyspace_name`: The name of the keyspace. +- `table_name`: The name of the table. + +""" +function get_table_auto_scaling_settings( + keyspaceName, tableName; aws_config::AbstractAWSConfig=global_aws_config() +) + return keyspaces( + "GetTableAutoScalingSettings", + Dict{String,Any}("keyspaceName" => keyspaceName, "tableName" => tableName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_table_auto_scaling_settings( + keyspaceName, + tableName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return keyspaces( + "GetTableAutoScalingSettings", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("keyspaceName" => keyspaceName, "tableName" => tableName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_keyspaces() list_keyspaces(params::Dict{String,<:Any}) @@ -429,24 +497,24 @@ end restore_table(source_keyspace_name, source_table_name, target_keyspace_name, target_table_name) restore_table(source_keyspace_name, source_table_name, target_keyspace_name, target_table_name, params::Dict{String,<:Any}) -Restores the specified table to the specified point in time within the -earliest_restorable_timestamp and the current time. For more information about restore -points, see Time window for PITR continuous backups in the Amazon Keyspaces Developer -Guide. Any number of users can execute up to 4 concurrent restores (any type of restore) in -a given account. When you restore using point in time recovery, Amazon Keyspaces restores -your source table's schema and data to the state based on the selected timestamp -(day:hour:minute:second) to a new table. The Time to Live (TTL) settings are also restored -to the state based on the selected timestamp. 
In addition to the table's schema, data, and -TTL settings, RestoreTable restores the capacity mode, encryption, and point-in-time -recovery settings from the source table. Unlike the table's schema data and TTL settings, -which are restored based on the selected timestamp, these settings are always restored -based on the table's settings as of the current time or when the table was deleted. You can -also overwrite these settings during restore: Read/write capacity mode Provisioned -throughput capacity settings Point-in-time (PITR) settings Tags For more information, -see PITR restore settings in the Amazon Keyspaces Developer Guide. Note that the following -settings are not restored, and you must configure them manually for the new table: -Automatic scaling policies (for tables that use provisioned capacity mode) Identity and -Access Management (IAM) policies Amazon CloudWatch metrics and alarms +Restores the table to the specified point in time within the earliest_restorable_timestamp +and the current time. For more information about restore points, see Time window for PITR +continuous backups in the Amazon Keyspaces Developer Guide. Any number of users can execute +up to 4 concurrent restores (any type of restore) in a given account. When you restore +using point in time recovery, Amazon Keyspaces restores your source table's schema and data +to the state based on the selected timestamp (day:hour:minute:second) to a new table. The +Time to Live (TTL) settings are also restored to the state based on the selected timestamp. +In addition to the table's schema, data, and TTL settings, RestoreTable restores the +capacity mode, auto scaling settings, encryption settings, and point-in-time recovery +settings from the source table. Unlike the table's schema data and TTL settings, which are +restored based on the selected timestamp, these settings are always restored based on the +table's settings as of the current time or when the table was deleted. You can also +overwrite these settings during restore: Read/write capacity mode Provisioned +throughput capacity units Auto scaling settings Point-in-time (PITR) settings Tags +For more information, see PITR restore settings in the Amazon Keyspaces Developer Guide. +Note that the following settings are not restored, and you must configure them manually for +the new table: Identity and Access Management (IAM) policies Amazon CloudWatch metrics +and alarms # Arguments - `source_keyspace_name`: The keyspace name of the source table. @@ -456,6 +524,13 @@ Access Management (IAM) policies Amazon CloudWatch metrics and alarms # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoScalingSpecification"`: The optional auto scaling settings for the restored table + in provisioned capacity mode. Specifies if the service can manage throughput capacity of a + provisioned table automatically on your behalf. Amazon Keyspaces auto scaling helps you + provision throughput capacity for variable workloads efficiently by increasing and + decreasing your table's read and write capacity automatically in response to application + traffic. For more information, see Managing throughput capacity automatically with Amazon + Keyspaces auto scaling in the Amazon Keyspaces Developer Guide. - `"capacitySpecificationOverride"`: Specifies the read/write throughput capacity mode for the target table. 
The options are: throughputMode:PAY_PER_REQUEST throughputMode:PROVISIONED - Provisioned capacity mode requires readCapacityUnits and @@ -472,6 +547,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys target table. The options are: status=ENABLED status=DISABLED If it's not specified, the default is status=DISABLED. For more information, see Point-in-time recovery in the Amazon Keyspaces Developer Guide. +- `"replicaSpecifications"`: The optional Region specific settings of a multi-Regional + table. - `"restoreTimestamp"`: The restore timestamp in ISO 8601 format. - `"tagsOverride"`: A list of key-value pair tags to be attached to the restored table. For more information, see Adding tags and labels to Amazon Keyspaces resources in the @@ -615,8 +692,8 @@ end update_table(keyspace_name, table_name, params::Dict{String,<:Any}) Adds new columns to the table or updates one of the table's settings, for example capacity -mode, encryption, point-in-time recovery, or ttl settings. Note that you can only update -one specific table setting per update operation. +mode, auto scaling, encryption, point-in-time recovery, or ttl settings. Note that you can +only update one specific table setting per update operation. # Arguments - `keyspace_name`: The name of the keyspace the specified table is stored in. @@ -627,6 +704,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"addColumns"`: For each column to be added to the specified table: name - The name of the column. type - An Amazon Keyspaces data type. For more information, see Data types in the Amazon Keyspaces Developer Guide. +- `"autoScalingSpecification"`: The optional auto scaling settings to update for a table in + provisioned capacity mode. Specifies if the service can manage throughput capacity of a + provisioned table automatically on your behalf. Amazon Keyspaces auto scaling helps you + provision throughput capacity for variable workloads efficiently by increasing and + decreasing your table's read and write capacity automatically in response to application + traffic. If auto scaling is already enabled for the table, you can use UpdateTable to + update the minimum and maximum values or the auto scaling policy settings independently. + For more information, see Managing throughput capacity automatically with Amazon Keyspaces + auto scaling in the Amazon Keyspaces Developer Guide. - `"capacitySpecification"`: Modifies the read/write throughput capacity mode for the table. The options are: throughputMode:PAY_PER_REQUEST and throughputMode:PROVISIONED - Provisioned capacity mode requires readCapacityUnits and @@ -650,6 +736,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys options are: status=ENABLED status=DISABLED If it's not specified, the default is status=DISABLED. For more information, see Point-in-time recovery in the Amazon Keyspaces Developer Guide. +- `"replicaSpecifications"`: The Region specific settings of a multi-Regional table. - `"ttl"`: Modifies Time to Live custom settings for the table. The options are: status:enabled status:disabled The default is status:disabled. After ttl is enabled, you can't disable it for the table. 
For more information, see Expiring data by using Amazon diff --git a/src/services/kinesis.jl b/src/services/kinesis.jl index e060b4f4b0..233bc9574f 100644 --- a/src/services/kinesis.jl +++ b/src/services/kinesis.jl @@ -9,11 +9,11 @@ using AWS.UUIDs add_tags_to_stream(tags, params::Dict{String,<:Any}) Adds or updates tags for the specified Kinesis data stream. You can assign up to 50 tags to -a data stream. When invoking this API, it is recommended you use the StreamARN input -parameter rather than the StreamName input parameter. If tags have already been assigned -to the stream, AddTagsToStream overwrites any existing tags that correspond to the -specified tag keys. AddTagsToStream has a limit of five transactions per second per -account. +a data stream. When invoking this API, you must use either the StreamARN or the StreamName +parameter, or both. It is recommended that you use the StreamARN input parameter when you +invoke this API. If tags have already been assigned to the stream, AddTagsToStream +overwrites any existing tags that correspond to the specified tag keys. AddTagsToStream +has a limit of five transactions per second per account. # Arguments - `tags`: A set of up to 10 key-value pairs to use to create the tags. @@ -119,11 +119,11 @@ end Decreases the Kinesis data stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's -retention period is 24 hours. When invoking this API, it is recommended you use the -StreamARN input parameter rather than the StreamName input parameter. This operation may -result in lost data. For example, if the stream's retention period is 48 hours and is -decreased to 24 hours, any data already in the stream that is older than 24 hours is -inaccessible. +retention period is 24 hours. When invoking this API, you must use either the StreamARN or +the StreamName parameter, or both. It is recommended that you use the StreamARN input +parameter when you invoke this API. This operation may result in lost data. For example, +if the stream's retention period is 48 hours and is decreased to 24 hours, any data already +in the stream that is older than 24 hours is inaccessible. # Arguments - `retention_period_hours`: The new retention period of the stream, in hours. Must be less @@ -163,6 +163,44 @@ function decrease_stream_retention_period( ) end +""" + delete_resource_policy(resource_arn) + delete_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Delete a policy for the specified data stream or consumer. Request patterns can be one of +the following: Data stream pattern: arn:aws.*:kinesis:.*:d{12}:.*stream/S+ Consumer +pattern: +^(arn):aws.*:kinesis:.*:d{12}:.*stream/[a-zA-Z0-9_.-]+/consumer/[a-zA-Z0-9_.-]+:[0-9]+ + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the data stream or consumer. 
+ +""" +function delete_resource_policy( + ResourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis( + "DeleteResourcePolicy", + Dict{String,Any}("ResourceARN" => ResourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_resource_policy( + ResourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis( + "DeleteResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceARN" => ResourceARN), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_stream() delete_stream(params::Dict{String,<:Any}) @@ -170,16 +208,17 @@ end Deletes a Kinesis data stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it receives the exception -ResourceNotFoundException. When invoking this API, it is recommended you use the StreamARN -input parameter rather than the StreamName input parameter. If the stream is in the ACTIVE -state, you can delete it. After a DeleteStream request, the specified stream is in the -DELETING state until Kinesis Data Streams completes the deletion. Note: Kinesis Data -Streams might continue to accept data read and write operations, such as PutRecord, -PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is -complete. When you delete a stream, any shards in that stream are also deleted, and any -tags are dissociated from the stream. You can use the DescribeStreamSummary operation to -check the state of the stream, which is returned in StreamStatus. DeleteStream has a limit -of five transactions per second per account. +ResourceNotFoundException. When invoking this API, you must use either the StreamARN or +the StreamName parameter, or both. It is recommended that you use the StreamARN input +parameter when you invoke this API. If the stream is in the ACTIVE state, you can delete +it. After a DeleteStream request, the specified stream is in the DELETING state until +Kinesis Data Streams completes the deletion. Note: Kinesis Data Streams might continue to +accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a +stream in the DELETING state until the stream deletion is complete. When you delete a +stream, any shards in that stream are also deleted, and any tags are dissociated from the +stream. You can use the DescribeStreamSummary operation to check the state of the stream, +which is returned in StreamStatus. DeleteStream has a limit of five transactions per +second per account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -267,19 +306,19 @@ end Describes the specified Kinesis data stream. This API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get a summarized description of the specified Kinesis data stream and the ListShards API to list the shards in a specified -data stream and obtain information about each shard. When invoking this API, it is -recommended you use the StreamARN input parameter rather than the StreamName input -parameter. The information returned includes the stream name, Amazon Resource Name (ARN), -creation time, enhanced metric configuration, and shard map. The shard map is an array of -shard objects. 
For each shard object, there is the hash key and sequence number ranges that -the shard spans, and the IDs of any earlier shards that played in a role in creating the -shard. Every record ingested in the stream is identified by a sequence number, which is -assigned when the record is put into the stream. You can limit the number of shards -returned by each call. For more information, see Retrieving Shards from a Stream in the -Amazon Kinesis Data Streams Developer Guide. There are no guarantees about the -chronological order shards returned. To process shards in chronological order, use the ID -of the parent shard to track the lineage to the oldest shard. This operation has a limit of -10 transactions per second per account. +data stream and obtain information about each shard. When invoking this API, you must +use either the StreamARN or the StreamName parameter, or both. It is recommended that you +use the StreamARN input parameter when you invoke this API. The information returned +includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric +configuration, and shard map. The shard map is an array of shard objects. For each shard +object, there is the hash key and sequence number ranges that the shard spans, and the IDs +of any earlier shards that played in a role in creating the shard. Every record ingested in +the stream is identified by a sequence number, which is assigned when the record is put +into the stream. You can limit the number of shards returned by each call. For more +information, see Retrieving Shards from a Stream in the Amazon Kinesis Data Streams +Developer Guide. There are no guarantees about the chronological order shards returned. To +process shards in chronological order, use the ID of the parent shard to track the lineage +to the oldest shard. This operation has a limit of 10 transactions per second per account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -314,7 +353,9 @@ consumer when you registered it. You may also provide all three parameters, as l don't conflict with each other. If you don't know the name or ARN of the consumer that you want to describe, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. -This operation has a limit of 20 transactions per second per stream. +This operation has a limit of 20 transactions per second per stream. When making a +cross-account call with DescribeStreamConsumer, make sure to provide the ARN of the +consumer. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -346,11 +387,12 @@ end describe_stream_summary(params::Dict{String,<:Any}) Provides a summarized description of the specified Kinesis data stream without the shard -list. When invoking this API, it is recommended you use the StreamARN input parameter -rather than the StreamName input parameter. The information returned includes the stream -name, Amazon Resource Name (ARN), status, record retention period, approximate creation -time, monitoring, encryption details, and open shard count. DescribeStreamSummary has a -limit of 20 transactions per second per account. +list. When invoking this API, you must use either the StreamARN or the StreamName +parameter, or both. It is recommended that you use the StreamARN input parameter when you +invoke this API. 
The information returned includes the stream name, Amazon Resource Name +(ARN), status, record retention period, approximate creation time, monitoring, encryption +details, and open shard count. DescribeStreamSummary has a limit of 20 transactions per +second per account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -377,8 +419,9 @@ end disable_enhanced_monitoring(shard_level_metrics) disable_enhanced_monitoring(shard_level_metrics, params::Dict{String,<:Any}) -Disables enhanced monitoring. When invoking this API, it is recommended you use the -StreamARN input parameter rather than the StreamName input parameter. +Disables enhanced monitoring. When invoking this API, you must use either the StreamARN or +the StreamName parameter, or both. It is recommended that you use the StreamARN input +parameter when you invoke this API. # Arguments - `shard_level_metrics`: List of shard-level metrics to disable. The following are the @@ -427,8 +470,8 @@ end enable_enhanced_monitoring(shard_level_metrics, params::Dict{String,<:Any}) Enables enhanced Kinesis data stream monitoring for shard-level metrics. When invoking -this API, it is recommended you use the StreamARN input parameter rather than the -StreamName input parameter. +this API, you must use either the StreamARN or the StreamName parameter, or both. It is +recommended that you use the StreamARN input parameter when you invoke this API. # Arguments - `shard_level_metrics`: List of shard-level metrics to enable. The following are the valid @@ -475,35 +518,35 @@ end get_records(shard_iterator) get_records(shard_iterator, params::Dict{String,<:Any}) -Gets data records from a Kinesis data stream's shard. When invoking this API, it is -recommended you use the StreamARN input parameter in addition to the ShardIterator -parameter. Specify a shard iterator using the ShardIterator parameter. The shard iterator -specifies the position in the shard from which you want to start reading data records -sequentially. If there are no records available in the portion of the shard that the -iterator points to, GetRecords returns an empty list. It might take multiple calls to get -to a portion of the shard that contains records. You can scale by provisioning multiple -shards per stream while considering service limits (for more information, see Amazon -Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your -application should have one thread per shard, each reading continuously from its stream. To -read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the -shard iterator to specify in the first GetRecords call. GetRecords returns a new shard -iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in -subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't -return more data and GetRecords returns null in NextShardIterator. You can terminate the -loop when the shard is closed, or when the shard iterator reaches the record with the -sequence number or other attribute that marks it as the last record to process. Each data -record can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You can -ensure that your calls don't exceed the maximum supported size or throughput by using the -Limit parameter to specify the maximum number of records that GetRecords can return. -Consider your average record size when determining this limit. 
The maximum number of -records that can be returned per call is 10,000. The size of the data returned by -GetRecords varies depending on the utilization of the shard. It is recommended that -consumer applications retrieve records via the GetRecords command using the 5 TPS limit to -remain caught up. Retrieving records less frequently can lead to consumer applications -falling behind. The maximum size of data that GetRecords can return is 10 MiB. If a call -returns this amount of data, subsequent calls made within the next 5 seconds throw -ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on -the stream, subsequent calls made within the next 1 second throw +Gets data records from a Kinesis data stream's shard. When invoking this API, you must use +either the StreamARN or the StreamName parameter, or both. It is recommended that you use +the StreamARN input parameter when you invoke this API. Specify a shard iterator using the +ShardIterator parameter. The shard iterator specifies the position in the shard from which +you want to start reading data records sequentially. If there are no records available in +the portion of the shard that the iterator points to, GetRecords returns an empty list. It +might take multiple calls to get to a portion of the shard that contains records. You can +scale by provisioning multiple shards per stream while considering service limits (for more +information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams +Developer Guide). Your application should have one thread per shard, each reading +continuously from its stream. To read from a stream continually, call GetRecords in a loop. +Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. +GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator +returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been +closed, the shard iterator can't return more data and GetRecords returns null in +NextShardIterator. You can terminate the loop when the shard is closed, or when the shard +iterator reaches the record with the sequence number or other attribute that marks it as +the last record to process. Each data record can be up to 1 MiB in size, and each shard can +read up to 2 MiB per second. You can ensure that your calls don't exceed the maximum +supported size or throughput by using the Limit parameter to specify the maximum number of +records that GetRecords can return. Consider your average record size when determining this +limit. The maximum number of records that can be returned per call is 10,000. The size of +the data returned by GetRecords varies depending on the utilization of the shard. It is +recommended that consumer applications retrieve records via the GetRecords command using +the 5 TPS limit to remain caught up. Retrieving records less frequently can lead to +consumer applications falling behind. The maximum size of data that GetRecords can return +is 10 MiB. If a call returns this amount of data, subsequent calls made within the next 5 +seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned +throughput on the stream, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. GetRecords doesn't return any data when it throws an exception. For this reason, we recommend that you wait 1 second between calls to GetRecords. 
However, it's possible that the application will get exceptions for longer than @@ -555,32 +598,69 @@ function get_records( ) end +""" + get_resource_policy(resource_arn) + get_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Returns a policy attached to the specified data stream or consumer. Request patterns can be +one of the following: Data stream pattern: arn:aws.*:kinesis:.*:d{12}:.*stream/S+ +Consumer pattern: +^(arn):aws.*:kinesis:.*:d{12}:.*stream/[a-zA-Z0-9_.-]+/consumer/[a-zA-Z0-9_.-]+:[0-9]+ + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the data stream or consumer. + +""" +function get_resource_policy(ResourceARN; aws_config::AbstractAWSConfig=global_aws_config()) + return kinesis( + "GetResourcePolicy", + Dict{String,Any}("ResourceARN" => ResourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_resource_policy( + ResourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis( + "GetResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceARN" => ResourceARN), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_shard_iterator(shard_id, shard_iterator_type) get_shard_iterator(shard_id, shard_iterator_type, params::Dict{String,<:Any}) Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is -returned to the requester. When invoking this API, it is recommended you use the StreamARN -input parameter rather than the StreamName input parameter. A shard iterator specifies the -shard position from which to start reading data records sequentially. The position is -specified using the sequence number of a data record in a shard. A sequence number is the -identifier associated with every record ingested in the stream, and is assigned when a -record is put into the stream. Each stream has one or more shards. You must specify the -shard iterator type. For example, you can set the ShardIteratorType parameter to read -exactly from the position denoted by a specific sequence number by using the -AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read right after -the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence -numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. -In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from -an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last -untrimmed record in the shard in the system (the oldest data record in the shard), or -LATEST so that you always read the most recent data in the shard. When you read repeatedly -from a stream, use a GetShardIterator request to get the first shard iterator for use in -your first GetRecords request and for subsequent reads use the shard iterator returned by -the GetRecords request in NextShardIterator. A new shard iterator is returned by every -GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of -the next GetRecords request. If a GetShardIterator request is made too often, you receive -a ProvisionedThroughputExceededException. For more information about throughput limits, see +returned to the requester. When invoking this API, you must use either the StreamARN or +the StreamName parameter, or both. It is recommended that you use the StreamARN input +parameter when you invoke this API. 
A shard iterator specifies the shard position from +which to start reading data records sequentially. The position is specified using the +sequence number of a data record in a shard. A sequence number is the identifier associated +with every record ingested in the stream, and is assigned when a record is put into the +stream. Each stream has one or more shards. You must specify the shard iterator type. For +example, you can set the ShardIteratorType parameter to read exactly from the position +denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. +Alternatively, the parameter can read right after the sequence number by using the +AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls +to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify +the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, +TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in +the system (the oldest data record in the shard), or LATEST so that you always read the +most recent data in the shard. When you read repeatedly from a stream, use a +GetShardIterator request to get the first shard iterator for use in your first GetRecords +request and for subsequent reads use the shard iterator returned by the GetRecords request +in NextShardIterator. A new shard iterator is returned by every GetRecords request in +NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords +request. If a GetShardIterator request is made too often, you receive a +ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer Guide. If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards. @@ -651,14 +731,15 @@ end Increases the Kinesis data stream's retention period, which is the length of time data records are accessible after they are added to the stream. The maximum value of a stream's -retention period is 8760 hours (365 days). When invoking this API, it is recommended you -use the StreamARN input parameter rather than the StreamName input parameter. If you -choose a longer stream retention period, this operation increases the time period during -which records that have not yet expired are accessible. However, it does not make previous, -expired data (older than the stream's previous retention period) accessible after the -operation has been called. For example, if a stream's retention period is set to 24 hours -and is increased to 168 hours, any data that is older than 24 hours remains inaccessible to -consumer applications. +retention period is 8760 hours (365 days). When invoking this API, you must use either the +StreamARN or the StreamName parameter, or both. It is recommended that you use the +StreamARN input parameter when you invoke this API. If you choose a longer stream +retention period, this operation increases the time period during which records that have +not yet expired are accessible. However, it does not make previous, expired data (older +than the stream's previous retention period) accessible after the operation has been +called. 
For example, if a stream's retention period is set to 24 hours and is increased to +168 hours, any data that is older than 24 hours remains inaccessible to consumer +applications. # Arguments - `retention_period_hours`: The new retention period of the stream, in hours. Must be more @@ -703,14 +784,14 @@ end list_shards(params::Dict{String,<:Any}) Lists the shards in a stream and provides information about each shard. This operation has -a limit of 1000 transactions per second per data stream. When invoking this API, it is -recommended you use the StreamARN input parameter rather than the StreamName input -parameter. This action does not list expired shards. For information about expired shards, -see Data Routing, Data Persistence, and Shard State after a Reshard. This API is a new -operation that is used by the Amazon Kinesis Client Library (KCL). If you have a -fine-grained IAM policy that only allows specific operations, you must update your policy -to allow calls to this API. For more information, see Controlling Access to Amazon Kinesis -Data Streams Resources Using IAM. +a limit of 1000 transactions per second per data stream. When invoking this API, you must +use either the StreamARN or the StreamName parameter, or both. It is recommended that you +use the StreamARN input parameter when you invoke this API. This action does not list +expired shards. For information about expired shards, see Data Routing, Data Persistence, +and Shard State after a Reshard. This API is a new operation that is used by the Amazon +Kinesis Client Library (KCL). If you have a fine-grained IAM policy that only allows +specific operations, you must update your policy to allow calls to this API. For more +information, see Controlling Access to Amazon Kinesis Data Streams Resources Using IAM. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -872,8 +953,9 @@ end list_tags_for_stream(params::Dict{String,<:Any}) Lists the tags for the specified Kinesis data stream. This operation has a limit of five -transactions per second per account. When invoking this API, it is recommended you use the -StreamARN input parameter rather than the StreamName input parameter. +transactions per second per account. When invoking this API, you must use either the +StreamARN or the StreamName parameter, or both. It is recommended that you use the +StreamARN input parameter when you invoke this API. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -911,24 +993,25 @@ For example, if you have two shards, one with a hash key range of 276...381 and with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards. When -invoking this API, it is recommended you use the StreamARN input parameter rather than the -StreamName input parameter. MergeShards is called when there is a need to reduce the -overall capacity of a stream because of excess capacity that is not being used. You must -specify the shard to be merged and the adjacent shard for a stream. For more information -about merging shards, see Merge Two Shards in the Amazon Kinesis Data Streams Developer -Guide. If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in -the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. 
If -the specified stream does not exist, MergeShards returns a ResourceNotFoundException. You -can use DescribeStreamSummary to check the state of the stream, which is returned in -StreamStatus. MergeShards is an asynchronous operation. Upon receiving a MergeShards -request, Amazon Kinesis Data Streams immediately returns a response and sets the -StreamStatus to UPDATING. After the operation is completed, Kinesis Data Streams sets the -StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in -the UPDATING state. You use DescribeStreamSummary and the ListShards APIs to determine the -shard IDs that are specified in the MergeShards request. If you try to operate on too many -streams in parallel using CreateStream, DeleteStream, MergeShards, or SplitShard, you -receive a LimitExceededException. MergeShards has a limit of five transactions per second -per account. +invoking this API, you must use either the StreamARN or the StreamName parameter, or both. +It is recommended that you use the StreamARN input parameter when you invoke this API. +MergeShards is called when there is a need to reduce the overall capacity of a stream +because of excess capacity that is not being used. You must specify the shard to be merged +and the adjacent shard for a stream. For more information about merging shards, see Merge +Two Shards in the Amazon Kinesis Data Streams Developer Guide. If the stream is in the +ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or +DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does +not exist, MergeShards returns a ResourceNotFoundException. You can use +DescribeStreamSummary to check the state of the stream, which is returned in StreamStatus. +MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon +Kinesis Data Streams immediately returns a response and sets the StreamStatus to UPDATING. +After the operation is completed, Kinesis Data Streams sets the StreamStatus to ACTIVE. +Read and write operations continue to work while the stream is in the UPDATING state. You +use DescribeStreamSummary and the ListShards APIs to determine the shard IDs that are +specified in the MergeShards request. If you try to operate on too many streams in +parallel using CreateStream, DeleteStream, MergeShards, or SplitShard, you receive a +LimitExceededException. MergeShards has a limit of five transactions per second per +account. # Arguments - `adjacent_shard_to_merge`: The shard ID of the adjacent shard for the merge. @@ -982,32 +1065,32 @@ end Writes a single data record into an Amazon Kinesis data stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write -total of 1 MiB per second. When invoking this API, it is recommended you use the StreamARN -input parameter rather than the StreamName input parameter. You must specify the name of -the stream that captures, stores, and transports the data; a partition key; and the data -blob itself. The data blob can be any type of data; for example, a segment from a log file, -geographic/location data, website clickstream data, and so on. The partition key is used by -Kinesis Data Streams to distribute data across shards. 
Kinesis Data Streams segregates the -data records that belong to a stream into multiple shards, using the partition key -associated with each data record to determine the shard to which a given data record -belongs. Partition keys are Unicode strings, with a maximum length limit of 256 characters -for each key. An MD5 hash function is used to map partition keys to 128-bit integer values -and to map associated data records to shards using the hash key ranges of the shards. You -can override hashing the partition key to determine the shard by explicitly specifying a -hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a -Stream in the Amazon Kinesis Data Streams Developer Guide. PutRecord returns the shard ID -of where the data record was placed and the sequence number that was assigned to the data -record. Sequence numbers increase over time and are specific to a shard within a stream, -not across all shards within a stream. To guarantee strictly increasing ordering, write -serially to a shard and use the SequenceNumberForOrdering parameter. For more information, -see Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide. After you -write a record to a stream, you cannot modify that record or its order within the stream. -If a PutRecord request cannot be processed because of insufficient provisioned throughput -on the shard involved in the request, PutRecord throws -ProvisionedThroughputExceededException. By default, data records are accessible for 24 -hours from the time that they are added to a stream. You can use -IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention -period. +total of 1 MiB per second. When invoking this API, you must use either the StreamARN or +the StreamName parameter, or both. It is recommended that you use the StreamARN input +parameter when you invoke this API. You must specify the name of the stream that captures, +stores, and transports the data; a partition key; and the data blob itself. The data blob +can be any type of data; for example, a segment from a log file, geographic/location data, +website clickstream data, and so on. The partition key is used by Kinesis Data Streams to +distribute data across shards. Kinesis Data Streams segregates the data records that belong +to a stream into multiple shards, using the partition key associated with each data record +to determine the shard to which a given data record belongs. Partition keys are Unicode +strings, with a maximum length limit of 256 characters for each key. An MD5 hash function +is used to map partition keys to 128-bit integer values and to map associated data records +to shards using the hash key ranges of the shards. You can override hashing the partition +key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey +parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Data +Streams Developer Guide. PutRecord returns the shard ID of where the data record was +placed and the sequence number that was assigned to the data record. Sequence numbers +increase over time and are specific to a shard within a stream, not across all shards +within a stream. To guarantee strictly increasing ordering, write serially to a shard and +use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a +Stream in the Amazon Kinesis Data Streams Developer Guide. 
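# Illustrative sketch (all values hypothetical): a single PutRecord request expressed with
# the low-level `kinesis` helper used by the wrappers in this file. Whether the data blob
# needs explicit base64 encoding depends on the client serialization layer; it is encoded
# here as an assumption, for clarity about the wire format.
using Base64
put_resp = kinesis(
    "PutRecord",
    Dict{String,Any}(
        "StreamARN" => "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream",
        "PartitionKey" => "user-42",  # the MD5 hash of this key selects the target shard
        "Data" => base64encode("click,home,2024-06-26T06:15:17Z"),
    );
    aws_config=global_aws_config(),
    feature_set=SERVICE_FEATURE_SET,
)
# The response carries the ShardId and SequenceNumber assigned to the record.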
After you write a record to a +stream, you cannot modify that record or its order within the stream. If a PutRecord +request cannot be processed because of insufficient provisioned throughput on the shard +involved in the request, PutRecord throws ProvisionedThroughputExceededException. By +default, data records are accessible for 24 hours from the time that they are added to a +stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to +modify this retention period. # Arguments - `data`: The data blob to put into the record, which is base64-encoded when the blob is @@ -1067,39 +1150,40 @@ end Writes multiple data records into a Kinesis data stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data -ingestion and processing. When invoking this API, it is recommended you use the StreamARN -input parameter rather than the StreamName input parameter. Each PutRecords request can -support up to 500 records. Each record in the request can be as large as 1 MiB, up to a -limit of 5 MiB for the entire request, including partition keys. Each shard can support -writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per -second. You must specify the name of the stream that captures, stores, and transports the -data; and an array of request Records, with each record in the array requiring a partition -key and data blob. The record size limit applies to the total size of the partition key and -data blob. The data blob can be any type of data; for example, a segment from a log file, -geographic/location data, website clickstream data, and so on. The partition key is used by -Kinesis Data Streams as input to a hash function that maps the partition key and associated -data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit -integer values and to map associated data records to shards. As a result of this hashing -mechanism, all data records with the same partition key map to the same shard within the -stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data -Streams Developer Guide. Each record in the Records array may include an optional -parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This -parameter allows a data producer to determine explicitly the shard where the record is -stored. For more information, see Adding Multiple Records with PutRecords in the Amazon -Kinesis Data Streams Developer Guide. The PutRecords response includes an array of response -Records. Each record in the response array directly correlates with a record in the request -array using natural ordering, from the top to the bottom of the request and response. The -response Records array always includes the same number of records as the request array. The -response Records array includes both successfully and unsuccessfully processed records. -Kinesis Data Streams attempts to process all records in each PutRecords request. A single -record failure does not stop the processing of subsequent records. As a result, PutRecords -doesn't guarantee the ordering of records. If you need to read records in the same order -they are written to the stream, use PutRecord instead of PutRecords, and write to the same -shard. A successfully processed record includes ShardId and SequenceNumber values. The -ShardId parameter identifies the shard in the stream where the record is stored. 
The -SequenceNumber parameter is an identifier assigned to the put record, unique to all records -in the stream. An unsuccessfully processed record includes ErrorCode and ErrorMessage -values. ErrorCode reflects the type of error and can be one of the following values: +ingestion and processing. When invoking this API, you must use either the StreamARN or +the StreamName parameter, or both. It is recommended that you use the StreamARN input +parameter when you invoke this API. Each PutRecords request can support up to 500 records. +Each record in the request can be as large as 1 MiB, up to a limit of 5 MiB for the entire +request, including partition keys. Each shard can support writes up to 1,000 records per +second, up to a maximum data write total of 1 MiB per second. You must specify the name of +the stream that captures, stores, and transports the data; and an array of request Records, +with each record in the array requiring a partition key and data blob. The record size +limit applies to the total size of the partition key and data blob. The data blob can be +any type of data; for example, a segment from a log file, geographic/location data, website +clickstream data, and so on. The partition key is used by Kinesis Data Streams as input to +a hash function that maps the partition key and associated data to a specific shard. An MD5 +hash function is used to map partition keys to 128-bit integer values and to map associated +data records to shards. As a result of this hashing mechanism, all data records with the +same partition key map to the same shard within the stream. For more information, see +Adding Data to a Stream in the Amazon Kinesis Data Streams Developer Guide. Each record in +the Records array may include an optional parameter, ExplicitHashKey, which overrides the +partition key to shard mapping. This parameter allows a data producer to determine +explicitly the shard where the record is stored. For more information, see Adding Multiple +Records with PutRecords in the Amazon Kinesis Data Streams Developer Guide. The PutRecords +response includes an array of response Records. Each record in the response array directly +correlates with a record in the request array using natural ordering, from the top to the +bottom of the request and response. The response Records array always includes the same +number of records as the request array. The response Records array includes both +successfully and unsuccessfully processed records. Kinesis Data Streams attempts to process +all records in each PutRecords request. A single record failure does not stop the +processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering of +records. If you need to read records in the same order they are written to the stream, use +PutRecord instead of PutRecords, and write to the same shard. A successfully processed +record includes ShardId and SequenceNumber values. The ShardId parameter identifies the +shard in the stream where the record is stored. The SequenceNumber parameter is an +identifier assigned to the put record, unique to all records in the stream. An +unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode +reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. 
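# Illustrative sketch (all values hypothetical): a PutRecords batch and a check of the
# partial-failure contract described above. Member names follow the Kinesis API; the
# records themselves are invented for the example.
using Base64
batch = [
    Dict{String,Any}("PartitionKey" => "sensor-$(i)", "Data" => base64encode("reading-$(i)"))
    for i in 1:3
]
batch_resp = kinesis(
    "PutRecords",
    Dict{String,Any}(
        "StreamARN" => "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream",
        "Records" => batch,
    );
    aws_config=global_aws_config(),
    feature_set=SERVICE_FEATURE_SET,
)
# A non-zero FailedRecordCount means some response entries carry ErrorCode/ErrorMessage
# instead of ShardId/SequenceNumber; retry only those records.
failed = [r for r in batch_resp["Records"] if haskey(r, "ErrorCode")]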
For more @@ -1137,6 +1221,59 @@ function put_records( ) end +""" + put_resource_policy(policy, resource_arn) + put_resource_policy(policy, resource_arn, params::Dict{String,<:Any}) + +Attaches a resource-based policy to a data stream or registered consumer. If you are using +an identity other than the root user of the Amazon Web Services account that owns the +resource, the calling identity must have the PutResourcePolicy permissions on the specified +Kinesis Data Streams resource and belong to the owner's account in order to use this +operation. If you don't have PutResourcePolicy permissions, Amazon Kinesis Data Streams +returns a 403 Access Denied error. If you receive a ResourceNotFoundException, check to see +if you passed a valid stream or consumer resource. Request patterns can be one of the +following: Data stream pattern: arn:aws.*:kinesis:.*:d{12}:.*stream/S+ Consumer +pattern: +^(arn):aws.*:kinesis:.*:d{12}:.*stream/[a-zA-Z0-9_.-]+/consumer/[a-zA-Z0-9_.-]+:[0-9]+ +For more information, see Controlling Access to Amazon Kinesis Data Streams Resources Using +IAM. + +# Arguments +- `policy`: Details of the resource policy. It must include the identity of the principal + and the actions allowed on this resource. This is formatted as a JSON string. +- `resource_arn`: The Amazon Resource Name (ARN) of the data stream or consumer. + +""" +function put_resource_policy( + Policy, ResourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis( + "PutResourcePolicy", + Dict{String,Any}("Policy" => Policy, "ResourceARN" => ResourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_resource_policy( + Policy, + ResourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis( + "PutResourcePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Policy" => Policy, "ResourceARN" => ResourceARN), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ register_stream_consumer(consumer_name, stream_arn) register_stream_consumer(consumer_name, stream_arn, params::Dict{String,<:Any}) @@ -1196,10 +1333,11 @@ end remove_tags_from_stream(tag_keys, params::Dict{String,<:Any}) Removes tags from the specified Kinesis data stream. Removed tags are deleted and cannot be -recovered after this operation successfully completes. When invoking this API, it is -recommended you use the StreamARN input parameter rather than the StreamName input -parameter. If you specify a tag that does not exist, it is ignored. RemoveTagsFromStream -has a limit of five transactions per second per account. +recovered after this operation successfully completes. When invoking this API, you must +use either the StreamARN or the StreamName parameter, or both. It is recommended that you +use the StreamARN input parameter when you invoke this API. If you specify a tag that does +not exist, it is ignored. RemoveTagsFromStream has a limit of five transactions per second +per account. # Arguments - `tag_keys`: A list of tag keys. Each corresponding tag is removed from the stream. @@ -1236,33 +1374,33 @@ Splits a shard into two new shards in the Kinesis data stream, to increase the s capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested. This API is only supported for the data streams with the -provisioned capacity mode. 
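# Illustrative sketch (account IDs, role, and stream ARN are hypothetical): attaching a
# resource-based policy with the put_resource_policy wrapper defined above. The policy is
# an ordinary IAM policy document serialized as a JSON string.
example_policy = """
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowCrossAccountRead",
      "Effect": "Allow",
      "Principal": {"AWS": "arn:aws:iam::444455556666:role/consumer-role"},
      "Action": ["kinesis:DescribeStreamSummary", "kinesis:GetShardIterator", "kinesis:GetRecords"],
      "Resource": "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream"
    }
  ]
}
"""
put_resource_policy(
    example_policy, "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream"
)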
When invoking this API, it is recommended you use the StreamARN -input parameter rather than the StreamName input parameter. You can also use SplitShard -when a shard appears to be approaching its maximum utilization; for example, the producers -sending data into the specific shard are suddenly sending more than previously anticipated. -You can also call SplitShard to increase stream capacity, so that more Kinesis Data Streams -applications can simultaneously read data from the stream for real-time processing. You -must specify the shard to be split and the new hash key, which is the position in the shard -where the shard gets split in two. In many cases, the new hash key might be the average of -the beginning and ending hash key, but it can be any hash key value in the range being -mapped into the shard. For more information, see Split a Shard in the Amazon Kinesis Data -Streams Developer Guide. You can use DescribeStreamSummary and the ListShards APIs to -determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey -parameters that are specified in the SplitShard request. SplitShard is an asynchronous -operation. Upon receiving a SplitShard request, Kinesis Data Streams immediately returns a -response and sets the stream status to UPDATING. After the operation is completed, Kinesis -Data Streams sets the stream status to ACTIVE. Read and write operations continue to work -while the stream is in the UPDATING state. You can use DescribeStreamSummary to check the -status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE -state, you can call SplitShard. If the specified stream does not exist, -DescribeStreamSummary returns a ResourceNotFoundException. If you try to create more shards -than are authorized for your account, you receive a LimitExceededException. For the -default shard limit for an Amazon Web Services account, see Kinesis Data Streams Limits in -the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact Amazon Web -Services Support. If you try to operate on too many streams simultaneously using -CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a -LimitExceededException. SplitShard has a limit of five transactions per second per -account. +provisioned capacity mode. When invoking this API, you must use either the StreamARN or +the StreamName parameter, or both. It is recommended that you use the StreamARN input +parameter when you invoke this API. You can also use SplitShard when a shard appears to be +approaching its maximum utilization; for example, the producers sending data into the +specific shard are suddenly sending more than previously anticipated. You can also call +SplitShard to increase stream capacity, so that more Kinesis Data Streams applications can +simultaneously read data from the stream for real-time processing. You must specify the +shard to be split and the new hash key, which is the position in the shard where the shard +gets split in two. In many cases, the new hash key might be the average of the beginning +and ending hash key, but it can be any hash key value in the range being mapped into the +shard. For more information, see Split a Shard in the Amazon Kinesis Data Streams Developer +Guide. You can use DescribeStreamSummary and the ListShards APIs to determine the shard ID +and hash key values for the ShardToSplit and NewStartingHashKey parameters that are +specified in the SplitShard request. SplitShard is an asynchronous operation. 
Upon +receiving a SplitShard request, Kinesis Data Streams immediately returns a response and +sets the stream status to UPDATING. After the operation is completed, Kinesis Data Streams +sets the stream status to ACTIVE. Read and write operations continue to work while the +stream is in the UPDATING state. You can use DescribeStreamSummary to check the status of +the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you +can call SplitShard. If the specified stream does not exist, DescribeStreamSummary returns +a ResourceNotFoundException. If you try to create more shards than are authorized for your +account, you receive a LimitExceededException. For the default shard limit for an Amazon +Web Services account, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams +Developer Guide. To increase this limit, contact Amazon Web Services Support. If you try to +operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, +and/or SplitShard, you receive a LimitExceededException. SplitShard has a limit of five +transactions per second per account. # Arguments - `new_starting_hash_key`: A hash key value for the starting hash key of one of the child @@ -1318,8 +1456,10 @@ end start_stream_encryption(encryption_type, key_id, params::Dict{String,<:Any}) Enables or updates server-side encryption using an Amazon Web Services KMS key for a -specified stream. Starting encryption is an asynchronous operation. Upon receiving the -request, Kinesis Data Streams returns immediately and sets the status of the stream to +specified stream. When invoking this API, you must use either the StreamARN or the +StreamName parameter, or both. It is recommended that you use the StreamARN input parameter +when you invoke this API. Starting encryption is an asynchronous operation. Upon receiving +the request, Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Updating or applying encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its @@ -1328,9 +1468,7 @@ written to the stream. API Limits: You can successfully apply a new Amazon Web KMS key for server-side encryption 25 times in a rolling 24-hour period. Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are encrypted. After you enable encryption, you can verify that encryption is -applied by inspecting the API response from PutRecord or PutRecords. When invoking this -API, it is recommended you use the StreamARN input parameter rather than the StreamName -input parameter. +applied by inspecting the API response from PutRecord or PutRecords. # Arguments - `encryption_type`: The encryption type to use. The only valid value is KMS. @@ -1383,19 +1521,20 @@ end stop_stream_encryption(encryption_type, key_id) stop_stream_encryption(encryption_type, key_id, params::Dict{String,<:Any}) -Disables server-side encryption for a specified stream. When invoking this API, it is -recommended you use the StreamARN input parameter rather than the StreamName input -parameter. Stopping encryption is an asynchronous operation. Upon receiving the request, -Kinesis Data Streams returns immediately and sets the status of the stream to UPDATING. -After the update is complete, Kinesis Data Streams sets the status of the stream back to -ACTIVE. 
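# Illustrative sketch (all values hypothetical): choosing a new starting hash key halfway
# through a parent shard's range, then submitting SplitShard through the same low-level
# `kinesis` helper the wrappers in this file use.
range_start = parse(BigInt, "0")
range_end = parse(BigInt, "340282366920938463463374607431768211455")  # 2^128 - 1
midpoint = div(range_start + range_end, 2)
split_resp = kinesis(
    "SplitShard",
    Dict{String,Any}(
        "StreamARN" => "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream",
        "ShardToSplit" => "shardId-000000000000",
        "NewStartingHashKey" => string(midpoint),
    );
    aws_config=global_aws_config(),
    feature_set=SERVICE_FEATURE_SET,
)
# Like MergeShards, the call returns with the stream in UPDATING; wait for ACTIVE before
# any further resharding.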
Stopping encryption normally takes a few seconds to complete, but it can take -minutes. You can continue to read and write data to your stream while its status is -UPDATING. Once the status of the stream is ACTIVE, records written to the stream are no -longer encrypted by Kinesis Data Streams. API Limits: You can successfully disable -server-side encryption 25 times in a rolling 24-hour period. Note: It can take up to 5 -seconds after the stream is in an ACTIVE status before all records written to the stream -are no longer subject to encryption. After you disabled encryption, you can verify that -encryption is not applied by inspecting the API response from PutRecord or PutRecords. +Disables server-side encryption for a specified stream. When invoking this API, you must +use either the StreamARN or the StreamName parameter, or both. It is recommended that you +use the StreamARN input parameter when you invoke this API. Stopping encryption is an +asynchronous operation. Upon receiving the request, Kinesis Data Streams returns +immediately and sets the status of the stream to UPDATING. After the update is complete, +Kinesis Data Streams sets the status of the stream back to ACTIVE. Stopping encryption +normally takes a few seconds to complete, but it can take minutes. You can continue to read +and write data to your stream while its status is UPDATING. Once the status of the stream +is ACTIVE, records written to the stream are no longer encrypted by Kinesis Data Streams. +API Limits: You can successfully disable server-side encryption 25 times in a rolling +24-hour period. Note: It can take up to 5 seconds after the stream is in an ACTIVE status +before all records written to the stream are no longer subject to encryption. After you +disabled encryption, you can verify that encryption is not applied by inspecting the API +response from PutRecord or PutRecords. # Arguments - `encryption_type`: The encryption type. The only valid value is KMS. @@ -1450,28 +1589,29 @@ end Updates the shard count of the specified stream to the specified number of shards. This API is only supported for the data streams with the provisioned capacity mode. When invoking -this API, it is recommended you use the StreamARN input parameter rather than the -StreamName input parameter. Updating the shard count is an asynchronous operation. Upon -receiving the request, Kinesis Data Streams returns immediately and sets the status of the -stream to UPDATING. After the update is complete, Kinesis Data Streams sets the status of -the stream back to ACTIVE. Depending on the size of the stream, the scaling action could -take a few minutes to complete. You can continue to read and write data to your stream -while its status is UPDATING. To update the shard count, Kinesis Data Streams performs -splits or merges on individual shards. This can cause short-lived shards to be created, in -addition to the final shards. These short-lived shards count towards your total shard limit -for your account in the Region. When using this operation, we recommend that you specify a -target shard count that is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any -target value within your shard limit. However, if you specify a target that isn't a -multiple of 25%, the scaling action might take longer to complete. This operation has the -following default limits. 
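# Illustrative sketch (key alias and ARN are hypothetical): the start_stream_encryption and
# stop_stream_encryption wrappers above, identifying the stream by ARN through the params
# Dict. KMS is the only supported encryption type.
kms_key_id = "alias/example-kinesis-key"
stream_ref = Dict{String,Any}(
    "StreamARN" => "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream"
)
start_stream_encryption("KMS", kms_key_id, stream_ref)
# ...later, to turn server-side encryption back off:
stop_stream_encryption("KMS", kms_key_id, stream_ref)
# Both calls are asynchronous (the stream passes through UPDATING) and count against the
# 25-call rolling 24-hour limit noted above.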
By default, you cannot do the following: Scale more than ten -times per rolling 24-hour period per stream Scale up to more than double your current -shard count for a stream Scale down below half your current shard count for a stream -Scale up to more than 10000 shards in a stream Scale a stream with more than 10000 shards -down unless the result is less than 10000 shards Scale up to more than the shard limit -for your account For the default limits for an Amazon Web Services account, see Streams -Limits in the Amazon Kinesis Data Streams Developer Guide. To request an increase in the -call rate limit, the shard limit for this API, or your overall shard limit, use the limits -form. +this API, you must use either the StreamARN or the StreamName parameter, or both. It is +recommended that you use the StreamARN input parameter when you invoke this API. Updating +the shard count is an asynchronous operation. Upon receiving the request, Kinesis Data +Streams returns immediately and sets the status of the stream to UPDATING. After the update +is complete, Kinesis Data Streams sets the status of the stream back to ACTIVE. Depending +on the size of the stream, the scaling action could take a few minutes to complete. You can +continue to read and write data to your stream while its status is UPDATING. To update the +shard count, Kinesis Data Streams performs splits or merges on individual shards. This can +cause short-lived shards to be created, in addition to the final shards. These short-lived +shards count towards your total shard limit for your account in the Region. When using this +operation, we recommend that you specify a target shard count that is a multiple of 25% +(25%, 50%, 75%, 100%). You can specify any target value within your shard limit. However, +if you specify a target that isn't a multiple of 25%, the scaling action might take longer +to complete. This operation has the following default limits. By default, you cannot do +the following: Scale more than ten times per rolling 24-hour period per stream Scale up +to more than double your current shard count for a stream Scale down below half your +current shard count for a stream Scale up to more than 10000 shards in a stream Scale a +stream with more than 10000 shards down unless the result is less than 10000 shards Scale +up to more than the shard limit for your account Make over 10 TPS. TPS over 10 will +trigger the LimitExceededException For the default limits for an Amazon Web Services +account, see Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To request +an increase in the call rate limit, the shard limit for this API, or your overall shard +limit, use the limits form. # Arguments - `scaling_type`: The scaling type. Uniform scaling creates shards of equal size. diff --git a/src/services/kinesis_analytics_v2.jl b/src/services/kinesis_analytics_v2.jl index b5b49ffb69..d8f45ba1f9 100644 --- a/src/services/kinesis_analytics_v2.jl +++ b/src/services/kinesis_analytics_v2.jl @@ -22,7 +22,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the application's current ConditionalToken using DescribeApplication. For better concurrency support, use the ConditionalToken parameter instead of CurrentApplicationVersionId. -- `"CurrentApplicationVersionId"`: The version ID of the Kinesis Data Analytics +- `"CurrentApplicationVersionId"`: The version ID of the SQL-based Kinesis Data Analytics application. 
You must provide the CurrentApplicationVersionId or the ConditionalToken.You can retrieve the application version ID using DescribeApplication. For better concurrency support, use the ConditionalToken parameter instead of CurrentApplicationVersionId. @@ -337,10 +337,10 @@ end Adds a Virtual Private Cloud (VPC) configuration to the application. Applications can use VPCs to store and access resources securely. Note the following about VPC configurations -for Kinesis Data Analytics applications: VPC configurations are not supported for SQL -applications. When a VPC is added to a Kinesis Data Analytics application, the -application can no longer be accessed from the Internet directly. To enable Internet access -to the application, add an Internet gateway to your VPC. +for Managed Service for Apache Flink applications: VPC configurations are not supported +for SQL applications. When a VPC is added to a Managed Service for Apache Flink +application, the application can no longer be accessed from the Internet directly. To +enable Internet access to the application, add an Internet gateway to your VPC. # Arguments - `application_name`: The name of an existing application. @@ -398,8 +398,8 @@ end create_application(application_name, runtime_environment, service_execution_role) create_application(application_name, runtime_environment, service_execution_role, params::Dict{String,<:Any}) -Creates a Kinesis Data Analytics application. For information about creating a Kinesis Data -Analytics application, see Creating an Application. +Creates a Managed Service for Apache Flink application. For information about creating a +Managed Service for Apache Flink application, see Creating an Application. # Arguments - `application_name`: The name of your application (for example, sample-app). @@ -412,8 +412,9 @@ Analytics application, see Creating an Application. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ApplicationConfiguration"`: Use this parameter to configure the application. - `"ApplicationDescription"`: A summary description of the application. -- `"ApplicationMode"`: Use the STREAMING mode to create a Kinesis Data Analytics For Flink - application. To create a Kinesis Data Analytics Studio notebook, use the INTERACTIVE mode. +- `"ApplicationMode"`: Use the STREAMING mode to create a Managed Service for Apache Flink + application. To create a Managed Service for Apache Flink Studio notebook, use the + INTERACTIVE mode. - `"CloudWatchLoggingOptions"`: Use this parameter to configure an Amazon CloudWatch log stream to monitor application configuration errors. - `"Tags"`: A list of one or more tags to assign to the application. A tag is a key-value @@ -569,8 +570,8 @@ end delete_application(application_name, create_timestamp) delete_application(application_name, create_timestamp, params::Dict{String,<:Any}) -Deletes the specified application. Kinesis Data Analytics halts application execution and -deletes the application. +Deletes the specified application. Managed Service for Apache Flink halts application +execution and deletes the application. # Arguments - `application_name`: The name of the application to delete. @@ -616,7 +617,8 @@ end delete_application_cloud_watch_logging_option(application_name, cloud_watch_logging_option_id) delete_application_cloud_watch_logging_option(application_name, cloud_watch_logging_option_id, params::Dict{String,<:Any}) -Deletes an Amazon CloudWatch log stream from an Kinesis Data Analytics application. 
+Deletes an Amazon CloudWatch log stream from an SQL-based Kinesis Data Analytics +application. # Arguments - `application_name`: The application name. @@ -914,7 +916,7 @@ end delete_application_vpc_configuration(application_name, vpc_configuration_id) delete_application_vpc_configuration(application_name, vpc_configuration_id, params::Dict{String,<:Any}) -Removes a VPC configuration from a Kinesis Data Analytics application. +Removes a VPC configuration from a Managed Service for Apache Flink application. # Arguments - `application_name`: The name of an existing application. @@ -971,16 +973,17 @@ end describe_application(application_name) describe_application(application_name, params::Dict{String,<:Any}) -Returns information about a specific Kinesis Data Analytics application. If you want to -retrieve a list of all applications in your account, use the ListApplications operation. +Returns information about a specific Managed Service for Apache Flink application. If you +want to retrieve a list of all applications in your account, use the ListApplications +operation. # Arguments - `application_name`: The name of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"IncludeAdditionalDetails"`: Displays verbose information about a Kinesis Data Analytics - application, including the application's job plan. +- `"IncludeAdditionalDetails"`: Displays verbose information about a Managed Service for + Apache Flink application, including the application's job plan. """ function describe_application( ApplicationName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1061,7 +1064,7 @@ end Provides a detailed description of a specified version of the application. To see a list of all the versions of an application, invoke the ListApplicationVersions operation. This -operation is supported only for Amazon Kinesis Data Analytics for Apache Flink. +operation is supported only for Managed Service for Apache Flink. # Arguments - `application_name`: The name of the application for which you want to get the version @@ -1126,7 +1129,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"InputProcessingConfiguration"`: The InputProcessingConfiguration to use to preprocess the records before discovering the schema of the records. - `"InputStartingPositionConfiguration"`: The point at which you want Kinesis Data - Analytics to start reading records from the specified streaming source discovery purposes. + Analytics to start reading records from the specified streaming source for discovery + purposes. - `"ResourceARN"`: The Amazon Resource Name (ARN) of the streaming source. - `"S3Configuration"`: Specify this parameter to discover a schema from data in an Amazon S3 object. @@ -1210,8 +1214,8 @@ end Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration associated with each version. To get the complete description of a specific application version, invoke the -DescribeApplicationVersion operation. This operation is supported only for Amazon Kinesis -Data Analytics for Apache Flink. +DescribeApplicationVersion operation. This operation is supported only for Managed Service +for Apache Flink. # Arguments - `application_name`: The name of the application for which you want to list all versions. 
@@ -1254,7 +1258,7 @@ end list_applications() list_applications(params::Dict{String,<:Any}) -Returns a list of Kinesis Data Analytics applications in your account. For each +Returns a list of Managed Service for Apache Flink applications in your account. For each application, the response includes the application name, Amazon Resource Name (ARN), and status. If you want detailed information about a specific application, use DescribeApplication. @@ -1322,9 +1326,9 @@ end Reverts the application to the previous running version. You can roll back an application if you suspect it is stuck in a transient status. You can roll back an application only if it is in the UPDATING or AUTOSCALING status. When you rollback an application, it loads -state data from the last successful snapshot. If the application has no snapshots, Kinesis -Data Analytics rejects the rollback request. This action is not supported for Kinesis Data -Analytics for SQL applications. +state data from the last successful snapshot. If the application has no snapshots, Managed +Service for Apache Flink rejects the rollback request. This action is not supported for +Managed Service for Apache Flink for SQL applications. # Arguments - `application_name`: The name of the application. @@ -1374,16 +1378,16 @@ end start_application(application_name) start_application(application_name, params::Dict{String,<:Any}) -Starts the specified Kinesis Data Analytics application. After creating an application, you -must exclusively call this operation to start your application. +Starts the specified Managed Service for Apache Flink application. After creating an +application, you must exclusively call this operation to start your application. # Arguments - `application_name`: The name of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"RunConfiguration"`: Identifies the run configuration (start parameters) of a Kinesis - Data Analytics application. +- `"RunConfiguration"`: Identifies the run configuration (start parameters) of a Managed + Service for Apache Flink application. """ function start_application( ApplicationName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1418,8 +1422,8 @@ end Stops the application from processing data. You can stop an application only if it is in the running status, unless you set the Force parameter to true. You can use the -DescribeApplication operation to find the application status. Kinesis Data Analytics takes -a snapshot when the application is stopped, unless Force is set to true. +DescribeApplication operation to find the application status. Managed Service for Apache +Flink takes a snapshot when the application is stopped, unless Force is set to true. # Arguments - `application_name`: The name of the running application to stop. @@ -1427,12 +1431,13 @@ a snapshot when the application is stopped, unless Force is set to true. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Force"`: Set to true to force the application to stop. If you set Force to true, - Kinesis Data Analytics stops the application without taking a snapshot. Force-stopping - your application may lead to data loss or duplication. To prevent data loss or duplicate - processing of data during application restarts, we recommend you to take frequent snapshots - of your application. You can only force stop a Flink-based Kinesis Data Analytics - application. 
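# Illustrative sketch (the application name is hypothetical, and the response fields follow
# the service's DescribeApplication output shape): a minimal discover-and-start flow using
# the wrappers above.
apps = list_applications()
# apps["ApplicationSummaries"] lists the name, ARN, and status of each application.
detail = describe_application(
    "sample-app", Dict{String,Any}("IncludeAdditionalDetails" => true)
)
if detail["ApplicationDetail"]["ApplicationStatus"] == "READY"
    start_application("sample-app")
end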
You can't force stop a SQL-based Kinesis Data Analytics application. The - application must be in the STARTING, UPDATING, STOPPING, AUTOSCALING, or RUNNING status. + Managed Service for Apache Flink stops the application without taking a snapshot. + Force-stopping your application may lead to data loss or duplication. To prevent data loss + or duplicate processing of data during application restarts, we recommend you to take + frequent snapshots of your application. You can only force stop a Managed Service for + Apache Flink application. You can't force stop a SQL-based Kinesis Data Analytics + application. The application must be in the STARTING, UPDATING, STOPPING, AUTOSCALING, or + RUNNING status. """ function stop_application( ApplicationName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1465,9 +1470,9 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Adds one or more key-value tags to a Kinesis Data Analytics application. Note that the -maximum number of application tags includes system tags. The maximum number of user-defined -application tags is 50. For more information, see Using Tagging. +Adds one or more key-value tags to a Managed Service for Apache Flink application. Note +that the maximum number of application tags includes system tags. The maximum number of +user-defined application tags is 50. For more information, see Using Tagging. # Arguments - `resource_arn`: The ARN of the application to assign the tags. @@ -1506,12 +1511,12 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes one or more tags from a Kinesis Data Analytics application. For more information, -see Using Tagging. +Removes one or more tags from a Managed Service for Apache Flink application. For more +information, see Using Tagging. # Arguments -- `resource_arn`: The ARN of the Kinesis Data Analytics application from which to remove - the tags. +- `resource_arn`: The ARN of the Managed Service for Apache Flink application from which to + remove the tags. - `tag_keys`: A list of keys of tags to remove from the specified application. """ @@ -1549,11 +1554,10 @@ end update_application(application_name) update_application(application_name, params::Dict{String,<:Any}) -Updates an existing Kinesis Data Analytics application. Using this operation, you can -update application code, input configuration, and output configuration. Kinesis Data -Analytics updates the ApplicationVersionId each time you update your application. You -cannot update the RuntimeEnvironment of an existing application. If you need to update an -application's RuntimeEnvironment, you must delete the application and create it again. +Updates an existing Managed Service for Apache Flink application. Using this operation, you +can update application code, input configuration, and output configuration. Managed +Service for Apache Flink updates the ApplicationVersionId each time you update your +application. # Arguments - `application_name`: The name of the application to update. @@ -1574,6 +1578,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys version ID using DescribeApplication. For better concurrency support, use the ConditionalToken parameter instead of CurrentApplicationVersionId. - `"RunConfigurationUpdate"`: Describes updates to the application's starting parameters. 
+- `"RuntimeEnvironmentUpdate"`: Updates the Managed Service for Apache Flink runtime + environment used to run your code. To avoid issues you must: Ensure your new jar and + dependencies are compatible with the new runtime selected. Ensure your new code's state + is compatible with the snapshot from which your application will start - `"ServiceExecutionRoleUpdate"`: Describes updates to the service execution role. """ function update_application( @@ -1607,9 +1615,9 @@ end update_application_maintenance_configuration(application_maintenance_configuration_update, application_name) update_application_maintenance_configuration(application_maintenance_configuration_update, application_name, params::Dict{String,<:Any}) -Updates the maintenance configuration of the Kinesis Data Analytics application. You can -invoke this operation on an application that is in one of the two following states: READY -or RUNNING. If you invoke it when the application is in a state other than these two +Updates the maintenance configuration of the Managed Service for Apache Flink application. +You can invoke this operation on an application that is in one of the two following states: +READY or RUNNING. If you invoke it when the application is in a state other than these two states, it throws a ResourceInUseException. The service makes use of the updated configuration the next time it schedules maintenance for the application. If you invoke this operation after the service schedules maintenance, the service will apply the @@ -1618,8 +1626,8 @@ that you might not see the maintenance configuration update applied to the maint process that follows a successful invocation of this operation, but to the following maintenance process instead. To see the current maintenance configuration of your application, invoke the DescribeApplication operation. For information about application -maintenance, see Kinesis Data Analytics for Apache Flink Maintenance. This operation is -supported only for Amazon Kinesis Data Analytics for Apache Flink. +maintenance, see Managed Service for Apache Flink for Apache Flink Maintenance. This +operation is supported only for Managed Service for Apache Flink. # Arguments - `application_maintenance_configuration_update`: Describes the application maintenance diff --git a/src/services/kinesis_video.jl b/src/services/kinesis_video.jl index 90cbc49462..76d3ca598d 100644 --- a/src/services/kinesis_video.jl +++ b/src/services/kinesis_video.jl @@ -75,7 +75,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys implementation, Kinesis Video Streams does not use this name. - `"KmsKeyId"`: The ID of the Key Management Service (KMS) key that you want Kinesis Video Streams to use to encrypt stream data. If no key ID is specified, the default, Kinesis - Video-managed key (aws/kinesisvideo) is used. For more information, see DescribeKey. + Video-managed key (Amazon Web Services/kinesisvideo) is used. For more information, see + DescribeKey. - `"MediaType"`: The media type of the stream. Consumers of the stream can use this information when processing the stream. For more information about media types, see Media Types. If you choose to specify the MediaType, see Naming Requirements for guidelines. @@ -109,6 +110,45 @@ function create_stream( ) end +""" + delete_edge_configuration() + delete_edge_configuration(params::Dict{String,<:Any}) + +An asynchronous API that deletes a stream’s existing edge configuration, as well as the +corresponding media from the Edge Agent. 
When you invoke this API, the sync status is set +to DELETING. A deletion process starts, in which active edge jobs are stopped and all media +is deleted from the edge device. The time to delete varies, depending on the total amount +of stored media. If the deletion process fails, the sync status changes to DELETE_FAILED. +You will need to re-try the deletion. When the deletion process has completed successfully, +the edge configuration is no longer accessible. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"StreamARN"`: The Amazon Resource Name (ARN) of the stream. Specify either the + StreamName or the StreamARN. +- `"StreamName"`: The name of the stream from which to delete the edge configuration. + Specify either the StreamName or the StreamARN. +""" +function delete_edge_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return kinesis_video( + "POST", + "/deleteEdgeConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_edge_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/deleteEdgeConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_signaling_channel(channel_arn) delete_signaling_channel(channel_arn, params::Dict{String,<:Any}) @@ -205,8 +245,10 @@ end describe_edge_configuration(params::Dict{String,<:Any}) Describes a stream’s edge configuration that was set using the -StartEdgeConfigurationUpdate API. Use this API to get the status of the configuration if -the configuration is in sync with the Edge Agent. +StartEdgeConfigurationUpdate API and the latest status of the edge agent's recorder and +uploader jobs. Use this API to get the status of the configuration to determine if the +configuration is in sync with the Edge Agent. Use this API to evaluate the health of the +Edge Agent. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -275,9 +317,8 @@ end describe_mapped_resource_configuration() describe_mapped_resource_configuration(params::Dict{String,<:Any}) -Returns the most current information about the stream. Either streamName or streamARN -should be provided in the input. Returns the most current information about the stream. The -streamName or streamARN should be provided in the input. +Returns the most current information about the stream. The streamName or streamARN should +be provided in the input. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -531,6 +572,52 @@ function get_signaling_channel_endpoint( ) end +""" + list_edge_agent_configurations(hub_device_arn) + list_edge_agent_configurations(hub_device_arn, params::Dict{String,<:Any}) + +Returns an array of edge configurations associated with the specified Edge Agent. In the +request, you must specify the Edge Agent HubDeviceArn. + +# Arguments +- `hub_device_arn`: The \"Internet of Things (IoT) Thing\" Arn of the edge agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of edge configurations to return in the response. The + default is 5. +- `"NextToken"`: If you specify this parameter, when the result of a + ListEdgeAgentConfigurations operation is truncated, the call returns the NextToken in the + response. 
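# Illustrative sketch (the stream name is hypothetical, and the SyncStatus response field
# follows the service's DescribeEdgeConfiguration output): checking an edge configuration's
# sync status before deleting it, using the wrappers above.
edge_status = describe_edge_configuration(
    Dict{String,Any}("StreamName" => "example-camera-stream")
)
if get(edge_status, "SyncStatus", "") in ("IN_SYNC", "SYNC_FAILED")
    delete_edge_configuration(Dict{String,Any}("StreamName" => "example-camera-stream"))
end
# The deletion itself is asynchronous: the sync status moves to DELETING and, on failure,
# to DELETE_FAILED, in which case the request should be retried.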
To get another batch of edge configurations, provide this token in your next + request. +""" +function list_edge_agent_configurations( + HubDeviceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/listEdgeAgentConfigurations", + Dict{String,Any}("HubDeviceArn" => HubDeviceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_edge_agent_configurations( + HubDeviceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_video( + "POST", + "/listEdgeAgentConfigurations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("HubDeviceArn" => HubDeviceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_signaling_channels() list_signaling_channels(params::Dict{String,<:Any}) @@ -693,7 +780,10 @@ status will be set to SYNCING. You will have to wait for the sync status to reac terminal state such as: IN_SYNC, or SYNC_FAILED, before using this API again. If you invoke this API during the syncing process, a ResourceInUseException will be thrown. The connectivity of the stream’s edge configuration and the Edge Agent will be retried for 15 -minutes. After 15 minutes, the status will transition into the SYNC_FAILED state. +minutes. After 15 minutes, the status will transition into the SYNC_FAILED state. To move +an edge configuration from one device to another, use DeleteEdgeConfiguration to delete the +current edge configuration. You can then invoke StartEdgeConfigurationUpdate with an +updated Hub Device ARN. # Arguments - `edge_config`: The edge configuration details required to invoke the update process. @@ -914,24 +1004,26 @@ end update_data_retention(current_version, data_retention_change_in_hours, operation) update_data_retention(current_version, data_retention_change_in_hours, operation, params::Dict{String,<:Any}) - Increases or decreases the stream's data retention period by the value that you specify. -To indicate whether you want to increase or decrease the data retention period, specify the +Increases or decreases the stream's data retention period by the value that you specify. To +indicate whether you want to increase or decrease the data retention period, specify the Operation parameter in the request body. In the request, you must specify either the -StreamName or the StreamARN. The retention period that you specify replaces the current -value. This operation requires permission for the KinesisVideo:UpdateDataRetention action. -Changing the data retention period affects the data in the stream as follows: If the data -retention period is increased, existing data is retained for the new retention period. For -example, if the data retention period is increased from one hour to seven hours, all -existing data is retained for seven hours. If the data retention period is decreased, -existing data is retained for the new retention period. For example, if the data retention -period is decreased from seven hours to one hour, all existing data is retained for one -hour, and any data older than one hour is deleted immediately. +StreamName or the StreamARN. This operation requires permission for the +KinesisVideo:UpdateDataRetention action. Changing the data retention period affects the +data in the stream as follows: If the data retention period is increased, existing data +is retained for the new retention period. 
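# Illustrative sketch (the hub device ARN is hypothetical, and the EdgeConfigs response
# field is assumed from the service API): paging through every edge configuration
# registered to one Edge Agent with the list_edge_agent_configurations wrapper above.
hub_arn = "arn:aws:iot:us-east-1:111122223333:thing/example-edge-agent"
configs = Any[]
page = list_edge_agent_configurations(hub_arn)
append!(configs, get(page, "EdgeConfigs", []))
while haskey(page, "NextToken") && !isempty(page["NextToken"])
    page = list_edge_agent_configurations(
        hub_arn, Dict{String,Any}("NextToken" => page["NextToken"])
    )
    append!(configs, get(page, "EdgeConfigs", []))
end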
For example, if the data retention period is +increased from one hour to seven hours, all existing data is retained for seven hours. If +the data retention period is decreased, existing data is retained for the new retention +period. For example, if the data retention period is decreased from seven hours to one +hour, all existing data is retained for one hour, and any data older than one hour is +deleted immediately. # Arguments - `current_version`: The version of the stream whose retention period you want to change. To get the version, call either the DescribeStream or the ListStreams API. -- `data_retention_change_in_hours`: The retention period, in hours. The value you specify - replaces the current value. The maximum value for this parameter is 87600 (ten years). +- `data_retention_change_in_hours`: The number of hours to adjust the current retention by. + The value you specify is added to or subtracted from the current value, depending on the + operation. The minimum value for data retention is 0 and the maximum value is 87600 (ten + years). - `operation`: Indicates whether you want to increase or decrease the retention period. # Optional Parameters @@ -1028,9 +1120,13 @@ end update_media_storage_configuration(channel_arn, media_storage_configuration, params::Dict{String,<:Any}) Associates a SignalingChannel to a stream to store the media. There are two signaling modes -that can specified : If the StorageStatus is disabled, no data will be stored, and the -StreamARN parameter will not be needed. If the StorageStatus is enabled, the data will -be stored in the StreamARN provided. +that you can specify : If StorageStatus is enabled, the data will be stored in the +StreamARN provided. In order for WebRTC Ingestion to work, the stream must have data +retention enabled. If StorageStatus is disabled, no data will be stored, and the +StreamARN parameter will not be needed. If StorageStatus is enabled, direct +peer-to-peer (master-viewer) connections no longer occur. Peers connect directly to the +storage session. You must call the JoinStorageSession API to trigger an SDP offer send and +establish a connection between a peer and the storage session. # Arguments - `channel_arn`: The Amazon Resource Name (ARN) of the channel. diff --git a/src/services/kinesis_video_archived_media.jl b/src/services/kinesis_video_archived_media.jl index e3439f3251..b7c74cae59 100644 --- a/src/services/kinesis_video_archived_media.jl +++ b/src/services/kinesis_video_archived_media.jl @@ -26,7 +26,8 @@ the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format. You the amount of outgoing data by monitoring the GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams -Pricing and AWS Pricing. Charges for outgoing AWS data apply. +Pricing and Amazon Web Services Pricing. Charges for outgoing Amazon Web Services data +apply. # Arguments - `clip_fragment_selector`: The time range of the requested clip and the source of the @@ -94,15 +95,15 @@ protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH). Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. 
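# Illustrative sketch (the stream version is hypothetical): extending a Kinesis video
# stream's retention by 24 hours with the update_data_retention wrapper above. The current
# version comes from DescribeStream or ListStreams, and the operation must be one of the
# two documented values.
update_data_retention(
    "9f7e4c31-example-version",
    24,
    "INCREASE_DATA_RETENTION",
    Dict{String,Any}("StreamName" => "example-camera-stream"),
)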
Safeguard the token -with the same measures that you use with your AWS credentials. The media that is made -available through the manifest consists only of the requested stream, time range, and -format. No other media data (such as frames outside the requested window or alternate -bitrates) is made available. Provide the URL (containing the encrypted session token) for -the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis -Video Streams makes the initialization fragment and media fragments available through the -manifest URL. The initialization fragment contains the codec private data for the stream, -and other data needed to set up the video or audio decoder and renderer. The media -fragments contain encoded video frames or encoded audio samples. The media player +with the same measures that you use with your Amazon Web Services credentials. The media +that is made available through the manifest consists only of the requested stream, time +range, and format. No other media data (such as frames outside the requested window or +alternate bitrates) is made available. Provide the URL (containing the encrypted session +token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. +Kinesis Video Streams makes the initialization fragment and media fragments available +through the manifest URL. The initialization fragment contains the codec private data for +the stream, and other data needed to set up the video or audio decoder and renderer. The +media fragments contain encoded video frames or encoded audio samples. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions: GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to @@ -124,18 +125,19 @@ audio and video. Data retrieved with this action is billable. See Pricing for d can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For -pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for -both HLS sessions and outgoing AWS data apply. For more information about HLS, see HTTP -Live Streaming on the Apple Developer site. If an error is thrown after invoking a Kinesis -Video Streams archived media API, in addition to the HTTP status code and the response -body, it includes the following pieces of information: x-amz-ErrorType HTTP header – -contains a more specific error type in addition to what the HTTP status code provides. -x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can -better diagnose the problem if given the Request Id. Both the HTTP status code and the -ErrorType header can be utilized to make programmatic decisions about whether errors are -retry-able and under what conditions, as well as provide information on what actions the -client programmer might need to take in order to successfully try again. For more -information, see the Errors section at the bottom of this topic, as well as Common Errors. +pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services +Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply. 
For +more information about HLS, see HTTP Live Streaming on the Apple Developer site. If an +error is thrown after invoking a Kinesis Video Streams archived media API, in addition to +the HTTP status code and the response body, it includes the following pieces of +information: x-amz-ErrorType HTTP header – contains a more specific error type in +addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you +want to report an issue to Amazon Web Services the support team can better diagnose the +problem if given the Request Id. Both the HTTP status code and the ErrorType header can +be utilized to make programmatic decisions about whether errors are retry-able and under +what conditions, as well as provide information on what actions the client programmer might +need to take in order to successfully try again. For more information, see the Errors +section at the bottom of this topic, as well as Common Errors. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -242,10 +244,11 @@ Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the browser or media player to view the stream contents. Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation. An Amazon Kinesis video stream has the following requirements -for providing data through HLS: The media must contain h.264 or h.265 encoded video and, -optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be -V_MPEG/ISO/AVC (for h.264) or V_MPEG/ISO/HEVC (for h.265). Optionally, the codec ID of -track 2 should be A_AAC. Data retention must be greater than 0. The video track of each +for providing data through HLS: For streaming video, the media must contain H.264 or +H.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track +1 should be V_MPEG/ISO/AVC (for H.264) or V_MPEG/ISO/HEVC (for H.265). Optionally, the +codec ID of track 2 should be A_AAC. For audio only streaming, the codec ID of track 1 +should be A_AAC. Data retention must be greater than 0. The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags. The audio track @@ -262,63 +265,68 @@ returns an authenticated URL (that includes an encrypted session token) for the HLS master playlist (the root resource needed for streaming with HLS). Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use -with your AWS credentials. The media that is made available through the playlist consists -only of the requested stream, time range, and format. No other media data (such as frames -outside the requested window or alternate bitrates) is made available. Provide the URL -(containing the encrypted session token) for the HLS master playlist to a media player that -supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, -initialization fragment, and media fragments available through the master playlist URL. The -initialization fragment contains the codec private data for the stream, and other data -needed to set up the video or audio decoder and renderer. 
The media fragments contain -H.264-encoded video frames or AAC-encoded audio samples. The media player receives the -authenticated URL and requests stream metadata and media data normally. When the media -player requests data, it calls the following actions: GetHLSMasterPlaylist: Retrieves an -HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each -track, and additional metadata for the media player, including estimated bitrate and -resolution. GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL -to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to -access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist -also contains metadata about the stream that the player needs to play it, such as whether -the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for -sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated -with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media -playlist for the video track and the audio track (if applicable) that contains MP4 media -URLs for the specific track. GetMP4InitFragment: Retrieves the MP4 initialization -fragment. The media player typically loads the initialization fragment before loading any -media fragments. This fragment contains the \"fytp\" and \"moov\" MP4 atoms, and the child -atoms that are needed to initialize the media player decoder. The initialization fragment -does not correspond to a fragment in a Kinesis video stream. It contains only the codec -private data for the stream and respective track, which the media player needs to decode -the media frames. GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments -contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded -fragment's media frames and their timestamps. After the first media fragment is made -available in a streaming session, any fragments that don't contain the same codec private -data cause an error to be returned when those different media fragments are loaded. -Therefore, the codec private data should not change between fragments in a session. This -also means that the session fails if the fragments in a stream change from having only -video to having both audio and video. Data retrieved with this action is billable. See -Pricing for details. GetTSFragment: Retrieves MPEG TS fragments containing both -initialization and media data for all tracks in the stream. If the ContainerFormat is -MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve -stream media. Data retrieved with this action is billable. For more information, see -Kinesis Video Streams pricing. A streaming session URL must not be shared between -players. The service might throttle a session if multiple media players are sharing it. For -connection limits, see Kinesis Video Streams Limits. You can monitor the amount of data -that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon -CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, -see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video -Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply. -For more information about HLS, see HTTP Live Streaming on the Apple Developer site. If an +with your Amazon Web Services credentials. 
The media that is made available through the +playlist consists only of the requested stream, time range, and format. No other media data +(such as frames outside the requested window or alternate bitrates) is made available. +Provide the URL (containing the encrypted session token) for the HLS master playlist to a +media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media +playlist, initialization fragment, and media fragments available through the master +playlist URL. The initialization fragment contains the codec private data for the stream, +and other data needed to set up the video or audio decoder and renderer. The media +fragments contain H.264-encoded video frames or AAC-encoded audio samples. The media +player receives the authenticated URL and requests stream metadata and media data normally. +When the media player requests data, it calls the following actions: +GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the +GetHLSMediaPlaylist action for each track, and additional metadata for the media player, +including estimated bitrate and resolution. GetHLSMediaPlaylist: Retrieves an HLS media +playlist, which contains a URL to access the MP4 initialization fragment with the +GetMP4InitFragment action, and URLs to access the MP4 media fragments with the +GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream +that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. +The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. +The HLS media playlist is continually updated with new fragments for sessions with a +PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the +audio track (if applicable) that contains MP4 media URLs for the specific track. +GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically +loads the initialization fragment before loading any media fragments. This fragment +contains the \"fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to +initialize the media player decoder. The initialization fragment does not correspond to a +fragment in a Kinesis video stream. It contains only the codec private data for the stream +and respective track, which the media player needs to decode the media frames. +GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" +and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media +frames and their timestamps. For the HLS streaming session, in-track codec private data +(CPD) changes are supported. After the first media fragment is made available in a +streaming session, fragments can contain CPD changes for each track. Therefore, the +fragments in a session can have a different resolution, bit rate, or other information in +the CPD without interrupting playback. However, any change made in the track number or +track codec format can return an error when those different media fragments are loaded. For +example, streaming will fail if the fragments in the stream change from having only video +to having both audio and video, or if an AAC audio track is changed to an ALAW audio track. +For each streaming session, only 500 CPD changes are allowed. Data retrieved with this +action is billable. For information, see Pricing. GetTSFragment: Retrieves MPEG TS +fragments containing both initialization and media data for all tracks in the stream. 
If +the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and +GetMP4MediaFragment to retrieve stream media. Data retrieved with this action is billable. +For more information, see Kinesis Video Streams pricing. A streaming session URL must +not be shared between players. The service might throttle a session if multiple media +players are sharing it. For connection limits, see Kinesis Video Streams Limits. You can +monitor the amount of data that the media player consumes by monitoring the +GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using +CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For +pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services +Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply. For +more information about HLS, see HTTP Live Streaming on the Apple Developer site. If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you -want to report an issue to AWS, the support team can better diagnose the problem if given -the Request Id. Both the HTTP status code and the ErrorType header can be utilized to -make programmatic decisions about whether errors are retry-able and under what conditions, -as well as provide information on what actions the client programmer might need to take in -order to successfully try again. For more information, see the Errors section at the bottom -of this topic, as well as Common Errors. +want to report an issue to Amazon Web Services, the support team can better diagnose the +problem if given the Request Id. Both the HTTP status code and the ErrorType header can +be utilized to make programmatic decisions about whether errors are retry-able and under +what conditions, as well as provide information on what actions the client programmer might +need to take in order to successfully try again. For more information, see the Errors +section at the bottom of this topic, as well as Common Errors. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -435,22 +443,19 @@ function get_hlsstreaming_session_url( end """ - get_images(end_timestamp, format, image_selector_type, sampling_interval, start_timestamp) - get_images(end_timestamp, format, image_selector_type, sampling_interval, start_timestamp, params::Dict{String,<:Any}) + get_images(end_timestamp, format, image_selector_type, start_timestamp) + get_images(end_timestamp, format, image_selector_type, start_timestamp, params::Dict{String,<:Any}) -Retrieves a list of Images corresponding to each timestamp for a given time range, sampling +Retrieves a list of images corresponding to each timestamp for a given time range, sampling interval, and image format configuration. # Arguments -- `end_timestamp`: The end timestamp for the range of images to be generated. +- `end_timestamp`: The end timestamp for the range of images to be generated. If the time + range between StartTimestamp and EndTimestamp is more than 300 seconds above + StartTimestamp, you will receive an IllegalArgumentException. - `format`: The format that will be used to encode the image. 
- `image_selector_type`: The origin of the Server or Producer timestamps to use to generate the images. -- `sampling_interval`: The time interval in milliseconds (ms) at which the images need to - be generated from the stream. The minimum value that can be provided is 3000 ms. If the - timestamp range is less than the sampling interval, the Image from the startTimestamp will - be returned if available. The minimum value of 3000 ms is a soft limit. If needed, a - lower sampling frequency can be requested. - `start_timestamp`: The starting point from which the images should be generated. This StartTimestamp must be within an inclusive range of timestamps for an image to be returned. @@ -469,9 +474,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter is provided, its original aspect ratio will be used to calculate the WidthPixels ratio. If neither parameter is provided, the original image size will be returned. - `"MaxResults"`: The maximum number of images to be returned by the API. The default - limit is 100 images per API response. The additional results will be paginated. + limit is 25 images per API response. Providing a MaxResults greater than this value will + result in a page size of 25. Any additional results will be paginated. - `"NextToken"`: A token that specifies where to start paginating the next set of Images. This is the GetImages:NextToken from a previously truncated response. +- `"SamplingInterval"`: The time interval in milliseconds (ms) at which the images need to + be generated from the stream. The minimum value that can be provided is 200 ms (5 images + per second). If the timestamp range is less than the sampling interval, the image from the + startTimestamp will be returned if available. - `"StreamARN"`: The Amazon Resource Name (ARN) of the stream from which to retrieve the images. You must specify either the StreamName or the StreamARN. - `"StreamName"`: The name of the stream from which to retrieve the images. You must @@ -487,7 +497,6 @@ function get_images( EndTimestamp, Format, ImageSelectorType, - SamplingInterval, StartTimestamp; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -498,7 +507,6 @@ function get_images( "EndTimestamp" => EndTimestamp, "Format" => Format, "ImageSelectorType" => ImageSelectorType, - "SamplingInterval" => SamplingInterval, "StartTimestamp" => StartTimestamp, ); aws_config=aws_config, @@ -509,7 +517,6 @@ function get_images( EndTimestamp, Format, ImageSelectorType, - SamplingInterval, StartTimestamp, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -524,7 +531,6 @@ function get_images( "EndTimestamp" => EndTimestamp, "Format" => Format, "ImageSelectorType" => ImageSelectorType, - "SamplingInterval" => SamplingInterval, "StartTimestamp" => StartTimestamp, ), params, @@ -547,12 +553,12 @@ thrown after invoking a Kinesis Video Streams archived media API, in addition to status code and the response body, it includes the following pieces of information: x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an -issue to AWS, the support team can better diagnose the problem if given the Request Id. 
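Because `SamplingInterval` has moved from a required positional argument of `get_images` to an optional parameter, callers now pass it through the `params` dictionary. A minimal sketch, assuming the `@service` bindings, epoch-second timestamps, and the `JPEG`/`PRODUCER_TIMESTAMP` enum values; in practice the request may also need to be directed at the stream's data endpoint obtained via `GetDataEndpoint`, which is omitted here.

```julia
# Illustrative only. Request JPEG images over a 5-minute window (EndTimestamp
# must be within 300 seconds of StartTimestamp), sampling one image every
# 2000 ms. SamplingInterval now travels in the optional params Dict.
using AWS, Dates
@service Kinesis_Video_Archived_Media

start_ts = datetime2unix(DateTime(2024, 6, 1, 12, 0, 0))  # epoch seconds
end_ts   = start_ts + 300

images = Kinesis_Video_Archived_Media.get_images(
    end_ts,
    "JPEG",                  # Format
    "PRODUCER_TIMESTAMP",    # ImageSelectorType
    start_ts,
    Dict(
        "StreamName"       => "example-stream",
        "SamplingInterval" => 2000,  # minimum 200 ms
        "MaxResults"       => 25,    # larger values are paged at 25 per response
    ),
)
```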
-Both the HTTP status code and the ErrorType header can be utilized to make programmatic -decisions about whether errors are retry-able and under what conditions, as well as provide -information on what actions the client programmer might need to take in order to -successfully try again. For more information, see the Errors section at the bottom of this -topic, as well as Common Errors. +issue to Amazon Web Services, the support team can better diagnose the problem if given the +Request Id. Both the HTTP status code and the ErrorType header can be utilized to make +programmatic decisions about whether errors are retry-able and under what conditions, as +well as provide information on what actions the client programmer might need to take in +order to successfully try again. For more information, see the Errors section at the bottom +of this topic, as well as Common Errors. # Arguments - `fragments`: A list of the numbers of fragments for which to retrieve media. You retrieve @@ -606,17 +612,17 @@ parameter. If an error is thrown after invoking a Kinesis Video Streams archi API, in addition to the HTTP status code and the response body, it includes the following pieces of information: x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – -if you want to report an issue to AWS, the support team can better diagnose the problem if -given the Request Id. Both the HTTP status code and the ErrorType header can be utilized -to make programmatic decisions about whether errors are retry-able and under what -conditions, as well as provide information on what actions the client programmer might need -to take in order to successfully try again. For more information, see the Errors section at -the bottom of this topic, as well as Common Errors. +if you want to report an issue to Amazon Web Services, the support team can better diagnose +the problem if given the Request Id. Both the HTTP status code and the ErrorType header +can be utilized to make programmatic decisions about whether errors are retry-able and +under what conditions, as well as provide information on what actions the client programmer +might need to take in order to successfully try again. For more information, see the Errors +section at the bottom of this topic, as well as Common Errors. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"FragmentSelector"`: Describes the timestamp range and timestamp origin for the range of - fragments to return. + fragments to return. This is only required when the NextToken isn't passed in the API. - `"MaxResults"`: The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results, then a ListFragmentsOutputNextToken is provided in the output that you can use to resume diff --git a/src/services/kms.jl b/src/services/kms.jl index 128206e809..8721f032c8 100644 --- a/src/services/kms.jl +++ b/src/services/kms.jl @@ -15,7 +15,8 @@ Service Developer Guide. The KMS key that you use for this operation must be in compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. 
Required permissions: kms:CancelKeyDeletion (key -policy) Related operations: ScheduleKeyDeletion +policy) Related operations: ScheduleKeyDeletion Eventual consistency: The KMS API +follows an eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the KMS key whose deletion is being canceled. Specify the key ID or @@ -90,7 +91,8 @@ Management Service Developer Guide. Cross-account use: No. You cannot perform t operation on a custom key store in a different Amazon Web Services account. Required permissions: kms:ConnectCustomKeyStore (IAM policy) Related operations CreateCustomKeyStore DeleteCustomKeyStore DescribeCustomKeyStores -DisconnectCustomKeyStore UpdateCustomKeyStore +DisconnectCustomKeyStore UpdateCustomKeyStore Eventual consistency: The KMS API +follows an eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `custom_key_store_id`: Enter the key store ID of the custom key store that you want to @@ -147,7 +149,8 @@ Cross-account use: No. You cannot perform this operation on an alias in a differ Web Services account. Required permissions kms:CreateAlias on the alias (IAM policy). kms:CreateAlias on the KMS key (key policy). For details, see Controlling access to aliases in the Key Management Service Developer Guide. Related operations: DeleteAlias - ListAliases UpdateAlias + ListAliases UpdateAlias Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `alias_name`: Specifies the alias name. This value must begin with alias/ followed by a @@ -239,7 +242,9 @@ failures, see Troubleshooting a custom key store in the Key Management Service D Guide. Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account. Required permissions: kms:CreateCustomKeyStore (IAM policy). Related operations: ConnectCustomKeyStore DeleteCustomKeyStore -DescribeCustomKeyStores DisconnectCustomKeyStore UpdateCustomKeyStore +DescribeCustomKeyStores DisconnectCustomKeyStore UpdateCustomKeyStore Eventual +consistency: The KMS API follows an eventual consistency model. For more information, see +KMS eventual consistency. # Arguments - `custom_key_store_name`: Specifies a friendly name for the custom key store. The name @@ -311,8 +316,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique in the Amazon Web Services account and Region. An external key store with PUBLIC_ENDPOINT connectivity cannot use the same XksProxyUriEndpoint value as an external key store with - VPC_ENDPOINT_SERVICE connectivity in the same Amazon Web Services Region. Each external - key store with VPC_ENDPOINT_SERVICE connectivity must have its own private DNS name. The + VPC_ENDPOINT_SERVICE connectivity in this Amazon Web Services Region. Each external key + store with VPC_ENDPOINT_SERVICE connectivity must have its own private DNS name. The XksProxyUriEndpoint value for external key stores with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must be unique in the Amazon Web Services account and Region. - `"XksProxyUriPath"`: Specifies the base path to the proxy APIs for this external key @@ -386,6 +391,8 @@ Key Management Service Developer Guide. Cross-account use: Yes. 
To perform this on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter. Required permissions: kms:CreateGrant (key policy) Related operations: ListGrants ListRetirableGrants RetireGrant RevokeGrant +Eventual consistency: The KMS API follows an eventual consistency model. For more +information, see KMS eventual consistency. # Arguments - `grantee_principal`: The identity that gets the permissions specified in the grant. To @@ -426,6 +433,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys constraint cannot exceed 384 characters. For information about grant constraints, see Using grant constraints in the Key Management Service Developer Guide. For more information about encryption context, see Encryption context in the Key Management Service Developer Guide . +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -521,15 +531,19 @@ encrypt and decrypt or sign and verify. You can't change these properties after is created. Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key -so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt -or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can -be used only to sign and verify messages. For information about asymmetric KMS keys, see -Asymmetric KMS keys in the Key Management Service Developer Guide. HMAC KMS key To -create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. -Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even -though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't -change these properties after the KMS key is created. HMAC KMS keys are symmetric keys that -never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify +so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with +RSA key pairs can be used to encrypt and decrypt data or sign and verify messages (but not +both). KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages +or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can be used only to +sign and verify messages. KMS keys with SM2 key pairs (China Regions only) can be used to +either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you +must choose one key usage type). For information about asymmetric KMS keys, see Asymmetric +KMS keys in the Key Management Service Developer Guide. HMAC KMS key To create an HMAC +KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the +KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though +GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. 
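The `DryRun` parameter that this update adds to `CreateGrant` (and to several other KMS operations below) lets callers validate a request without changing anything. A rough sketch, assuming the `@service KMS` binding and the KMS `Operations` argument, which is required by the API but not shown in this hunk; the service is assumed to report the dry-run outcome through an error response, so the call is wrapped in `try`/`catch`.

```julia
# Illustrative only. Validate a CreateGrant request without creating the grant.
using AWS
@service KMS

key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"

try
    KMS.create_grant(
        "arn:aws:iam::111122223333:role/ExampleGranteeRole",  # grantee principal
        key_id,
        ["Decrypt", "GenerateDataKey"],  # Operations: required by the API, not shown in this hunk
        Dict("DryRun" => true),
    )
catch err
    # A dry run changes nothing; the outcome is reported in the error response.
    @info "CreateGrant dry-run result" err
end
```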
You can't change +these properties after the KMS key is created. HMAC KMS keys are symmetric keys that never +leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes. Multi-Region primary keys Imported key material To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, @@ -588,7 +602,8 @@ in a different Amazon Web Services account. Required permissions: kms:CreateKey policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide. Related operations: DescribeKey ListKeys -ScheduleKeyDeletion +ScheduleKeyDeletion Eventual consistency: The KMS API follows an eventual consistency +model. For more information, see KMS eventual consistency. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -630,20 +645,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys your data. These services do not support asymmetric KMS keys or HMAC KMS keys. KMS supports the following key specs for KMS keys: Symmetric encryption key (default) SYMMETRIC_DEFAULT HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 - HMAC_512 Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096 - Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1) - ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) Other asymmetric elliptic curve - key pairs ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. SM2 key - pairs (China Regions only) SM2 + HMAC_512 Asymmetric RSA key pairs (encryption and decryption -or- signing and + verification) RSA_2048 RSA_3072 RSA_4096 Asymmetric NIST-recommended + elliptic curve key pairs (signing and verification -or- deriving shared secrets) + ECC_NIST_P256 (secp256r1) ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) + Other asymmetric elliptic curve key pairs (signing and verification) ECC_SECG_P256K1 + (secp256k1), commonly used for cryptocurrencies. SM2 key pairs (encryption and + decryption -or- signing and verification -or- deriving shared secrets) SM2 (China + Regions only) - `"KeyUsage"`: Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created. Select only one valid value. For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT. For HMAC KMS keys (symmetric), - specify GENERATE_VERIFY_MAC. For asymmetric KMS keys with RSA key material, specify - ENCRYPT_DECRYPT or SIGN_VERIFY. For asymmetric KMS keys with ECC key material, specify - SIGN_VERIFY. For asymmetric KMS keys with SM2 key material (China Regions only), specify - ENCRYPT_DECRYPT or SIGN_VERIFY. + specify GENERATE_VERIFY_MAC. For asymmetric KMS keys with RSA key pairs, specify + ENCRYPT_DECRYPT or SIGN_VERIFY. For asymmetric KMS keys with NIST-recommended elliptic + curve key pairs, specify SIGN_VERIFY or KEY_AGREEMENT. For asymmetric KMS keys with + ECC_SECG_P256K1 key pairs specify SIGN_VERIFY. For asymmetric KMS keys with SM2 key pairs + (China Regions only), specify ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT. 
- `"MultiRegion"`: Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key. For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this @@ -760,20 +779,25 @@ compute environment in Amazon EC2. To call Decrypt for a Nitro enclave, use the Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of the plaintext data, the response includes the plaintext data encrypted with the public key from the attestation -document (CiphertextForRecipient).For information about the interaction between KMS and +document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in -the Key Management Service Developer Guide.. The KMS key that you use for this operation +the Key Management Service Developer Guide. The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: Yes. If you use the KeyId parameter to identify a KMS key in a different Amazon Web Services account, specify the key ARN or the alias ARN of the KMS key. Required permissions: kms:Decrypt (key policy) Related operations: Encrypt GenerateDataKey GenerateDataKeyPair ReEncrypt +Eventual consistency: The KMS API follows an eventual consistency model. For more +information, see KMS eventual consistency. # Arguments - `ciphertext_blob`: Ciphertext to be decrypted. The blob includes metadata. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionAlgorithm"`: Specifies the encryption algorithm that will be used to decrypt the ciphertext. Specify the same algorithm that was used to encrypt the data. If you specify a different algorithm, the Decrypt operation fails. This parameter is required only @@ -860,7 +884,8 @@ use: No. You cannot perform this operation on an alias in a different Amazon Web account. Required permissions kms:DeleteAlias on the alias (IAM policy). kms:DeleteAlias on the KMS key (key policy). For details, see Controlling access to aliases in the Key Management Service Developer Guide. Related operations: CreateAlias - ListAliases UpdateAlias + ListAliases UpdateAlias Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `alias_name`: The alias to be deleted. The alias name must begin with alias/ followed by @@ -919,7 +944,8 @@ returns a JSON object with no properties. Cross-account use: No. You cannot per operation on a custom key store in a different Amazon Web Services account. Required permissions: kms:DeleteCustomKeyStore (IAM policy) Related operations: ConnectCustomKeyStore CreateCustomKeyStore DescribeCustomKeyStores -DisconnectCustomKeyStore UpdateCustomKeyStore +DisconnectCustomKeyStore UpdateCustomKeyStore Eventual consistency: The KMS API +follows an eventual consistency model. For more information, see KMS eventual consistency. 
# Arguments - `custom_key_store_id`: Enter the ID of the custom key store you want to delete. To find @@ -967,7 +993,8 @@ operation must be in a compatible key state. For details, see Key states of KMS Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:DeleteImportedKeyMaterial (key policy) Related operations: GetParametersForImport - ImportKeyMaterial + ImportKeyMaterial Eventual consistency: The KMS API follows an eventual consistency +model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the KMS key from which you are deleting imported key material. The @@ -998,6 +1025,140 @@ function delete_imported_key_material( ) end +""" + derive_shared_secret(key_agreement_algorithm, key_id, public_key) + derive_shared_secret(key_agreement_algorithm, key_id, public_key, params::Dict{String,<:Any}) + +Derives a shared secret using a key agreement algorithm. You must use an asymmetric +NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) KMS key pair with a +KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret. DeriveSharedSecret uses the +Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to establish a key +agreement between two peers by deriving a shared secret from their elliptic curve +public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns +to derive a symmetric key that can encrypt and decrypt data that is sent between the two +peers, or that can generate and verify HMACs. KMS recommends that you follow NIST +recommendations for key derivation when using the raw shared secret to derive a symmetric +key. The following workflow demonstrates how to establish key agreement over an insecure +communication channel using DeriveSharedSecret. Alice calls CreateKey to create an +asymmetric KMS key pair with a KeyUsage value of KEY_AGREEMENT. The asymmetric KMS key must +use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec. Bob +creates an elliptic curve key pair. Bob can call CreateKey to create an asymmetric KMS key +pair or generate a key pair outside of KMS. Bob's key pair must use the same +NIST-recommended elliptic curve (ECC) or SM2 (China Regions ony) curve as Alice. Alice +and Bob exchange their public keys through an insecure communication channel (like the +internet). Use GetPublicKey to download the public key of your asymmetric KMS key pair. +KMS strongly recommends verifying that the public key you receive came from the expected +party before using it to derive a shared secret. Alice calls DeriveSharedSecret. KMS +uses the private key from the KMS key pair generated in Step 1, Bob's public key, and the +Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the shared secret. +The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret +returns the raw shared secret. Bob uses the Elliptic Curve Cryptography Cofactor +Diffie-Hellman Primitive to calculate the same raw secret using his private key and Alice's +public key. To derive a shared secret you must provide a key agreement algorithm, the +private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 (China +Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic +curve or SM2 (China Regions only) key pair. 
The public key can be from another asymmetric +KMS key pair or from a key pair generated outside of KMS, but both key pairs must be on the +same elliptic curve. The KMS key that you use for this operation must be in a compatible +key state. For details, see Key states of KMS keys in the Key Management Service Developer +Guide. Cross-account use: Yes. To perform this operation with a KMS key in a different +Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId +parameter. Required permissions: kms:DeriveSharedSecret (key policy) Related operations: + CreateKey GetPublicKey DescribeKey Eventual consistency: The KMS API follows +an eventual consistency model. For more information, see KMS eventual consistency. + +# Arguments +- `key_agreement_algorithm`: Specifies the key agreement algorithm used to derive the + shared secret. The only valid value is ECDH. +- `key_id`: Identifies an asymmetric NIST-recommended ECC or SM2 (China Regions only) KMS + key. KMS uses the private key in the specified key pair to derive the shared secret. The + key usage of the KMS key must be KEY_AGREEMENT. To find the KeyUsage of a KMS key, use the + DescribeKey operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias + ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a + different Amazon Web Services account, you must use the key ARN or alias ARN. For example: + Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: + arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: + alias/ExampleAlias Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias + name and alias ARN, use ListAliases. +- `public_key`: Specifies the public key in your peer's NIST-recommended elliptic curve + (ECC) or SM2 (China Regions only) key pair. The public key must be a DER-encoded X.509 + public key, also known as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280. + GetPublicKey returns the public key of an asymmetric KMS key pair in the required + DER-encoded format. If you use Amazon Web Services CLI version 1, you must provide the + DER-encoded X.509 public key in a file. Otherwise, the Amazon Web Services CLI + Base64-encodes the public key a second time, resulting in a ValidationException. You can + specify the public key as binary data in a file using fileb (fileb://<path-to-file>) + or in-line using a Base64 encoded string. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. +- `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call + this operation comes from a new grant that has not yet achieved eventual consistency. For + more information, see Grant token and Using a grant token in the Key Management Service + Developer Guide. +- `"Recipient"`: A signed attestation document from an Amazon Web Services Nitro enclave + and the encryption algorithm to use with the enclave's public key. The only valid + encryption algorithm is RSAES_OAEP_SHA_256. This parameter only supports attestation + documents for Amazon Web Services Nitro Enclaves. 
To call DeriveSharedSecret for an Amazon + Web Services Nitro Enclaves, use the Amazon Web Services Nitro Enclaves SDK to generate the + attestation document and then use the Recipient parameter from any Amazon Web Services SDK + to provide the attestation document for the enclave. When you use this parameter, instead + of returning a plaintext copy of the shared secret, KMS encrypts the plaintext shared + secret under the public key in the attestation document, and returns the resulting + ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be + decrypted only with the private key in the enclave. The CiphertextBlob field in the + response contains the encrypted shared secret derived from the KMS key specified by the + KeyId parameter and public key specified by the PublicKey parameter. The SharedSecret field + in the response is null or empty. For information about the interaction between KMS and + Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in + the Key Management Service Developer Guide. +""" +function derive_shared_secret( + KeyAgreementAlgorithm, + KeyId, + PublicKey; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kms( + "DeriveSharedSecret", + Dict{String,Any}( + "KeyAgreementAlgorithm" => KeyAgreementAlgorithm, + "KeyId" => KeyId, + "PublicKey" => PublicKey, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function derive_shared_secret( + KeyAgreementAlgorithm, + KeyId, + PublicKey, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kms( + "DeriveSharedSecret", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "KeyAgreementAlgorithm" => KeyAgreementAlgorithm, + "KeyId" => KeyId, + "PublicKey" => PublicKey, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_custom_key_stores() describe_custom_key_stores(params::Dict{String,<:Any}) @@ -1026,7 +1187,8 @@ Management Service Developer Guide. Cross-account use: No. You cannot perform t operation on a custom key store in a different Amazon Web Services account. Required permissions: kms:DescribeCustomKeyStores (IAM policy) Related operations: ConnectCustomKeyStore CreateCustomKeyStore DeleteCustomKeyStore -DisconnectCustomKeyStore UpdateCustomKeyStore +DisconnectCustomKeyStore UpdateCustomKeyStore Eventual consistency: The KMS API +follows an eventual consistency model. For more information, see KMS eventual consistency. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1089,7 +1251,8 @@ Services alias with no key ID. Cross-account use: Yes. To perform this operatio KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:DescribeKey (key policy) Related operations: GetKeyPolicy GetKeyRotationStatus ListAliases ListGrants -ListKeys ListResourceTags ListRetirableGrants +ListKeys ListResourceTags ListRetirableGrants Eventual consistency: The KMS API +follows an eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Describes the specified KMS key. If you specify a predefined Amazon Web @@ -1141,6 +1304,8 @@ The KMS key that you use for this operation must be in a compatible key state. F see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. 
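The key-agreement workflow described above maps onto the generated wrappers roughly as follows. The `derive_shared_secret` signature comes from this hunk; `create_key`, `get_public_key`, the base64 encoding of the DER public key, and the response field names are assumptions about the rest of the KMS bindings.

```julia
# Illustrative only. Establish a shared secret with ECDH, following the
# workflow in the docstring above.
using AWS
using Base64
@service KMS

# Alice: an asymmetric key pair whose only permitted use is key agreement.
alice_key = KMS.create_key(Dict(
    "KeySpec"  => "ECC_NIST_P256",
    "KeyUsage" => "KEY_AGREEMENT",
))
alice_key_id = alice_key["KeyMetadata"]["KeyId"]

# Download Alice's public key and hand it to Bob over any channel, after
# verifying it really came from Alice.
alice_public = KMS.get_public_key(alice_key_id)["PublicKey"]

# Alice derives the shared secret from her KMS private key and Bob's
# DER-encoded public key (a placeholder file here).
bob_public_key_der = read("bob_public_key.der")
secret = KMS.derive_shared_secret("ECDH", alice_key_id, base64encode(bob_public_key_der))
raw_shared_secret = secret["SharedSecret"]
```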
You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:DisableKey (key policy) Related operations: EnableKey + Eventual consistency: The KMS API follows an eventual consistency model. For more +information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the KMS key to disable. Specify the key ID or key ARN of the KMS @@ -1187,6 +1352,8 @@ state. For details, see Key states of KMS keys in the Key Management Service Dev Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:DisableKeyRotation (key policy) Related operations: EnableKeyRotation GetKeyRotationStatus +ListKeyRotations RotateKeyOnDemand Eventual consistency: The KMS API follows an +eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies a symmetric encryption KMS key. You cannot enable or disable @@ -1238,7 +1405,8 @@ no properties. Cross-account use: No. You cannot perform this operation on a cu store in a different Amazon Web Services account. Required permissions: kms:DisconnectCustomKeyStore (IAM policy) Related operations: ConnectCustomKeyStore CreateCustomKeyStore DeleteCustomKeyStore DescribeCustomKeyStores -UpdateCustomKeyStore +UpdateCustomKeyStore Eventual consistency: The KMS API follows an eventual consistency +model. For more information, see KMS eventual consistency. # Arguments - `custom_key_store_id`: Enter the ID of the custom key store you want to disconnect. To @@ -1281,7 +1449,8 @@ cryptographic operations. The KMS key that you use for this operation must be i compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:EnableKey (key policy) -Related operations: DisableKey +Related operations: DisableKey Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the KMS key to enable. Specify the key ID or key ARN of the KMS key. @@ -1314,27 +1483,37 @@ end enable_key_rotation(key_id, params::Dict{String,<:Any}) Enables automatic rotation of the key material of the specified symmetric encryption KMS -key. When you enable automatic rotation of acustomer managed KMS key, KMS rotates the key -material of the KMS key one year (approximately 365 days) from the enable date and every -year thereafter. You can monitor rotation of the key material for your KMS keys in -CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer -managed KMS key, use the DisableKeyRotation operation. Automatic key rotation is supported -only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric -KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key -store. To enable or disable automatic rotation of a set of related multi-Region keys, set -the property on the primary key. You cannot enable or disable automatic rotation Amazon -Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services -managed keys every year. Rotation of Amazon Web Services owned KMS keys varies. 
In May -2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every -three years (approximately 1,095 days) to every year (approximately 365 days). New Amazon -Web Services managed keys are automatically rotated one year after they are created, and -approximately every year thereafter. Existing Amazon Web Services managed keys are -automatically rotated one year after their most recent rotation, and every year thereafter. - The KMS key that you use for this operation must be in a compatible key state. For -details, see Key states of KMS keys in the Key Management Service Developer Guide. -Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon -Web Services account. Required permissions: kms:EnableKeyRotation (key policy) Related -operations: DisableKeyRotation GetKeyRotationStatus +key. By default, when you enable automatic rotation of a customer managed KMS key, KMS +rotates the key material of the KMS key one year (approximately 365 days) from the enable +date and every year thereafter. You can use the optional RotationPeriodInDays parameter to +specify a custom rotation period when you enable key rotation, or you can use +RotationPeriodInDays to modify the rotation period of a key that you previously enabled +automatic key rotation on. You can monitor rotation of the key material for your KMS keys +in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer +managed KMS key, use the DisableKeyRotation operation. You can use the GetKeyRotationStatus +operation to identify any in progress rotations. You can use the ListKeyRotations operation +to view the details of completed rotations. Automatic key rotation is supported only on +symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, +HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To +enable or disable automatic rotation of a set of related multi-Region keys, set the +property on the primary key. You cannot enable or disable automatic rotation of Amazon Web +Services managed KMS keys. KMS always rotates the key material of Amazon Web Services +managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the +Amazon Web Services service that owns the key. In May 2022, KMS changed the rotation +schedule for Amazon Web Services managed keys from every three years (approximately 1,095 +days) to every year (approximately 365 days). New Amazon Web Services managed keys are +automatically rotated one year after they are created, and approximately every year +thereafter. Existing Amazon Web Services managed keys are automatically rotated one year +after their most recent rotation, and every year thereafter. The KMS key that you use for +this operation must be in a compatible key state. For details, see Key states of KMS keys +in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform +this operation on a KMS key in a different Amazon Web Services account. Required +permissions: kms:EnableKeyRotation (key policy) Related operations: DisableKeyRotation + GetKeyRotationStatus ListKeyRotations RotateKeyOnDemand You can perform +on-demand (RotateKeyOnDemand) rotation of the key material in customer managed KMS keys, +regardless of whether or not automatic key rotation is enabled. Eventual consistency: +The KMS API follows an eventual consistency model. For more information, see KMS eventual +consistency. 
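A short sketch of enabling rotation with the new `RotationPeriodInDays` parameter, assuming the `@service KMS` binding and a `get_key_rotation_status` wrapper for checking the result; the 180-day period is only an example.

```julia
# Illustrative only. Enable automatic rotation with a custom period; omitting
# RotationPeriodInDays keeps the default of 365 days.
using AWS
@service KMS

key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"

KMS.enable_key_rotation(key_id, Dict("RotationPeriodInDays" => 180))

# Inspect the rotation configuration afterwards (wrapper name assumed).
status = KMS.get_key_rotation_status(key_id)
```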
# Arguments - `key_id`: Identifies a symmetric encryption KMS key. You cannot enable automatic rotation @@ -1345,6 +1524,14 @@ operations: DisableKeyRotation GetKeyRotationStatus arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"RotationPeriodInDays"`: Use this parameter to specify a custom period of time between + each rotation date. If no value is specified, the default value is 365 days. The rotation + period defines the number of days after you enable automatic key rotation that KMS will + rotate your key material, and the number of days between each automatic rotation + thereafter. You can use the kms:RotationPeriodInDays condition key to further constrain + the values that principals can specify in the RotationPeriodInDays parameter. """ function enable_key_rotation(KeyId; aws_config::AbstractAWSConfig=global_aws_config()) return kms( @@ -1400,7 +1587,8 @@ states of KMS keys in the Key Management Service Developer Guide. Cross-account To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:Encrypt (key policy) Related operations: Decrypt GenerateDataKey - GenerateDataKeyPair + GenerateDataKeyPair Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the KMS key to use in the encryption operation. The KMS key must @@ -1417,6 +1605,9 @@ permissions: kms:Encrypt (key policy) Related operations: Decrypt Gener # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionAlgorithm"`: Specifies the encryption algorithm that KMS will use to encrypt the plaintext message. The algorithm must be compatible with the KMS key that you specify. This parameter is required only for asymmetric KMS keys. The default value, @@ -1516,7 +1707,9 @@ plaintext data key from memory. Cross-account use: Yes. To perform this opera KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:GenerateDataKey (key policy) Related operations: Decrypt Encrypt GenerateDataKeyPair -GenerateDataKeyPairWithoutPlaintext GenerateDataKeyWithoutPlaintext +GenerateDataKeyPairWithoutPlaintext GenerateDataKeyWithoutPlaintext Eventual +consistency: The KMS API follows an eventual consistency model. For more information, see +KMS eventual consistency. # Arguments - `key_id`: Specifies the symmetric encryption KMS key that encrypts the data key. You @@ -1533,6 +1726,9 @@ GenerateDataKeyPairWithoutPlaintext GenerateDataKeyWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. 
- `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the data key. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output. An encryption @@ -1595,20 +1791,20 @@ Returns a unique asymmetric data key pair for use outside of KMS. This operation plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to perform asymmetric cryptography and implement digital signatures outside of KMS. The -bytes in the keys are random; they not related to the caller or to the KMS key that is used -to encrypt the private key. You can use the public key that GenerateDataKeyPair returns to -encrypt data or verify a signature outside of KMS. Then, store the encrypted private key -with the data. When you are ready to decrypt data or sign a message, you can use the -Decrypt operation to decrypt the encrypted private key. To generate a data key pair, you -must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. -You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type -and origin of your KMS key, use the DescribeKey operation. Use the KeyPairSpec parameter -to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also -choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use -RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot -enforce any restrictions on the use of data key pairs outside of KMS. If you are using the -data key pair to encrypt data, or for any operation where you don't immediately need a -private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. +bytes in the keys are random; they are not related to the caller or to the KMS key that is +used to encrypt the private key. You can use the public key that GenerateDataKeyPair +returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted +private key with the data. When you are ready to decrypt data or sign a message, you can +use the Decrypt operation to decrypt the encrypted private key. To generate a data key +pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data +key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get +the type and origin of your KMS key, use the DescribeKey operation. Use the KeyPairSpec +parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can +also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, +and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS +cannot enforce any restrictions on the use of data key pairs outside of KMS. If you are +using the data key pair to encrypt data, or for any operation where you don't immediately +need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. 
Later, when you need to decrypt the data or sign a message, use the Decrypt @@ -1638,7 +1834,8 @@ To perform this operation with a KMS key in a different Amazon Web Services acco specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:GenerateDataKeyPair (key policy) Related operations: Decrypt Encrypt GenerateDataKey GenerateDataKeyPairWithoutPlaintext -GenerateDataKeyWithoutPlaintext +GenerateDataKeyWithoutPlaintext Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Specifies the symmetric encryption KMS key that encrypts the private key in the @@ -1660,6 +1857,9 @@ GenerateDataKeyWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the private key in the data key pair. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other @@ -1677,17 +1877,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Recipient"`: A signed attestation document from an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The only valid encryption algorithm is RSAES_OAEP_SHA_256. This parameter only supports attestation - documents for Amazon Web Services Nitro Enclaves. To include this parameter, use the Amazon - Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. When you use this - parameter, instead of returning a plaintext copy of the private data key, KMS encrypts the - plaintext private data key under the public key in the attestation document, and returns - the resulting ciphertext in the CiphertextForRecipient field in the response. This - ciphertext can be decrypted only with the private key in the enclave. The CiphertextBlob - field in the response contains a copy of the private data key encrypted under the KMS key - specified by the KeyId parameter. The PrivateKeyPlaintext field in the response is null or - empty. For information about the interaction between KMS and Amazon Web Services Nitro - Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service - Developer Guide. + documents for Amazon Web Services Nitro Enclaves. To call DeriveSharedSecret for an Amazon + Web Services Nitro Enclaves, use the Amazon Web Services Nitro Enclaves SDK to generate the + attestation document and then use the Recipient parameter from any Amazon Web Services SDK + to provide the attestation document for the enclave. When you use this parameter, instead + of returning a plaintext copy of the private data key, KMS encrypts the plaintext private + data key under the public key in the attestation document, and returns the resulting + ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be + decrypted only with the private key in the enclave. The CiphertextBlob field in the + response contains a copy of the private data key encrypted under the KMS key specified by + the KeyId parameter. The PrivateKeyPlaintext field in the response is null or empty. 
For + information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see + How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer + Guide. """ function generate_data_key_pair( KeyId, KeyPairSpec; aws_config::AbstractAWSConfig=global_aws_config() @@ -1752,7 +1954,9 @@ Service Developer Guide. Cross-account use: Yes. To perform this operation with in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key policy) Related operations: Decrypt Encrypt GenerateDataKey -GenerateDataKeyPair GenerateDataKeyWithoutPlaintext +GenerateDataKeyPair GenerateDataKeyWithoutPlaintext Eventual consistency: The KMS +API follows an eventual consistency model. For more information, see KMS eventual +consistency. # Arguments - `key_id`: Specifies the symmetric encryption KMS key that encrypts the private key in the @@ -1774,6 +1978,9 @@ GenerateDataKeyPair GenerateDataKeyWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the private key in the data key pair. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other @@ -1857,7 +2064,8 @@ Yes. To perform this operation with a KMS key in a different Amazon Web Services specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy) Related operations: Decrypt Encrypt GenerateDataKey GenerateDataKeyPair -GenerateDataKeyPairWithoutPlaintext +GenerateDataKeyPairWithoutPlaintext Eventual consistency: The KMS API follows an +eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Specifies the symmetric encryption KMS key that encrypts the data key. You @@ -1874,6 +2082,9 @@ GenerateDataKeyPairWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the data key. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output. An encryption @@ -1936,7 +2147,9 @@ you use for this operation must be in a compatible key state. For details, see K of KMS keys in the Key Management Service Developer Guide. Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: -kms:GenerateMac (key policy) Related operations: VerifyMac +kms:GenerateMac (key policy) Related operations: VerifyMac Eventual consistency: The KMS +API follows an eventual consistency model. For more information, see KMS eventual +consistency. 
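A minimal sketch of the calling convention these wrappers share, using `generate_data_key_pair` (whose required positional arguments, `KeyId` and `KeyPairSpec`, are visible above) together with the optional-parameter `Dict` through which the new `DryRun` member is passed. All values are placeholders, and the snippet assumes the usual `using AWS; @service KMS` setup with credentials already configured; it is illustrative only and not part of the generated code.

```julia
# Sketch of the positional-arguments-plus-params-Dict pattern used by these wrappers.
# The key ARN and encryption context are placeholders.
using AWS
@service KMS

resp = KMS.generate_data_key_pair(
    "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",  # KeyId
    "ECC_NIST_P256",                                                                 # KeyPairSpec
    Dict{String,Any}(
        "EncryptionContext" => Dict("purpose" => "example"),
        # "DryRun" => true,  # add to validate permissions without generating key material
    ),
)
```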
# Arguments - `key_id`: The HMAC KMS key to use in the operation. The MAC algorithm computes the HMAC @@ -1953,6 +2166,9 @@ kms:GenerateMac (key policy) Related operations: VerifyMac # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -2012,7 +2228,8 @@ How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service De Guide. For more information about entropy and random number generation, see Key Management Service Cryptographic Details. Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys. Required permissions: -kms:GenerateRandom (IAM policy) +kms:GenerateRandom (IAM policy) Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2047,47 +2264,41 @@ function generate_random( end """ - get_key_policy(key_id, policy_name) - get_key_policy(key_id, policy_name, params::Dict{String,<:Any}) + get_key_policy(key_id) + get_key_policy(key_id, params::Dict{String,<:Any}) Gets a key policy attached to the specified KMS key. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required -permissions: kms:GetKeyPolicy (key policy) Related operations: PutKeyPolicy +permissions: kms:GetKeyPolicy (key policy) Related operations: PutKeyPolicy Eventual +consistency: The KMS API follows an eventual consistency model. For more information, see +KMS eventual consistency. # Arguments - `key_id`: Gets the key policy for the specified KMS key. Specify the key ID or key ARN of the KMS key. For example: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. -- `policy_name`: Specifies the name of the key policy. The only valid name is default. To - get the names of key policies, use ListKeyPolicies. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"PolicyName"`: Specifies the name of the key policy. If no policy name is specified, the + default value is default. The only valid name is default. To get the names of key policies, + use ListKeyPolicies. 
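This hunk makes `PolicyName` optional for `get_key_policy` (previously a required positional argument). A minimal sketch of both call forms, assuming the usual `@service KMS` setup and the placeholder key ID used throughout these docstrings; it is illustrative only.

```julia
# Sketch only: "default" is currently the only valid policy name, so it can be omitted.
using AWS
@service KMS

policy = KMS.get_key_policy("1234abcd-12ab-34cd-56ef-1234567890ab")

# Equivalent call that names the policy explicitly via the params Dict:
policy = KMS.get_key_policy(
    "1234abcd-12ab-34cd-56ef-1234567890ab",
    Dict{String,Any}("PolicyName" => "default"),
)
```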
""" -function get_key_policy( - KeyId, PolicyName; aws_config::AbstractAWSConfig=global_aws_config() -) +function get_key_policy(KeyId; aws_config::AbstractAWSConfig=global_aws_config()) return kms( "GetKeyPolicy", - Dict{String,Any}("KeyId" => KeyId, "PolicyName" => PolicyName); + Dict{String,Any}("KeyId" => KeyId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_key_policy( - KeyId, - PolicyName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + KeyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return kms( "GetKeyPolicy", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("KeyId" => KeyId, "PolicyName" => PolicyName), - params, - ), - ); + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("KeyId" => KeyId), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2097,34 +2308,38 @@ end get_key_rotation_status(key_id) get_key_rotation_status(key_id, params::Dict{String,<:Any}) -Gets a Boolean value that indicates whether automatic rotation of the key material is -enabled for the specified KMS key. When you enable automatic rotation for customer managed -KMS keys, KMS rotates the key material of the KMS key one year (approximately 365 days) -from the enable date and every year thereafter. You can monitor rotation of the key -material for your KMS keys in CloudTrail and Amazon CloudWatch. Automatic key rotation is -supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of -asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a -custom key store. To enable or disable automatic rotation of a set of related multi-Region -keys, set the property on the primary key.. You can enable (EnableKeyRotation) and disable -automatic rotation (DisableKeyRotation) of the key material in customer managed KMS keys. -Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS -always rotates the key material in Amazon Web Services managed KMS keys every year. The key -rotation status for Amazon Web Services managed KMS keys is always true. In May 2022, KMS -changed the rotation schedule for Amazon Web Services managed keys from every three years -to every year. For details, see EnableKeyRotation. The KMS key that you use for this -operation must be in a compatible key state. For details, see Key states of KMS keys in the -Key Management Service Developer Guide. Disabled: The key rotation status does not change -when you disable a KMS key. However, while the KMS key is disabled, KMS does not rotate the -key material. When you re-enable the KMS key, rotation resumes. If the key material in the -re-enabled KMS key hasn't been rotated in one year, KMS rotates it immediately, and every -year thereafter. If it's been less than a year since the key material in the re-enabled KMS -key was rotated, the KMS key resumes its prior rotation schedule. Pending deletion: While -a KMS key is pending deletion, its key rotation status is false and KMS does not rotate the -key material. If you cancel the deletion, the original key rotation status returns to true. - Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web -Services account, specify the key ARN in the value of the KeyId parameter. 
Required -permissions: kms:GetKeyRotationStatus (key policy) Related operations: -DisableKeyRotation EnableKeyRotation +Provides detailed information about the rotation status for a KMS key, including whether +automatic rotation of the key material is enabled for the specified KMS key, the rotation +period, and the next scheduled rotation date. Automatic key rotation is supported only on +symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, +HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To +enable or disable automatic rotation of a set of related multi-Region keys, set the +property on the primary key.. You can enable (EnableKeyRotation) and disable automatic +rotation (DisableKeyRotation) of the key material in customer managed KMS keys. Key +material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always +rotates the key material in Amazon Web Services managed KMS keys every year. The key +rotation status for Amazon Web Services managed KMS keys is always true. You can perform +on-demand (RotateKeyOnDemand) rotation of the key material in customer managed KMS keys, +regardless of whether or not automatic key rotation is enabled. You can use +GetKeyRotationStatus to identify the date and time that an in progress on-demand rotation +was initiated. You can use ListKeyRotations to view the details of completed rotations. In +May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every +three years to every year. For details, see EnableKeyRotation. The KMS key that you use +for this operation must be in a compatible key state. For details, see Key states of KMS +keys in the Key Management Service Developer Guide. Disabled: The key rotation status +does not change when you disable a KMS key. However, while the KMS key is disabled, KMS +does not rotate the key material. When you re-enable the KMS key, rotation resumes. If the +key material in the re-enabled KMS key hasn't been rotated in one year, KMS rotates it +immediately, and every year thereafter. If it's been less than a year since the key +material in the re-enabled KMS key was rotated, the KMS key resumes its prior rotation +schedule. Pending deletion: While a KMS key is pending deletion, its key rotation status +is false and KMS does not rotate the key material. If you cancel the deletion, the original +key rotation status returns to true. Cross-account use: Yes. To perform this operation +on a KMS key in a different Amazon Web Services account, specify the key ARN in the value +of the KeyId parameter. Required permissions: kms:GetKeyRotationStatus (key policy) +Related operations: DisableKeyRotation EnableKeyRotation ListKeyRotations +RotateKeyOnDemand Eventual consistency: The KMS API follows an eventual consistency +model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Gets the rotation status for the specified KMS key. Specify the key ID or key @@ -2188,6 +2403,8 @@ compatible key state. For details, see Key states of KMS keys in the Key Managem Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:GetParametersForImport (key policy) Related operations: ImportKeyMaterial DeleteImportedKeyMaterial +Eventual consistency: The KMS API follows an eventual consistency model. For more +information, see KMS eventual consistency. 
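Taken together, the rotation changes in this patch (the `RotationPeriodInDays` parameter, the richer `GetKeyRotationStatus` response, and the `ListKeyRotations`/`RotateKeyOnDemand` operations added further down) support a workflow along the lines of the sketch below. The key ID is a placeholder, the key is assumed to be a symmetric encryption customer managed key, and the snippet assumes the usual `@service KMS` setup; it is illustrative only.

```julia
# Rough sketch of the rotation workflow covered by this update (placeholder key ID).
using AWS
@service KMS

key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"

# Turn on automatic rotation with a custom rotation period, in days.
KMS.enable_key_rotation(key_id, Dict{String,Any}("RotationPeriodInDays" => 180))

# Inspect the rotation configuration; the response now also carries the
# rotation period and the next scheduled rotation date.
status = KMS.get_key_rotation_status(key_id)

# Rotate immediately without waiting for the schedule (new operation in this update).
KMS.rotate_key_on_demand(key_id)
```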
# Arguments - `key_id`: The identifier of the KMS key that will be associated with the imported key @@ -2212,7 +2429,7 @@ a different Amazon Web Services account. Required permissions: kms:GetParameter RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key). You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material. RSAES_PKCS1_V1_5 (Deprecated) — - Supported only for symmetric encryption key material (and only in legacy mode). + As of October 10, 2023, KMS does not support the RSAES_PKCS1_V1_5 wrapping algorithm. - `wrapping_key_spec`: The type of RSA public key to return in the response. You will use this wrapping key with the specified wrapping algorithm to protect your key material during import. Use the longest RSA wrapping key that is practical. You cannot use an RSA_2048 @@ -2279,21 +2496,23 @@ reduce of risk of encrypting data that cannot be decrypted. These features are n effective outside of KMS. To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including: KeySpec: The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521. - KeyUsage: Whether the key is used for encryption or signing. EncryptionAlgorithms or -SigningAlgorithms: A list of the encryption algorithms or the signing algorithms for the -key. Although KMS cannot enforce these restrictions on external operations, it is crucial -that you use this information to prevent the public key from being used improperly. For -example, you can prevent a public signing key from being used encrypt data, or prevent a -public key from being used with an encryption algorithm that is not supported by KMS. You -can also avoid errors, such as using the wrong signing algorithm in a verification -operation. To verify a signature outside of KMS with an SM2 public key (China Regions -only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the -distinguishing ID. For more information, see Offline verification with SM2 key pairs. The -KMS key that you use for this operation must be in a compatible key state. For details, see -Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: -Yes. To perform this operation with a KMS key in a different Amazon Web Services account, -specify the key ARN or alias ARN in the value of the KeyId parameter. Required -permissions: kms:GetPublicKey (key policy) Related operations: CreateKey + KeyUsage: Whether the key is used for encryption, signing, or deriving a shared secret. + EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the +signing algorithms for the key. Although KMS cannot enforce these restrictions on +external operations, it is crucial that you use this information to prevent the public key +from being used improperly. For example, you can prevent a public signing key from being +used encrypt data, or prevent a public key from being used with an encryption algorithm +that is not supported by KMS. You can also avoid errors, such as using the wrong signing +algorithm in a verification operation. To verify a signature outside of KMS with an SM2 +public key (China Regions only), you must specify the distinguishing ID. By default, KMS +uses 1234567812345678 as the distinguishing ID. For more information, see Offline +verification with SM2 key pairs. 
The KMS key that you use for this operation must be in a +compatible key state. For details, see Key states of KMS keys in the Key Management Service +Developer Guide. Cross-account use: Yes. To perform this operation with a KMS key in a +different Amazon Web Services account, specify the key ARN or alias ARN in the value of the +KeyId parameter. Required permissions: kms:GetPublicKey (key policy) Related operations: +CreateKey Eventual consistency: The KMS API follows an eventual consistency model. For +more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the asymmetric KMS key that includes the public key. To specify a @@ -2346,49 +2565,49 @@ Service Developer Guide. After you successfully import key material into a KMS k reimport the same key material into that KMS key, but you cannot import different key material. You might reimport key material to replace key material that expired or key material that you deleted. You might also reimport key material to change the expiration -model or expiration date of the key material. Before reimporting key material, if -necessary, call DeleteImportedKeyMaterial to delete the current imported key material. -Each time you import key material into KMS, you can determine whether (ExpirationModel) and -when (ValidTo) the key material expires. To change the expiration of your key material, you -must import it again, either by calling ImportKeyMaterial or using the import features of -the KMS console. Before calling ImportKeyMaterial: Create or identify a KMS key with no -key material. The KMS key must have an Origin value of EXTERNAL, which indicates that the -KMS key is designed for imported key material. To create an new KMS key for imported key -material, call the CreateKey operation with an Origin value of EXTERNAL. You can create a -symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric -signing KMS key. You can also import key material into a multi-Region key of any supported -type. However, you can't import key material into a KMS key in a custom key store. Use -the DescribeKey operation to verify that the KeyState of the KMS key is PendingImport, -which indicates that the KMS key has no key material. If you are reimporting the same key -material into an existing KMS key, you might need to call the DeleteImportedKeyMaterial to -delete its existing key material. Call the GetParametersForImport operation to get a -public key and import token set for importing key material. Use the public key in the -GetParametersForImport response to encrypt your key material. Then, in an -ImportKeyMaterial request, you submit your encrypted key material and import token. When -calling this operation, you must specify the following values: The key ID or key ARN of -the KMS key to associate with the imported key material. Its Origin must be EXTERNAL and -its KeyState must be PendingImport. You cannot perform this operation on a KMS key in a -custom key store, or on a KMS key in a different Amazon Web Services account. To get the -Origin and KeyState of a KMS key, call DescribeKey. The encrypted key material. The -import token that GetParametersForImport returned. You must use a public key and token from -the same GetParametersForImport response. Whether the key material expires -(ExpirationModel) and, if so, when (ValidTo). For help with this choice, see Setting an -expiration time in the Key Management Service Developer Guide. 
If you set an expiration -date, KMS deletes the key material from the KMS key on the specified date, making the KMS -key unusable. To use the KMS key in cryptographic operations again, you must reimport the -same key material. However, you can delete and reimport the key material at any time, -including before the key material expires. Each time you reimport, you can eliminate or -reset the expiration time. When this operation is successful, the key state of the KMS -key changes from PendingImport to Enabled, and you can use the KMS key in cryptographic -operations. If this operation fails, use the exception to help determine the problem. If -the error is related to the key material, the import token, or wrapping key, use -GetParametersForImport to get a new public key and import token for the KMS key and repeat -the import procedure. For help, see How To Import Key Material in the Key Management -Service Developer Guide. The KMS key that you use for this operation must be in a -compatible key state. For details, see Key states of KMS keys in the Key Management Service -Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in -a different Amazon Web Services account. Required permissions: kms:ImportKeyMaterial (key -policy) Related operations: DeleteImportedKeyMaterial GetParametersForImport +model or expiration date of the key material. Each time you import key material into KMS, +you can determine whether (ExpirationModel) and when (ValidTo) the key material expires. To +change the expiration of your key material, you must import it again, either by calling +ImportKeyMaterial or using the import features of the KMS console. Before calling +ImportKeyMaterial: Create or identify a KMS key with no key material. The KMS key must +have an Origin value of EXTERNAL, which indicates that the KMS key is designed for imported +key material. To create an new KMS key for imported key material, call the CreateKey +operation with an Origin value of EXTERNAL. You can create a symmetric encryption KMS key, +HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also +import key material into a multi-Region key of any supported type. However, you can't +import key material into a KMS key in a custom key store. Use the DescribeKey operation +to verify that the KeyState of the KMS key is PendingImport, which indicates that the KMS +key has no key material. If you are reimporting the same key material into an existing KMS +key, you might need to call the DeleteImportedKeyMaterial to delete its existing key +material. Call the GetParametersForImport operation to get a public key and import token +set for importing key material. Use the public key in the GetParametersForImport +response to encrypt your key material. Then, in an ImportKeyMaterial request, you submit +your encrypted key material and import token. When calling this operation, you must specify +the following values: The key ID or key ARN of the KMS key to associate with the imported +key material. Its Origin must be EXTERNAL and its KeyState must be PendingImport. You +cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a +different Amazon Web Services account. To get the Origin and KeyState of a KMS key, call +DescribeKey. The encrypted key material. The import token that GetParametersForImport +returned. You must use a public key and token from the same GetParametersForImport +response. 
Whether the key material expires (ExpirationModel) and, if so, when (ValidTo). +For help with this choice, see Setting an expiration time in the Key Management Service +Developer Guide. If you set an expiration date, KMS deletes the key material from the KMS +key on the specified date, making the KMS key unusable. To use the KMS key in cryptographic +operations again, you must reimport the same key material. However, you can delete and +reimport the key material at any time, including before the key material expires. Each time +you reimport, you can eliminate or reset the expiration time. When this operation is +successful, the key state of the KMS key changes from PendingImport to Enabled, and you can +use the KMS key in cryptographic operations. If this operation fails, use the exception to +help determine the problem. If the error is related to the key material, the import token, +or wrapping key, use GetParametersForImport to get a new public key and import token for +the KMS key and repeat the import procedure. For help, see How To Import Key Material in +the Key Management Service Developer Guide. The KMS key that you use for this operation +must be in a compatible key state. For details, see Key states of KMS keys in the Key +Management Service Developer Guide. Cross-account use: No. You cannot perform this +operation on a KMS key in a different Amazon Web Services account. Required permissions: +kms:ImportKeyMaterial (key policy) Related operations: DeleteImportedKeyMaterial +GetParametersForImport Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `encrypted_key_material`: The encrypted key material to import. The key material must be @@ -2488,7 +2707,8 @@ in your account, including predefined aliases, do not count against your KMS ali Cross-account use: No. ListAliases does not return aliases in other Amazon Web Services accounts. Required permissions: kms:ListAliases (IAM policy) For details, see Controlling access to aliases in the Key Management Service Developer Guide. Related operations: -CreateAlias DeleteAlias UpdateAlias +CreateAlias DeleteAlias UpdateAlias Eventual consistency: The KMS API follows +an eventual consistency model. For more information, see KMS eventual consistency. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2533,6 +2753,8 @@ several different grantee principals. Cross-account use: Yes. To perform this on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter. Required permissions: kms:ListGrants (key policy) Related operations: CreateGrant ListRetirableGrants RetireGrant RevokeGrant +Eventual consistency: The KMS API follows an eventual consistency model. For more +information, see KMS eventual consistency. # Arguments - `key_id`: Returns only grants for the specified KMS key. This parameter is required. @@ -2584,6 +2806,8 @@ designed to get policy names that you can use in a GetKeyPolicy operation. Howev only valid policy name is default. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:ListKeyPolicies (key policy) Related operations: GetKeyPolicy PutKeyPolicy +Eventual consistency: The KMS API follows an eventual consistency model. For more +information, see KMS eventual consistency. 
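The list operations in this file share the `Limit`/`Marker` pagination parameters. Below is a rough sketch of paging through results from the new `list_key_rotations` operation; the key ID is a placeholder, the `@service KMS` setup is assumed, and the `Rotations`/`Truncated`/`NextMarker` response fields are assumed to follow the standard KMS list-response shape.

```julia
# Sketch of Limit/Marker pagination with the new ListKeyRotations operation.
using AWS
@service KMS

key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"
params = Dict{String,Any}("Limit" => 10)
rotations = []

while true
    resp = KMS.list_key_rotations(key_id, params)
    append!(rotations, get(resp, "Rotations", []))      # collect this page of rotations
    get(resp, "Truncated", false) || break               # stop when the listing is complete
    params["Marker"] = resp["NextMarker"]                 # request the next page
end
```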
# Arguments - `key_id`: Gets the names of key policies for the specified KMS key. Specify the key ID or @@ -2621,6 +2845,55 @@ function list_key_policies( ) end +""" + list_key_rotations(key_id) + list_key_rotations(key_id, params::Dict{String,<:Any}) + +Returns information about all completed key material rotations for the specified KMS key. +You must specify the KMS key in all requests. You can refine the key rotations list by +limiting the number of rotations returned. For detailed information about automatic and +on-demand key rotations, see Rotating KMS keys in the Key Management Service Developer +Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a +different Amazon Web Services account. Required permissions: kms:ListKeyRotations (key +policy) Related operations: EnableKeyRotation DisableKeyRotation +GetKeyRotationStatus RotateKeyOnDemand Eventual consistency: The KMS API follows an +eventual consistency model. For more information, see KMS eventual consistency. + +# Arguments +- `key_id`: Gets the key rotations for the specified KMS key. Specify the key ID or key ARN + of the KMS key. For example: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: + arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab To get the + key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: Use this parameter to specify the maximum number of items to return. When this + value is present, KMS does not return more than the specified number of items, but it might + return fewer. This value is optional. If you include a value, it must be between 1 and + 1000, inclusive. If you do not include a value, it defaults to 100. +- `"Marker"`: Use this parameter in a subsequent request after you receive a response with + truncated results. Set it to the value of NextMarker from the truncated response you just + received. +""" +function list_key_rotations(KeyId; aws_config::AbstractAWSConfig=global_aws_config()) + return kms( + "ListKeyRotations", + Dict{String,Any}("KeyId" => KeyId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_key_rotations( + KeyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return kms( + "ListKeyRotations", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("KeyId" => KeyId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_keys() list_keys(params::Dict{String,<:Any}) @@ -2628,7 +2901,9 @@ end Gets a list of all KMS keys in the caller's Amazon Web Services account and Region. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:ListKeys (IAM policy) Related operations: - CreateKey DescribeKey ListAliases ListResourceTags + CreateKey DescribeKey ListAliases ListResourceTags Eventual +consistency: The KMS API follows an eventual consistency model. For more information, see +KMS eventual consistency. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2658,7 +2933,9 @@ the format and syntax, see Tagging Amazon Web Services resources in the Amazon W General Reference. For information about using tags in KMS, see Tagging keys. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. 
Required permissions: kms:ListResourceTags (key policy) Related -operations: CreateKey ReplicateKey TagResource UntagResource +operations: CreateKey ReplicateKey TagResource UntagResource Eventual +consistency: The KMS API follows an eventual consistency model. For more information, see +KMS eventual consistency. # Arguments - `key_id`: Gets tags on the specified KMS key. Specify the key ID or key ARN of the KMS @@ -2708,12 +2985,19 @@ to determine which grants you may retire. To retire a grant, use the RetireGrant For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants. Cross-account use: You must -specify a principal in your Amazon Web Services account. However, this operation can return -grants in any Amazon Web Services account. You do not need kms:ListRetirableGrants -permission (or any other additional permission) in any Amazon Web Services account other -than your own. Required permissions: kms:ListRetirableGrants (IAM policy) in your Amazon -Web Services account. Related operations: CreateGrant ListGrants RetireGrant - RevokeGrant +specify a principal in your Amazon Web Services account. This operation returns a list of +grants where the retiring principal specified in the ListRetirableGrants request is the +same retiring principal on the grant. This can include grants on KMS keys owned by other +Amazon Web Services accounts, but you do not need kms:ListRetirableGrants permission (or +any other additional permission) in any Amazon Web Services account other than your own. +Required permissions: kms:ListRetirableGrants (IAM policy) in your Amazon Web Services +account. KMS authorizes ListRetirableGrants requests by evaluating the caller account's +kms:ListRetirableGrants permissions. The authorized resource in ListRetirableGrants calls +is the retiring principal specified in the request. KMS does not evaluate the caller's +permissions to verify their access to any KMS keys or grants that might be returned by the +ListRetirableGrants call. Related operations: CreateGrant ListGrants +RetireGrant RevokeGrant Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `retiring_principal`: The retiring principal for which to list grants. Enter a principal @@ -2761,8 +3045,8 @@ function list_retirable_grants( end """ - put_key_policy(key_id, policy, policy_name) - put_key_policy(key_id, policy, policy_name, params::Dict{String,<:Any}) + put_key_policy(key_id, policy) + put_key_policy(key_id, policy, params::Dict{String,<:Any}) Attaches a key policy to the specified KMS key. For more information about key policies, see Key Policies in the Key Management Service Developer Guide. For help writing and @@ -2771,7 +3055,8 @@ Access Management User Guide . For examples of adding a key policy in multiple p languages, see Setting a key policy in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:PutKeyPolicy (key policy) Related -operations: GetKeyPolicy +operations: GetKeyPolicy Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Sets the key policy on the specified KMS key. 
Specify the key ID or key ARN of @@ -2796,7 +3081,6 @@ operations: GetKeyPolicy policies, see Key policies in KMS in the Key Management Service Developer Guide.For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide . -- `policy_name`: The name of the key policy. The only valid value is default. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2806,13 +3090,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Default key policy in the Key Management Service Developer Guide. Use this parameter only when you intend to prevent the principal that is making the request from making a subsequent PutKeyPolicy request on the KMS key. +- `"PolicyName"`: The name of the key policy. If no policy name is specified, the default + value is default. The only valid value is default. """ -function put_key_policy( - KeyId, Policy, PolicyName; aws_config::AbstractAWSConfig=global_aws_config() -) +function put_key_policy(KeyId, Policy; aws_config::AbstractAWSConfig=global_aws_config()) return kms( "PutKeyPolicy", - Dict{String,Any}("KeyId" => KeyId, "Policy" => Policy, "PolicyName" => PolicyName); + Dict{String,Any}("KeyId" => KeyId, "Policy" => Policy); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2820,7 +3104,6 @@ end function put_key_policy( KeyId, Policy, - PolicyName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -2828,11 +3111,7 @@ function put_key_policy( "PutKeyPolicy", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "KeyId" => KeyId, "Policy" => Policy, "PolicyName" => PolicyName - ), - params, + _merge, Dict{String,Any}("KeyId" => KeyId, "Policy" => Policy), params ), ); aws_config=aws_config, @@ -2889,7 +3168,8 @@ destination KMS key (key policy) To permit reencryption from or to a KMS key, in the key policy when you use the console to create a KMS key. But you must include it manually when you create a KMS key programmatically or when you use the PutKeyPolicy operation to set a key policy. Related operations: Decrypt Encrypt -GenerateDataKey GenerateDataKeyPair +GenerateDataKey GenerateDataKeyPair Eventual consistency: The KMS API follows an +eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `ciphertext_blob`: Ciphertext of the data to reencrypt. @@ -2923,6 +3203,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended. For more information, see Encryption context in the Key Management Service Developer Guide. +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -3040,7 +3323,8 @@ Services account. Required permissions: kms:ReplicateKey on the primary ke primary key's Region). Include this permission in the primary key's key policy. kms:CreateKey in an IAM policy in the replica Region. 
To use the Tags parameter, kms:TagResource in an IAM policy in the replica Region. Related operations CreateKey - UpdatePrimaryRegion + UpdatePrimaryRegion Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the multi-Region primary key that is being replicated. To determine @@ -3162,13 +3446,17 @@ Service Developer Guide. For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants. Cross-account use: Yes. You can retire a grant on a KMS key in a different Amazon Web -Services account. Required permissions::Permission to retire a grant is determined +Services account. Required permissions: Permission to retire a grant is determined primarily by the grant. For details, see Retiring and revoking grants in the Key Management Service Developer Guide. Related operations: CreateGrant ListGrants -ListRetirableGrants RevokeGrant +ListRetirableGrants RevokeGrant Eventual consistency: The KMS API follows an +eventual consistency model. For more information, see KMS eventual consistency. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantId"`: Identifies the grant to retire. To get the grant ID, use CreateGrant, ListGrants, or ListRetirableGrants. Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 @@ -3206,7 +3494,8 @@ examples of working with grants in several programming languages, see Programmin Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter. Required permissions: kms:RevokeGrant (key policy). Related operations: CreateGrant -ListGrants ListRetirableGrants RetireGrant +ListGrants ListRetirableGrants RetireGrant Eventual consistency: The KMS API +follows an eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `grant_id`: Identifies the grant to revoke. To get the grant ID, use CreateGrant, @@ -3218,6 +3507,11 @@ ListGrants ListRetirableGrants RetireGrant arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. """ function revoke_grant(GrantId, KeyId; aws_config::AbstractAWSConfig=global_aws_config()) return kms( @@ -3245,6 +3539,67 @@ function revoke_grant( ) end +""" + rotate_key_on_demand(key_id) + rotate_key_on_demand(key_id, params::Dict{String,<:Any}) + +Immediately initiates rotation of the key material of the specified symmetric encryption +KMS key. You can perform on-demand rotation of the key material in customer managed KMS +keys, regardless of whether or not automatic key rotation is enabled. 
On-demand rotations +do not change existing automatic rotation schedules. For example, consider a KMS key that +has automatic key rotation enabled with a rotation period of 730 days. If the key is +scheduled to automatically rotate on April 14, 2024, and you perform an on-demand rotation +on April 10, 2024, the key will automatically rotate, as scheduled, on April 14, 2024 and +every 730 days thereafter. You can perform on-demand key rotation a maximum of 10 times +per KMS key. You can use the KMS console to view the number of remaining on-demand +rotations available for a KMS key. You can use GetKeyRotationStatus to identify any in +progress on-demand rotations. You can use ListKeyRotations to identify the date that +completed on-demand rotations were performed. You can monitor rotation of the key material +for your KMS keys in CloudTrail and Amazon CloudWatch. On-demand key rotation is supported +only on symmetric encryption KMS keys. You cannot perform on-demand rotation of asymmetric +KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key +store. To perform on-demand rotation of a set of related multi-Region keys, invoke the +on-demand rotation on the primary key. You cannot initiate on-demand rotation of Amazon Web +Services managed KMS keys. KMS always rotates the key material of Amazon Web Services +managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the +Amazon Web Services service that owns the key. The KMS key that you use for this operation +must be in a compatible key state. For details, see Key states of KMS keys in the Key +Management Service Developer Guide. Cross-account use: No. You cannot perform this +operation on a KMS key in a different Amazon Web Services account. Required permissions: +kms:RotateKeyOnDemand (key policy) Related operations: EnableKeyRotation +DisableKeyRotation GetKeyRotationStatus ListKeyRotations Eventual consistency: +The KMS API follows an eventual consistency model. For more information, see KMS eventual +consistency. + +# Arguments +- `key_id`: Identifies a symmetric encryption KMS key. You cannot perform on-demand + rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS + keys in a custom key store. To perform on-demand rotation of a set of related multi-Region + keys, invoke the on-demand rotation on the primary key. Specify the key ID or key ARN of + the KMS key. For example: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: + arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab To get the + key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + +""" +function rotate_key_on_demand(KeyId; aws_config::AbstractAWSConfig=global_aws_config()) + return kms( + "RotateKeyOnDemand", + Dict{String,Any}("KeyId" => KeyId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function rotate_key_on_demand( + KeyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return kms( + "RotateKeyOnDemand", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("KeyId" => KeyId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ schedule_key_deletion(key_id) schedule_key_deletion(key_id, params::Dict{String,<:Any}) @@ -3258,30 +3613,30 @@ KMS key. After the waiting period ends, KMS deletes the KMS key, its key materia KMS data associated with it, including all aliases that refer to it. 
Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region -replica key, or an asymmetric or HMAC KMS key with imported key material[BUGBUG-link to -importing-keys-managing.html#import-delete-key.) To prevent the use of a KMS key without -deleting it, use DisableKey. You can schedule the deletion of a multi-Region primary key -and its replica keys at any time. However, KMS will not delete a multi-Region primary key -with existing replica keys. If you schedule the deletion of a primary key with replicas, -its key state changes to PendingReplicaDeletion and it cannot be replicated or used in -cryptographic operations. This status can continue indefinitely. When the last of its -replicas keys is deleted (not just scheduled), the key state of the primary key changes to -PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see -Deleting multi-Region keys in the Key Management Service Developer Guide. When KMS deletes -a KMS key from an CloudHSM key store, it makes a best effort to delete the associated key -material from the associated CloudHSM cluster. However, you might need to manually delete -the orphaned key material from the cluster and its backups. Deleting a KMS key from an -external key store has no effect on the associated external key. However, for both types of -custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt -ciphertext encrypted under the KMS key by using only its associated external key or -CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a -new KMS key with the same key material. For more information about scheduling a KMS key for -deletion, see Deleting KMS keys in the Key Management Service Developer Guide. The KMS key -that you use for this operation must be in a compatible key state. For details, see Key -states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. -You cannot perform this operation on a KMS key in a different Amazon Web Services account. -Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations -CancelKeyDeletion DisableKey +replica key, or an asymmetric or HMAC KMS key with imported key material.) To prevent the +use of a KMS key without deleting it, use DisableKey. You can schedule the deletion of a +multi-Region primary key and its replica keys at any time. However, KMS will not delete a +multi-Region primary key with existing replica keys. If you schedule the deletion of a +primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be +replicated or used in cryptographic operations. This status can continue indefinitely. When +the last of its replicas keys is deleted (not just scheduled), the key state of the primary +key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For +details, see Deleting multi-Region keys in the Key Management Service Developer Guide. When +KMS deletes a KMS key from an CloudHSM key store, it makes a best effort to delete the +associated key material from the associated CloudHSM cluster. However, you might need to +manually delete the orphaned key material from the cluster and its backups. Deleting a KMS +key from an external key store has no effect on the associated external key. 
However, for +both types of custom key stores, deleting a KMS key is destructive and irreversible. You +cannot decrypt ciphertext encrypted under the KMS key by using only its associated external +key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by +creating a new KMS key with the same key material. For more information about scheduling a +KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide. +The KMS key that you use for this operation must be in a compatible key state. For details, +see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account +use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services +account. Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations + CancelKeyDeletion DisableKey Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: The unique identifier of the KMS key to delete. Specify the key ID or key ARN @@ -3352,6 +3707,8 @@ state. For details, see Key states of KMS keys in the Key Management Service Dev Guide. Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:Sign (key policy) Related operations: Verify +Eventual consistency: The KMS API follows an eventual consistency model. For more +information, see KMS eventual consistency. # Arguments - `key_id`: Identifies an asymmetric KMS key. KMS uses the private key in the asymmetric @@ -3374,6 +3731,9 @@ parameter. Required permissions: kms:Sign (key policy) Related operations: Ver # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -3451,7 +3811,8 @@ operation must be in a compatible key state. For details, see Key states of KMS Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:TagResource (key policy) Related operations CreateKey ListResourceTags -ReplicateKey UntagResource +ReplicateKey UntagResource Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies a customer managed key in the account and Region. Specify the key ID @@ -3506,7 +3867,9 @@ Reference. The KMS key that you use for this operation must be in a compatible For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. 
Required permissions: kms:UntagResource (key policy) Related -operations CreateKey ListResourceTags ReplicateKey TagResource +operations CreateKey ListResourceTags ReplicateKey TagResource Eventual +consistency: The KMS API follows an eventual consistency model. For more information, see +KMS eventual consistency. # Arguments - `key_id`: Identifies the KMS key from which you are removing tags. Specify the key ID or @@ -3566,7 +3929,8 @@ key in a different Amazon Web Services account. Required permissions kms:U on the alias (IAM policy). kms:UpdateAlias on the current KMS key (key policy). kms:UpdateAlias on the new KMS key (key policy). For details, see Controlling access to aliases in the Key Management Service Developer Guide. Related operations: CreateAlias - DeleteAlias ListAliases + DeleteAlias ListAliases Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `alias_name`: Identifies the alias that is changing its KMS key. This value must begin @@ -3664,7 +4028,9 @@ path. If the operation succeeds, it returns a JSON object with no properties. Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account. Required permissions: kms:UpdateCustomKeyStore (IAM policy) Related operations: ConnectCustomKeyStore CreateCustomKeyStore -DeleteCustomKeyStore DescribeCustomKeyStores DisconnectCustomKeyStore +DeleteCustomKeyStore DescribeCustomKeyStores DisconnectCustomKeyStore Eventual +consistency: The KMS API follows an eventual consistency model. For more information, see +KMS eventual consistency. # Arguments - `custom_key_store_id`: Identifies the custom key store that you want to update. Enter the @@ -3770,7 +4136,8 @@ Updates the description of a KMS key. To see the description of a KMS key, use D details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:UpdateKeyDescription (key policy) -Related operations CreateKey DescribeKey +Related operations CreateKey DescribeKey Eventual consistency: The KMS API +follows an eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `description`: New description for the KMS key. Do not include confidential or sensitive @@ -3852,7 +4219,8 @@ different Amazon Web Services account. Required permissions: kms:UpdatePri on the current primary key (in the primary key's Region). Include this permission primary key's key policy. kms:UpdatePrimaryRegion on the current replica key (in the replica key's Region). Include this permission in the replica key's key policy. Related -operations CreateKey ReplicateKey +operations CreateKey ReplicateKey Eventual consistency: The KMS API follows an +eventual consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the current primary key. When the operation completes, this KMS key @@ -3926,6 +4294,8 @@ state. For details, see Key states of KMS keys in the Key Management Service Dev Guide. Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:Verify (key policy) Related operations: Sign +Eventual consistency: The KMS API follows an eventual consistency model. 
For more +information, see KMS eventual consistency. # Arguments - `key_id`: Identifies the asymmetric KMS key that will be used to verify the signature. @@ -3949,6 +4319,9 @@ parameter. Required permissions: kms:Verify (key policy) Related operations: # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -4034,7 +4407,8 @@ operation must be in a compatible key state. For details, see Key states of KMS Key Management Service Developer Guide. Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:VerifyMac (key policy) -Related operations: GenerateMac +Related operations: GenerateMac Eventual consistency: The KMS API follows an eventual +consistency model. For more information, see KMS eventual consistency. # Arguments - `key_id`: The KMS key that will be used in the verification. Enter a key ID of the KMS @@ -4053,6 +4427,9 @@ Related operations: GenerateMac # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service diff --git a/src/services/lakeformation.jl b/src/services/lakeformation.jl index 4cb56b67ba..05182f51bd 100644 --- a/src/services/lakeformation.jl +++ b/src/services/lakeformation.jl @@ -313,6 +313,95 @@ function create_data_cells_filter( ) end +""" + create_lake_formation_identity_center_configuration() + create_lake_formation_identity_center_configuration(params::Dict{String,<:Any}) + +Creates an IAM Identity Center connection with Lake Formation to allow IAM Identity Center +users and groups to access Data Catalog resources. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CatalogId"`: The identifier for the Data Catalog. By default, the account ID. The Data + Catalog is the persistent metadata store. It contains database definitions, table + definitions, view definitions, and other control information to manage your Lake Formation + environment. +- `"ExternalFiltering"`: A list of the account IDs of Amazon Web Services accounts of + third-party applications that are allowed to access data managed by Lake Formation. +- `"InstanceArn"`: The ARN of the IAM Identity Center instance for which the operation will + be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon + Web Services Service Namespaces in the Amazon Web Services General Reference. 
+- `"ShareRecipients"`: A list of Amazon Web Services account IDs and/or Amazon Web Services + organization/organizational unit ARNs that are allowed to access data managed by Lake + Formation. If the ShareRecipients list includes valid values, a resource share is created + with the principals you want to have access to the resources. If the ShareRecipients value + is null or the list is empty, no resource share is created. +""" +function create_lake_formation_identity_center_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/CreateLakeFormationIdentityCenterConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_lake_formation_identity_center_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/CreateLakeFormationIdentityCenterConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_lake_formation_opt_in(principal, resource) + create_lake_formation_opt_in(principal, resource, params::Dict{String,<:Any}) + +Enforce Lake Formation permissions for the given databases, tables, and principals. + +# Arguments +- `principal`: +- `resource`: + +""" +function create_lake_formation_opt_in( + Principal, Resource; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/CreateLakeFormationOptIn", + Dict{String,Any}("Principal" => Principal, "Resource" => Resource); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_lake_formation_opt_in( + Principal, + Resource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lakeformation( + "POST", + "/CreateLakeFormationOptIn", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Principal" => Principal, "Resource" => Resource), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_lftag(tag_key, tag_values) create_lftag(tag_key, tag_values, params::Dict{String,<:Any}) @@ -392,6 +481,85 @@ function delete_data_cells_filter( ) end +""" + delete_lake_formation_identity_center_configuration() + delete_lake_formation_identity_center_configuration(params::Dict{String,<:Any}) + +Deletes an IAM Identity Center connection with Lake Formation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CatalogId"`: The identifier for the Data Catalog. By default, the account ID. The Data + Catalog is the persistent metadata store. It contains database definitions, table + definitions, view definition, and other control information to manage your Lake Formation + environment. 
+""" +function delete_lake_formation_identity_center_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/DeleteLakeFormationIdentityCenterConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_lake_formation_identity_center_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/DeleteLakeFormationIdentityCenterConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_lake_formation_opt_in(principal, resource) + delete_lake_formation_opt_in(principal, resource, params::Dict{String,<:Any}) + +Remove the Lake Formation permissions enforcement of the given databases, tables, and +principals. + +# Arguments +- `principal`: +- `resource`: + +""" +function delete_lake_formation_opt_in( + Principal, Resource; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/DeleteLakeFormationOptIn", + Dict{String,Any}("Principal" => Principal, "Resource" => Resource); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_lake_formation_opt_in( + Principal, + Resource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lakeformation( + "POST", + "/DeleteLakeFormationOptIn", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Principal" => Principal, "Resource" => Resource), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_lftag(tag_key) delete_lftag(tag_key, params::Dict{String,<:Any}) @@ -540,6 +708,40 @@ function deregister_resource( ) end +""" + describe_lake_formation_identity_center_configuration() + describe_lake_formation_identity_center_configuration(params::Dict{String,<:Any}) + +Retrieves the instance ARN and application ARN for the connection. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CatalogId"`: The identifier for the Data Catalog. By default, the account ID. The Data + Catalog is the persistent metadata store. It contains database definitions, table + definitions, and other control information to manage your Lake Formation environment. +""" +function describe_lake_formation_identity_center_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/DescribeLakeFormationIdentityCenterConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_lake_formation_identity_center_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/DescribeLakeFormationIdentityCenterConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_resource(resource_arn) describe_resource(resource_arn, params::Dict{String,<:Any}) @@ -702,6 +904,33 @@ function get_data_cells_filter( ) end +""" + get_data_lake_principal() + get_data_lake_principal(params::Dict{String,<:Any}) + +Returns the identity of the invoking principal. 
+ +""" +function get_data_lake_principal(; aws_config::AbstractAWSConfig=global_aws_config()) + return lakeformation( + "POST", + "/GetDataLakePrincipal"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_data_lake_principal( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/GetDataLakePrincipal", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_data_lake_settings() get_data_lake_settings(params::Dict{String,<:Any}) @@ -986,8 +1215,8 @@ function get_table_objects( end """ - get_temporary_glue_partition_credentials(partition, supported_permission_types, table_arn) - get_temporary_glue_partition_credentials(partition, supported_permission_types, table_arn, params::Dict{String,<:Any}) + get_temporary_glue_partition_credentials(partition, table_arn) + get_temporary_glue_partition_credentials(partition, table_arn, params::Dict{String,<:Any}) This API is identical to GetTemporaryTableCredentials except that this is used when the target Data Catalog resource is of type Partition. Lake Formation restricts the permission @@ -996,8 +1225,6 @@ single Amazon S3 prefix. # Arguments - `partition`: A list of partition values identifying a single partition. -- `supported_permission_types`: A list of supported permission types for the partition. - Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. - `table_arn`: The ARN of the partitions' table. # Optional Parameters @@ -1008,28 +1235,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the temporary credentials. - `"Permissions"`: Filters the request based on the user having been granted a list of specified permissions on the requested resource(s). +- `"SupportedPermissionTypes"`: A list of supported permission types for the partition. + Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. """ function get_temporary_glue_partition_credentials( - Partition, - SupportedPermissionTypes, - TableArn; - aws_config::AbstractAWSConfig=global_aws_config(), + Partition, TableArn; aws_config::AbstractAWSConfig=global_aws_config() ) return lakeformation( "POST", "/GetTemporaryGluePartitionCredentials", - Dict{String,Any}( - "Partition" => Partition, - "SupportedPermissionTypes" => SupportedPermissionTypes, - "TableArn" => TableArn, - ); + Dict{String,Any}("Partition" => Partition, "TableArn" => TableArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_temporary_glue_partition_credentials( Partition, - SupportedPermissionTypes, TableArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1040,11 +1261,7 @@ function get_temporary_glue_partition_credentials( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "Partition" => Partition, - "SupportedPermissionTypes" => SupportedPermissionTypes, - "TableArn" => TableArn, - ), + Dict{String,Any}("Partition" => Partition, "TableArn" => TableArn), params, ), ); @@ -1054,8 +1271,8 @@ function get_temporary_glue_partition_credentials( end """ - get_temporary_glue_table_credentials(supported_permission_types, table_arn) - get_temporary_glue_table_credentials(supported_permission_types, table_arn, params::Dict{String,<:Any}) + get_temporary_glue_table_credentials(table_arn) + get_temporary_glue_table_credentials(table_arn, params::Dict{String,<:Any}) Allows a caller in a secure environment to assume a role with permission to access Amazon S3. 
In order to vend such credentials, Lake Formation assumes the role associated with a @@ -1063,8 +1280,6 @@ registered location, for example an Amazon S3 bucket, with a scope down policy w restricts the access to a single prefix. # Arguments -- `supported_permission_types`: A list of supported permission types for the table. Valid - values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. - `table_arn`: The ARN identifying a table in the Data Catalog for the temporary credentials request. @@ -1076,22 +1291,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the temporary credentials. - `"Permissions"`: Filters the request based on the user having been granted a list of specified permissions on the requested resource(s). +- `"QuerySessionContext"`: A structure used as a protocol between query engines and Lake + Formation or Glue. Contains both a Lake Formation generated authorization identifier and + information from the request's authorization context. +- `"S3Path"`: The Amazon S3 path for the table. +- `"SupportedPermissionTypes"`: A list of supported permission types for the table. Valid + values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. """ function get_temporary_glue_table_credentials( - SupportedPermissionTypes, TableArn; aws_config::AbstractAWSConfig=global_aws_config() + TableArn; aws_config::AbstractAWSConfig=global_aws_config() ) return lakeformation( "POST", "/GetTemporaryGlueTableCredentials", - Dict{String,Any}( - "SupportedPermissionTypes" => SupportedPermissionTypes, "TableArn" => TableArn - ); + Dict{String,Any}("TableArn" => TableArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_temporary_glue_table_credentials( - SupportedPermissionTypes, TableArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1100,14 +1318,7 @@ function get_temporary_glue_table_credentials( "POST", "/GetTemporaryGlueTableCredentials", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "SupportedPermissionTypes" => SupportedPermissionTypes, - "TableArn" => TableArn, - ), - params, - ), + mergewith(_merge, Dict{String,Any}("TableArn" => TableArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1310,6 +1521,40 @@ function list_data_cells_filter( ) end +""" + list_lake_formation_opt_ins() + list_lake_formation_opt_ins(params::Dict{String,<:Any}) + +Retrieve the current list of resources and principals that are opt in to enforce Lake +Formation permissions. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return. +- `"NextToken"`: A continuation token, if this is not the first call to retrieve this list. +- `"Principal"`: +- `"Resource"`: A structure for the resource. 
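A small pagination sketch for ListLakeFormationOptIns using the MaxResults and NextToken keys listed above. The page size is arbitrary, and the response is assumed to be the parsed Dict that AWS.jl returns by default.

```julia
using AWS
@service Lake_Formation

# Collect every page of opt-in entries.
function all_opt_in_pages()
    resp = Lake_Formation.list_lake_formation_opt_ins(Dict("MaxResults" => 50))
    pages = [resp]
    while haskey(resp, "NextToken")
        resp = Lake_Formation.list_lake_formation_opt_ins(
            Dict("MaxResults" => 50, "NextToken" => resp["NextToken"])
        )
        push!(pages, resp)
    end
    return pages
end
```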
+""" +function list_lake_formation_opt_ins(; aws_config::AbstractAWSConfig=global_aws_config()) + return lakeformation( + "POST", + "/ListLakeFormationOptIns"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lake_formation_opt_ins( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/ListLakeFormationOptIns", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_lftags() list_lftags(params::Dict{String,<:Any}) @@ -1573,6 +1818,9 @@ arn:aws:iam::12345:role/my-data-access-role # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"HybridAccessEnabled"`: Specifies whether the data access of tables pointing to the + location can be managed by both Lake Formation permissions as well as Amazon S3 bucket + policies. - `"RoleArn"`: The identifier for the role that registers the resource. - `"UseServiceLinkedRole"`: Designates an Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. A service-linked role @@ -1925,6 +2173,51 @@ function update_data_cells_filter( ) end +""" + update_lake_formation_identity_center_configuration() + update_lake_formation_identity_center_configuration(params::Dict{String,<:Any}) + +Updates the IAM Identity Center connection parameters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationStatus"`: Allows to enable or disable the IAM Identity Center connection. +- `"CatalogId"`: The identifier for the Data Catalog. By default, the account ID. The Data + Catalog is the persistent metadata store. It contains database definitions, table + definitions, view definitions, and other control information to manage your Lake Formation + environment. +- `"ExternalFiltering"`: A list of the account IDs of Amazon Web Services accounts of + third-party applications that are allowed to access data managed by Lake Formation. +- `"ShareRecipients"`: A list of Amazon Web Services account IDs or Amazon Web Services + organization/organizational unit ARNs that are allowed to access to access data managed by + Lake Formation. If the ShareRecipients list includes valid values, then the resource share + is updated with the principals you want to have access to the resources. If the + ShareRecipients value is null, both the list of share recipients and the resource share + remain unchanged. If the ShareRecipients value is an empty list, then the existing share + recipients list will be cleared, and the resource share will be deleted. +""" +function update_lake_formation_identity_center_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/UpdateLakeFormationIdentityCenterConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_lake_formation_identity_center_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lakeformation( + "POST", + "/UpdateLakeFormationIdentityCenterConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_lftag(tag_key) update_lftag(tag_key, params::Dict{String,<:Any}) @@ -1980,6 +2273,9 @@ Lake Formation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"HybridAccessEnabled"`: Specifies whether the data access of tables pointing to the + location can be managed by both Lake Formation permissions as well as Amazon S3 bucket + policies. - `"WithFederation"`: Whether or not the resource is a federated resource. """ function update_resource( diff --git a/src/services/lambda.jl b/src/services/lambda.jl index 5182bc5162..2846e91a18 100644 --- a/src/services/lambda.jl +++ b/src/services/lambda.jl @@ -101,9 +101,9 @@ more information about function policies, see Using resource-based policies for # Arguments - `action`: The action that the principal can use on the function. For example, lambda:InvokeFunction or lambda:GetFunction. -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name – my-function (name-only), my-function:v1 (with alias). Function ARN + – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -190,10 +190,11 @@ alias to split invocation requests between two versions. Use the RoutingConfig p specify a second version and the percentage of invocation requests that it receives. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. - `function_version`: The function version that the alias invokes. - `name`: The name of the alias. @@ -303,12 +304,12 @@ following topics. Amazon DynamoDB Streams Amazon Kinesis Amazon SQ Amazon MQ and RabbitMQ Amazon MSK Apache Kafka Amazon DocumentDB # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - MyFunction. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. - Partial ARN – 123456789012:function:MyFunction. The length constraint applies only to - the full ARN. If you specify only the function name, it's limited to 64 characters in - length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – MyFunction. Function ARN – + arn:aws:lambda:us-west-2:123456789012:function:MyFunction. Version or Alias ARN – + arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. Partial ARN – + 123456789012:function:MyFunction. The length constraint applies only to the full ARN. If + you specify only the function name, it's limited to 64 characters in length. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -325,8 +326,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 100. Max 10,000. - `"BisectBatchOnFunctionError"`: (Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. -- `"DestinationConfig"`: (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or - standard Amazon SNS topic destination for discarded records. +- `"DestinationConfig"`: (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka + only) A configuration object that specifies the destination of an event after Lambda + processes it. - `"DocumentDBEventSourceConfig"`: Specific configuration settings for a DocumentDB event source. - `"Enabled"`: When true, the event source mapping is active. When false, Lambda pauses @@ -334,8 +336,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"EventSourceArn"`: The Amazon Resource Name (ARN) of the event source. Amazon Kinesis – The ARN of the data stream or a stream consumer. Amazon DynamoDB Streams – The ARN of the stream. Amazon Simple Queue Service – The ARN of the queue. Amazon Managed - Streaming for Apache Kafka – The ARN of the cluster. Amazon MQ – The ARN of the - broker. Amazon DocumentDB – The ARN of the DocumentDB change stream. + Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for + cross-account event source mappings). Amazon MQ – The ARN of the broker. Amazon + DocumentDB – The ARN of the DocumentDB change stream. - `"FilterCriteria"`: An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering. - `"FunctionResponseTypes"`: (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current @@ -367,10 +370,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SourceAccessConfigurations"`: An array of authentication protocols or VPC components required to secure your event source. - `"StartingPosition"`: The position in a stream from which to start reading. Required for - Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is supported - only for Amazon Kinesis streams and Amazon DocumentDB. + Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for + Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka. - `"StartingPositionTimestamp"`: With StartingPosition set to AT_TIMESTAMP, the time from - which to start reading. + which to start reading. StartingPositionTimestamp cannot be in the future. - `"Topics"`: The name of the Kafka topic. - `"TumblingWindowInSeconds"`: (Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds @@ -448,8 +451,8 @@ Lambda functions. # Arguments - `code`: The code for the function. -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. 
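To illustrate the CreateEventSourceMapping options called out above (with DestinationConfig now also applying to Amazon MSK and self-managed Kafka sources), a hedged sketch with placeholder ARNs. FunctionName is assumed to be the only positional argument, and the DestinationConfig shape follows the OnFailure/Destination structure from the Lambda API.

```julia
using AWS
@service Lambda

# Placeholder stream and queue ARNs; everything optional goes in the params dict.
Lambda.create_event_source_mapping(
    "my-function",
    Dict(
        "EventSourceArn" => "arn:aws:kinesis:us-west-2:123456789012:stream/orders",
        "StartingPosition" => "LATEST",
        "DestinationConfig" => Dict(
            "OnFailure" => Dict(
                "Destination" => "arn:aws:sqs:us-west-2:123456789012:discarded-records",
            ),
        ),
    ),
)
```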
@@ -469,7 +472,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Environment"`: Environment variables that are accessible from function code during execution. - `"EphemeralStorage"`: The size of the function's /tmp directory in MB. The default value - is 512, but can be any whole number between 512 and 10,240 MB. + is 512, but can be any whole number between 512 and 10,240 MB. For more information, see + Configuring ephemeral storage (console). - `"FileSystemConfigs"`: Connection settings for an Amazon EFS file system. - `"Handler"`: The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format @@ -486,6 +490,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys customer managed key, Lambda uses a default service key. - `"Layers"`: A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version. +- `"LoggingConfig"`: The function's Amazon CloudWatch Logs configuration settings. - `"MemorySize"`: The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB. @@ -554,8 +559,8 @@ is a dedicated HTTP(S) endpoint that you can use to invoke your function. you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs. -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -606,10 +611,11 @@ end Deletes a Lambda function alias. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. - `name`: The name of the alias. """ @@ -711,15 +717,16 @@ end delete_function(function_name, params::Dict{String,<:Any}) Deletes a Lambda function. To delete a specific function version, use the Qualifier -parameter. Otherwise, all versions and aliases are deleted. To delete Lambda event source -mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services and -resources that invoke your function directly, delete the trigger in the service where you -originally configured it. +parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user +to have explicit permissions for DeleteAlias. 
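As the DeleteFunction description above notes, a single published version can be removed with the Qualifier parameter. A short sketch with placeholder values:

```julia
using AWS
@service Lambda

# Remove one published version; omitting the params dict deletes the function
# together with all of its versions and aliases.
Lambda.delete_function("my-function", Dict("Qualifier" => "3"))
```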
To delete Lambda event source mappings that +invoke a function, use DeleteEventSourceMapping. For Amazon Web Services and resources that +invoke your function directly, delete the trigger in the service where you originally +configured it. # Arguments -- `function_name`: The name of the Lambda function or version. Name formats Function - name – my-function (name-only), my-function:1 (with version). Function ARN – - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – +- `function_name`: The name or ARN of the Lambda function or version. Name formats + Function name – my-function (name-only), my-function:1 (with version). Function ARN + – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -758,10 +765,11 @@ end Removes the code signing configuration from the function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. """ function delete_function_code_signing_config( @@ -795,8 +803,8 @@ end Removes a concurrent execution limit from a function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -834,8 +842,8 @@ Deletes the configuration for asynchronous invocation for a function, version, o configure options for asynchronous invocation, use PutFunctionEventInvokeConfig. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN - 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the @@ -877,8 +885,8 @@ Deletes a Lambda function URL. When you delete a function URL, you can't recover Creating a new function URL results in a different URL address. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. 
Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -956,8 +964,8 @@ end Deletes the provisioned concurrency configuration for a function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1026,10 +1034,11 @@ end Returns details about a Lambda function alias. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. - `name`: The name of the alias. """ @@ -1131,9 +1140,9 @@ deployment package that's valid for 10 minutes. If you specify a function versio details that are specific to that version are returned. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name – my-function (name-only), my-function:v1 (with alias). Function ARN + – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1172,10 +1181,11 @@ end Returns the code signing configuration for the specified function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. 
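Since the function_name arguments above now accept a function name, a full ARN, or a partial ARN, the three calls below are assumed to target the same function; the region and account ID are placeholders.

```julia
using AWS
@service Lambda

Lambda.get_function("my-function")                                                  # name only
Lambda.get_function("123456789012:function:my-function")                            # partial ARN
Lambda.get_function("arn:aws:lambda:us-west-2:123456789012:function:my-function")   # full ARN
```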
""" function get_function_code_signing_config( @@ -1210,8 +1220,8 @@ Returns details about the reserved concurrency configuration for a function. To concurrency limit for a function, use PutFunctionConcurrency. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1251,9 +1261,9 @@ UpdateFunctionConfiguration. To get all of a function's details, including funct settings, use GetFunction. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name – my-function (name-only), my-function:v1 (with alias). Function ARN + – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1295,8 +1305,8 @@ Retrieves the configuration for asynchronous invocation for a function, version, To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN - 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the @@ -1337,8 +1347,8 @@ end Returns details about a Lambda function URL. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1484,9 +1494,9 @@ end Returns the resource-based IAM policy for a function, version, or alias. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name – my-function (name-only), my-function:v1 (with alias). Function ARN + – arn:aws:lambda:us-west-2:123456789012:function:my-function. 
Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1524,8 +1534,8 @@ end Retrieves the provisioned concurrency configuration for a function's alias or version. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1570,8 +1580,8 @@ mode. If the runtime update mode is Auto or Function update, this includes the r update mode and null is returned for the ARN. For more information, see Runtime updates. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1611,23 +1621,26 @@ end invoke(function_name) invoke(function_name, params::Dict{String,<:Any}) Invokes a Lambda function. You can invoke a function synchronously (and wait for the -response), or asynchronously. To invoke a function asynchronously, set InvocationType to -Event. For synchronous invocation, details about the function response, including errors, -are included in the response body and headers. For either invocation type, you can find -more information in the execution log and trace. When an error occurs, your function may be -invoked multiple times. Retry behavior varies by error type, client, event source, and -invocation type. For example, if you invoke a function asynchronously and it returns an -error, Lambda executes the function up to two more times. For more information, see Error -handling and automatic retries in Lambda. For asynchronous invocation, Lambda adds events -to a queue before sending them to your function. If your function does not have enough -capacity to keep up with the queue, events may be lost. Occasionally, your function may -receive the same event multiple times, even if no error occurs. To retain events that were -not processed, configure your function with a dead-letter queue. The status code in the API -response doesn't reflect function errors. Error codes are reserved for errors that prevent -your function from executing, such as permissions errors, quota errors, or issues with your -function's code and configuration. For example, Lambda returns TooManyRequestsException if -running the function would cause you to exceed a concurrency limit at either the account -level (ConcurrentInvocationLimitExceeded) or function level +response), or asynchronously. By default, Lambda invokes your function synchronously (i.e. +the InvocationType is RequestResponse). To invoke a function asynchronously, set +InvocationType to Event. Lambda passes the ClientContext object to your function for +synchronous invocations only.
For synchronous invocation, details about the function +response, including errors, are included in the response body and headers. For either +invocation type, you can find more information in the execution log and trace. When an +error occurs, your function may be invoked multiple times. Retry behavior varies by error +type, client, event source, and invocation type. For example, if you invoke a function +asynchronously and it returns an error, Lambda executes the function up to two more times. +For more information, see Error handling and automatic retries in Lambda. For asynchronous +invocation, Lambda adds events to a queue before sending them to your function. If your +function does not have enough capacity to keep up with the queue, events may be lost. +Occasionally, your function may receive the same event multiple times, even if no error +occurs. To retain events that were not processed, configure your function with a +dead-letter queue. The status code in the API response doesn't reflect function errors. +Error codes are reserved for errors that prevent your function from executing, such as +permissions errors, quota errors, or issues with your function's code and configuration. +For example, Lambda returns TooManyRequestsException if running the function would cause +you to exceed a concurrency limit at either the account level +(ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded). For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long @@ -1636,9 +1649,9 @@ lambda:InvokeFunction action. For details on how to set up permissions for cross invocations, see Granting function access to other accounts. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name – my-function (name-only), my-function:v1 (with alias). Function ARN + – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1650,7 +1663,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specify a file path. For example, --payload file://payload.json. - `"Qualifier"`: Specify a version or alias to invoke a published version of the function. - `"X-Amz-Client-Context"`: Up to 3,583 bytes of base64-encoded data about the invoking - client to pass to the function in the context object. + client to pass to the function in the context object. Lambda passes the ClientContext + object to your function for synchronous invocations only. - `"X-Amz-Invocation-Type"`: Choose from the following options. RequestResponse (default) – Invoke the function synchronously. Keep the connection open until the function returns a response or times out. 
The API response includes the function response @@ -1687,11 +1701,14 @@ end invoke_async(function_name, invoke_args) invoke_async(function_name, invoke_args, params::Dict{String,<:Any}) - For asynchronous function invocation, use Invoke. Invokes a function asynchronously. + For asynchronous function invocation, use Invoke. Invokes a function asynchronously. If +you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active +tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned +on. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1736,8 +1753,8 @@ permission for the lambda:InvokeFunction action. For details on how to set up pe for cross-account invocations, see Granting function access to other accounts. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1789,10 +1806,11 @@ end Returns a list of aliases for a Lambda function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1870,14 +1888,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"EventSourceArn"`: The Amazon Resource Name (ARN) of the event source. Amazon Kinesis – The ARN of the data stream or a stream consumer. Amazon DynamoDB Streams – The ARN of the stream. Amazon Simple Queue Service – The ARN of the queue. Amazon Managed - Streaming for Apache Kafka – The ARN of the cluster. Amazon MQ – The ARN of the - broker. Amazon DocumentDB – The ARN of the DocumentDB change stream. -- `"FunctionName"`: The name of the Lambda function. Name formats Function name – - MyFunction. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. - Partial ARN – 123456789012:function:MyFunction. The length constraint applies only to - the full ARN. 
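Pulling the Invoke parameters above together, a minimal synchronous invocation sketch. The function name, alias, and payload are placeholders; the Payload key is assumed to carry the JSON request body, and the X-Amz-* keys map to the documented invocation headers.

```julia
using AWS
@service Lambda

# Synchronous call (the default InvocationType); use "Event" for async invocation.
resp = Lambda.invoke(
    "my-function",
    Dict(
        "Payload" => """{"orderId": 42}""",
        "X-Amz-Invocation-Type" => "RequestResponse",
        "Qualifier" => "PROD",   # hypothetical alias
    ),
)
```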
If you specify only the function name, it's limited to 64 characters in - length. + Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for + cross-account event source mappings). Amazon MQ – The ARN of the broker. Amazon + DocumentDB – The ARN of the DocumentDB change stream. +- `"FunctionName"`: The name or ARN of the Lambda function. Name formats Function name + – MyFunction. Function ARN – + arn:aws:lambda:us-west-2:123456789012:function:MyFunction. Version or Alias ARN – + arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. Partial ARN – + 123456789012:function:MyFunction. The length constraint applies only to the full ARN. If + you specify only the function name, it's limited to 64 characters in length. - `"Marker"`: A pagination token returned by a previous call. - `"MaxItems"`: The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set @@ -1911,11 +1930,11 @@ Retrieves a list of configurations for asynchronous invocation for a function. T options for asynchronous invocation, use PutFunctionEventInvokeConfig. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - my-function. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. - Partial ARN - 123456789012:function:my-function. The length constraint applies only to - the full ARN. If you specify only the function name, it is limited to 64 characters in - length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - my-function. Function ARN - + arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN - + 123456789012:function:my-function. The length constraint applies only to the full ARN. If + you specify only the function name, it is limited to 64 characters in length. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1954,8 +1973,8 @@ end Returns a list of Lambda function URLs for the specified function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -2095,7 +2114,7 @@ are compatible with that architecture. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CompatibleArchitecture"`: The compatible instruction set architecture. -- `"CompatibleRuntime"`: A runtime identifier. For example, go1.x. The following list +- `"CompatibleRuntime"`: A runtime identifier. For example, java21. The following list includes deprecated runtimes. For more information, see Runtime deprecation policy. - `"Marker"`: A pagination token returned by a previous call. - `"MaxItems"`: The maximum number of versions to return. @@ -2134,7 +2153,7 @@ that instruction set architecture. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CompatibleArchitecture"`: The compatible instruction set architecture. -- `"CompatibleRuntime"`: A runtime identifier. For example, go1.x. 
The following list + includes deprecated runtimes. For more information, see Runtime deprecation policy. - `"Marker"`: A pagination token returned by a previous call. - `"MaxItems"`: The maximum number of layers to return. @@ -2163,8 +2182,8 @@ end Retrieves a list of provisioned concurrency configurations for a function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -2238,10 +2257,11 @@ Returns a list of versions, with the version-specific configuration of each. Lam up to 50 versions per call. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2335,10 +2355,11 @@ function before publishing a version. Clients can invoke versions directly or wi alias. To create an alias, use CreateAlias. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2385,10 +2406,11 @@ function. # Arguments - `code_signing_config_arn`: The Amazon Resource Name (ARN) of the code signing configuration. -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
+ Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. """ function put_function_code_signing_config( @@ -2438,8 +2460,8 @@ at least 100 simultaneous executions unreserved for functions that aren't config per-function limit. For more information, see Lambda function scaling. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -2501,8 +2523,8 @@ fail all processing attempts (on-failure). You can configure destinations in add instead of a dead-letter queue. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN - 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the @@ -2551,8 +2573,8 @@ end Adds a provisioned concurrency configuration to a function's alias or version. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -2611,8 +2633,8 @@ Sets the runtime management configuration for a function's version. For more inf see Runtime updates. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -2719,9 +2741,9 @@ Revokes function-use permission from an Amazon Web Service or another Amazon Web account. You can get the ID of the statement from the output of GetPolicy. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name – my-function (name-only), my-function:v1 (with alias). Function ARN + – arn:aws:lambda:us-west-2:123456789012:function:my-function. 
Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -2837,10 +2859,11 @@ end Updates the configuration of a Lambda function alias. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name - - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to the - full ARN. If you specify only the function name, it is limited to 64 characters in length. +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + - MyFunction. Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + Partial ARN - 123456789012:function:MyFunction. The length constraint applies only to + the full ARN. If you specify only the function name, it is limited to 64 characters in + length. - `name`: The name of the alias. # Optional Parameters @@ -2952,20 +2975,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 100. Max 10,000. - `"BisectBatchOnFunctionError"`: (Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. -- `"DestinationConfig"`: (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or - standard Amazon SNS topic destination for discarded records. +- `"DestinationConfig"`: (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka + only) A configuration object that specifies the destination of an event after Lambda + processes it. - `"DocumentDBEventSourceConfig"`: Specific configuration settings for a DocumentDB event source. - `"Enabled"`: When true, the event source mapping is active. When false, Lambda pauses polling and invocation. Default: True - `"FilterCriteria"`: An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering. -- `"FunctionName"`: The name of the Lambda function. Name formats Function name – - MyFunction. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. - Partial ARN – 123456789012:function:MyFunction. The length constraint applies only to - the full ARN. If you specify only the function name, it's limited to 64 characters in - length. +- `"FunctionName"`: The name or ARN of the Lambda function. Name formats Function name + – MyFunction. Function ARN – + arn:aws:lambda:us-west-2:123456789012:function:MyFunction. Version or Alias ARN – + arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. Partial ARN – + 123456789012:function:MyFunction. The length constraint applies only to the full ARN. If + you specify only the function name, it's limited to 64 characters in length. - `"FunctionResponseTypes"`: (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping. - `"MaximumBatchingWindowInSeconds"`: The maximum amount of time, in seconds, that Lambda @@ -3035,8 +3059,8 @@ ECR, if you update the image tag to a new image, Lambda does not automatically u function. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. 
Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -3107,8 +3131,8 @@ grant invoke permissions to an Amazon Web Services account or Amazon Web Service AddPermission. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -3122,7 +3146,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Environment"`: Environment variables that are accessible from function code during execution. - `"EphemeralStorage"`: The size of the function's /tmp directory in MB. The default value - is 512, but can be any whole number between 512 and 10,240 MB. + is 512, but can be any whole number between 512 and 10,240 MB. For more information, see + Configuring ephemeral storage (console). - `"FileSystemConfigs"`: Connection settings for an Amazon EFS file system. - `"Handler"`: The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format @@ -3139,6 +3164,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys customer managed key, Lambda uses a default service key. - `"Layers"`: A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version. +- `"LoggingConfig"`: The function's Amazon CloudWatch Logs configuration settings. - `"MemorySize"`: The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB. @@ -3192,8 +3218,8 @@ Updates the configuration for asynchronous invocation for a function, version, o configure options for asynchronous invocation, use PutFunctionEventInvokeConfig. # Arguments -- `function_name`: The name of the Lambda function, version, or alias. Name formats - Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - +- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats + Function name - my-function (name-only), my-function:v1 (with alias). Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN - 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the @@ -3242,8 +3268,8 @@ end Updates the configuration for a Lambda function URL. # Arguments -- `function_name`: The name of the Lambda function. Name formats Function name – - my-function. Function ARN – +- `function_name`: The name or ARN of the Lambda function. Name formats Function name + – my-function. Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. The length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length. diff --git a/src/services/launch_wizard.jl b/src/services/launch_wizard.jl new file mode 100644 index 0000000000..d8880a18a6 --- /dev/null +++ b/src/services/launch_wizard.jl @@ -0,0 +1,494 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: launch_wizard +using AWS.Compat +using AWS.UUIDs + +""" + create_deployment(deployment_pattern_name, name, specifications, workload_name) + create_deployment(deployment_pattern_name, name, specifications, workload_name, params::Dict{String,<:Any}) + +Creates a deployment for the given workload. Deployments created by this operation are not +available in the Launch Wizard console to use the Clone deployment action on. + +# Arguments +- `deployment_pattern_name`: The name of the deployment pattern supported by a given + workload. You can use the ListWorkloadDeploymentPatterns operation to discover supported + values for this parameter. +- `name`: The name of the deployment. +- `specifications`: The settings specified for the deployment. These settings define how to + deploy and configure your resources created by the deployment. For more information about + the specifications required for creating a deployment for a SAP workload, see SAP + deployment specifications. To retrieve the specifications required to create a deployment + for other workloads, use the GetWorkloadDeploymentPattern operation. +- `workload_name`: The name of the workload. You can use the ListWorkloads operation to + discover supported values for this parameter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"tags"`: The tags to add to the deployment. +""" +function create_deployment( + deploymentPatternName, + name, + specifications, + workloadName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/createDeployment", + Dict{String,Any}( + "deploymentPatternName" => deploymentPatternName, + "name" => name, + "specifications" => specifications, + "workloadName" => workloadName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_deployment( + deploymentPatternName, + name, + specifications, + workloadName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/createDeployment", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "deploymentPatternName" => deploymentPatternName, + "name" => name, + "specifications" => specifications, + "workloadName" => workloadName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_deployment(deployment_id) + delete_deployment(deployment_id, params::Dict{String,<:Any}) + +Deletes a deployment. + +# Arguments +- `deployment_id`: The ID of the deployment. 
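+
+# Example
+An illustrative call; the deployment ID shown is a placeholder, not a real resource:
+
+    # Delete the deployment identified by the placeholder ID below.
+    delete_deployment("4a1b2c3d-example-deployment-id")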
+ +""" +function delete_deployment(deploymentId; aws_config::AbstractAWSConfig=global_aws_config()) + return launch_wizard( + "POST", + "/deleteDeployment", + Dict{String,Any}("deploymentId" => deploymentId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_deployment( + deploymentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/deleteDeployment", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("deploymentId" => deploymentId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_deployment(deployment_id) + get_deployment(deployment_id, params::Dict{String,<:Any}) + +Returns information about the deployment. + +# Arguments +- `deployment_id`: The ID of the deployment. + +""" +function get_deployment(deploymentId; aws_config::AbstractAWSConfig=global_aws_config()) + return launch_wizard( + "POST", + "/getDeployment", + Dict{String,Any}("deploymentId" => deploymentId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_deployment( + deploymentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/getDeployment", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("deploymentId" => deploymentId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_workload(workload_name) + get_workload(workload_name, params::Dict{String,<:Any}) + +Returns information about a workload. + +# Arguments +- `workload_name`: The name of the workload. + +""" +function get_workload(workloadName; aws_config::AbstractAWSConfig=global_aws_config()) + return launch_wizard( + "POST", + "/getWorkload", + Dict{String,Any}("workloadName" => workloadName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_workload( + workloadName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/getWorkload", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("workloadName" => workloadName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_workload_deployment_pattern(deployment_pattern_name, workload_name) + get_workload_deployment_pattern(deployment_pattern_name, workload_name, params::Dict{String,<:Any}) + +Returns details for a given workload and deployment pattern, including the available +specifications. You can use the ListWorkloads operation to discover the available workload +names and the ListWorkloadDeploymentPatterns operation to discover the available deployment +pattern names of a given workload. + +# Arguments +- `deployment_pattern_name`: The name of the deployment pattern. +- `workload_name`: The name of the workload. 
+ +""" +function get_workload_deployment_pattern( + deploymentPatternName, workloadName; aws_config::AbstractAWSConfig=global_aws_config() +) + return launch_wizard( + "POST", + "/getWorkloadDeploymentPattern", + Dict{String,Any}( + "deploymentPatternName" => deploymentPatternName, "workloadName" => workloadName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_workload_deployment_pattern( + deploymentPatternName, + workloadName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/getWorkloadDeploymentPattern", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "deploymentPatternName" => deploymentPatternName, + "workloadName" => workloadName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_deployment_events(deployment_id) + list_deployment_events(deployment_id, params::Dict{String,<:Any}) + +Lists the events of a deployment. + +# Arguments +- `deployment_id`: The ID of the deployment. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. +- `"nextToken"`: The token returned from a previous paginated request. Pagination continues + from the end of the items returned by the previous request. +""" +function list_deployment_events( + deploymentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return launch_wizard( + "POST", + "/listDeploymentEvents", + Dict{String,Any}("deploymentId" => deploymentId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_deployment_events( + deploymentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/listDeploymentEvents", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("deploymentId" => deploymentId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_deployments() + list_deployments(params::Dict{String,<:Any}) + +Lists the deployments that have been created. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: Filters to scope the results. The following filters are supported: + WORKLOAD_NAME - The name used in deployments. DEPLOYMENT_STATUS - COMPLETED | CREATING | + DELETE_IN_PROGRESS | DELETE_INITIATING | DELETE_FAILED | DELETED | FAILED | IN_PROGRESS | + VALIDATING +- `"maxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. +- `"nextToken"`: The token returned from a previous paginated request. Pagination continues + from the end of the items returned by the previous request. 
+""" +function list_deployments(; aws_config::AbstractAWSConfig=global_aws_config()) + return launch_wizard( + "POST", "/listDeployments"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_deployments( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return launch_wizard( + "POST", + "/listDeployments", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags associated with a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return launch_wizard( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_workload_deployment_patterns(workload_name) + list_workload_deployment_patterns(workload_name, params::Dict{String,<:Any}) + +Lists the workload deployment patterns for a given workload name. You can use the +ListWorkloads operation to discover the available workload names. + +# Arguments +- `workload_name`: The name of the workload. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. +- `"nextToken"`: The token returned from a previous paginated request. Pagination continues + from the end of the items returned by the previous request. +""" +function list_workload_deployment_patterns( + workloadName; aws_config::AbstractAWSConfig=global_aws_config() +) + return launch_wizard( + "POST", + "/listWorkloadDeploymentPatterns", + Dict{String,Any}("workloadName" => workloadName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workload_deployment_patterns( + workloadName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/listWorkloadDeploymentPatterns", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("workloadName" => workloadName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_workloads() + list_workloads(params::Dict{String,<:Any}) + +Lists the available workload names. You can use the ListWorkloadDeploymentPatterns +operation to discover the available deployment patterns for a given workload. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of items to return for this request. To get the next + page of items, make another request with the token returned in the output. +- `"nextToken"`: The token returned from a previous paginated request. Pagination continues + from the end of the items returned by the previous request. 
+""" +function list_workloads(; aws_config::AbstractAWSConfig=global_aws_config()) + return launch_wizard( + "POST", "/listWorkloads"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_workloads( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return launch_wizard( + "POST", + "/listWorkloads", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds the specified tags to the given resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tags`: One or more tags to attach to the resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return launch_wizard( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes the specified tags from the given resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tag_keys`: Keys identifying the tags to remove. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return launch_wizard( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return launch_wizard( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/lex_models_v2.jl b/src/services/lex_models_v2.jl index b48a4d2a65..77bcf483cc 100644 --- a/src/services/lex_models_v2.jl +++ b/src/services/lex_models_v2.jl @@ -378,6 +378,7 @@ locale to a bot before you can add intents and slot types to the bot. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: A description of the bot locale. Use this to help identify the bot locale in lists. +- `"generativeAISettings"`: - `"voiceSettings"`: The Amazon Polly voice ID that Amazon Lex uses for voice interaction with the user. """ @@ -425,14 +426,56 @@ function create_bot_locale( ) end +""" + create_bot_replica(bot_id, replica_region) + create_bot_replica(bot_id, replica_region, params::Dict{String,<:Any}) + +Action to create a replication of the source bot in the secondary region. + +# Arguments +- `bot_id`: The request for the unique bot ID of the source bot to be replicated in the + secondary region. +- `replica_region`: The request for the secondary region that will be used in the + replication of the source bot. 
+ +""" +function create_bot_replica( + botId, replicaRegion; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "PUT", + "/bots/$(botId)/replicas/", + Dict{String,Any}("replicaRegion" => replicaRegion); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_bot_replica( + botId, + replicaRegion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "PUT", + "/bots/$(botId)/replicas/", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("replicaRegion" => replicaRegion), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_bot_version(bot_id, bot_version_locale_specification) create_bot_version(bot_id, bot_version_locale_specification, params::Dict{String,<:Any}) -Creates a new version of the bot based on the DRAFT version. If the DRAFT version of this -resource hasn't changed since you created the last version, Amazon Lex doesn't create a new -version, it returns the last created version. When you create the first version of a bot, -Amazon Lex sets the version to 1. Subsequent versions increment by 1. +Creates an immutable version of the bot. When you create the first version of a bot, Amazon +Lex sets the version number to 1. Subsequent bot versions increase in an increment of 1. +The version number will always represent the total number of versions created of the bot, +not the current number of versions. If a bot version is deleted, that bot version number +will not be reused. # Arguments - `bot_id`: The identifier of the bot to create the version for. @@ -612,6 +655,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys context should be active. - `"parentIntentSignature"`: A unique identifier for the built-in intent to base this intent on. +- `"qnAIntentConfiguration"`: Specifies the configuration of the built-in Amazon.QnAIntent. + The AMAZON.QnAIntent intent is called when Amazon Lex can't determine another intent to + invoke. If you specify this field, you can't specify the kendraConfiguration field. - `"sampleUtterances"`: An array of strings that a user might say to signal the intent. For example, \"I want a pizza\", or \"I want a {PizzaSize} pizza\". In an utterance, slot names are enclosed in curly braces (\"{\", \"}\") to indicate where they should be @@ -1114,6 +1160,42 @@ function delete_bot_locale( ) end +""" + delete_bot_replica(bot_id, replica_region) + delete_bot_replica(bot_id, replica_region, params::Dict{String,<:Any}) + +The action to delete the replicated bot in the secondary region. + +# Arguments +- `bot_id`: The unique ID of the replicated bot to be deleted from the secondary region +- `replica_region`: The secondary region of the replicated bot that will be deleted. 
+ +""" +function delete_bot_replica( + botId, replicaRegion; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "DELETE", + "/bots/$(botId)/replicas/$(replicaRegion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_bot_replica( + botId, + replicaRegion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "DELETE", + "/bots/$(botId)/replicas/$(replicaRegion)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_bot_version(bot_id, bot_version) delete_bot_version(bot_id, bot_version, params::Dict{String,<:Any}) @@ -1720,6 +1802,90 @@ function describe_bot_recommendation( ) end +""" + describe_bot_replica(bot_id, replica_region) + describe_bot_replica(bot_id, replica_region, params::Dict{String,<:Any}) + +Monitors the bot replication status through the UI console. + +# Arguments +- `bot_id`: The request for the unique bot ID of the replicated bot being monitored. +- `replica_region`: The request for the region of the replicated bot being monitored. + +""" +function describe_bot_replica( + botId, replicaRegion; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "GET", + "/bots/$(botId)/replicas/$(replicaRegion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_bot_replica( + botId, + replicaRegion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "GET", + "/bots/$(botId)/replicas/$(replicaRegion)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_bot_resource_generation(bot_id, bot_version, generation_id, locale_id) + describe_bot_resource_generation(bot_id, bot_version, generation_id, locale_id, params::Dict{String,<:Any}) + +Returns information about a request to generate a bot through natural language description, +made through the StartBotResource API. Use the generatedBotLocaleUrl to retrieve the Amazon +S3 object containing the bot locale configuration. You can then modify and import this +configuration. + +# Arguments +- `bot_id`: The unique identifier of the bot for which to return the generation details. +- `bot_version`: The version of the bot for which to return the generation details. +- `generation_id`: The unique identifier of the generation request for which to return the + generation details. +- `locale_id`: The locale of the bot for which to return the generation details. 
+ +""" +function describe_bot_resource_generation( + botId, + botVersion, + generationId, + localeId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "GET", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/generations/$(generationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_bot_resource_generation( + botId, + botVersion, + generationId, + localeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "GET", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/generations/$(generationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_bot_version(bot_id, bot_version) describe_bot_version(bot_id, bot_version, params::Dict{String,<:Any}) @@ -2166,6 +2332,49 @@ function describe_test_set_generation( ) end +""" + generate_bot_element(bot_id, bot_version, intent_id, locale_id) + generate_bot_element(bot_id, bot_version, intent_id, locale_id, params::Dict{String,<:Any}) + +Generates sample utterances for an intent. + +# Arguments +- `bot_id`: The bot unique Id for the bot request to generate utterances. +- `bot_version`: The bot version for the bot request to generate utterances. +- `intent_id`: The intent unique Id for the bot request to generate utterances. +- `locale_id`: The unique locale Id for the bot request to generate utterances. + +""" +function generate_bot_element( + botId, botVersion, intentId, localeId; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/generate", + Dict{String,Any}("intentId" => intentId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function generate_bot_element( + botId, + botVersion, + intentId, + localeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/generate", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("intentId" => intentId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_test_execution_artifacts_url(test_execution_id) get_test_execution_artifacts_url(test_execution_id, params::Dict{String,<:Any}) @@ -2277,6 +2486,50 @@ function list_aggregated_utterances( ) end +""" + list_bot_alias_replicas(bot_id, replica_region) + list_bot_alias_replicas(bot_id, replica_region, params::Dict{String,<:Any}) + +The action to list the replicated bots created from the source bot alias. + +# Arguments +- `bot_id`: The request for the unique bot ID of the replicated bot created from the source + bot alias. +- `replica_region`: The request for the secondary region of the replicated bot created from + the source bot alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The request for maximum results to list the replicated bots created from + the source bot alias. +- `"nextToken"`: The request for the next token for the replicated bot created from the + source bot alias. 
+""" +function list_bot_alias_replicas( + botId, replicaRegion; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/replicas/$(replicaRegion)/botaliases/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_bot_alias_replicas( + botId, + replicaRegion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/replicas/$(replicaRegion)/botaliases/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_bot_aliases(bot_id) list_bot_aliases(bot_id, params::Dict{String,<:Any}) @@ -2408,6 +2661,124 @@ function list_bot_recommendations( ) end +""" + list_bot_replicas(bot_id) + list_bot_replicas(bot_id, params::Dict{String,<:Any}) + +The action to list the replicated bots. + +# Arguments +- `bot_id`: The request for the unique bot IDs in the list of replicated bots. + +""" +function list_bot_replicas(botId; aws_config::AbstractAWSConfig=global_aws_config()) + return lex_models_v2( + "POST", + "/bots/$(botId)/replicas/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_bot_replicas( + botId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/replicas/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_bot_resource_generations(bot_id, bot_version, locale_id) + list_bot_resource_generations(bot_id, bot_version, locale_id, params::Dict{String,<:Any}) + +Lists the generation requests made for a bot locale. + +# Arguments +- `bot_id`: The unique identifier of the bot whose generation requests you want to view. +- `bot_version`: The version of the bot whose generation requests you want to view. +- `locale_id`: The locale of the bot whose generation requests you want to view. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. +- `"nextToken"`: If the total number of results is greater than the number specified in the + maxResults, the response returns a token in the nextToken field. Use this token when making + a request to return the next batch of results. +- `"sortBy"`: An object containing information about the attribute and the method by which + to sort the results +""" +function list_bot_resource_generations( + botId, botVersion, localeId; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/generations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_bot_resource_generations( + botId, + botVersion, + localeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/generations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_bot_version_replicas(bot_id, replica_region) + list_bot_version_replicas(bot_id, replica_region, params::Dict{String,<:Any}) + +Contains information about all the versions replication statuses applicable for Global +Resiliency. + +# Arguments +- `bot_id`: The request for the unique ID in the list of replicated bots. 
+- `replica_region`: The request for the region used in the list of replicated bots. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum results given in the list of replicated bots. +- `"nextToken"`: The next token given in the list of replicated bots. +- `"sortBy"`: The requested sort category for the list of replicated bots. +""" +function list_bot_version_replicas( + botId, replicaRegion; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/replicas/$(replicaRegion)/botversions/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_bot_version_replicas( + botId, + replicaRegion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/replicas/$(replicaRegion)/botversions/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_bot_versions(bot_id) list_bot_versions(bot_id, params::Dict{String,<:Any}) @@ -2706,26 +3077,282 @@ function list_imports( end """ - list_intents(bot_id, bot_version, locale_id) - list_intents(bot_id, bot_version, locale_id, params::Dict{String,<:Any}) + list_intent_metrics(bot_id, end_date_time, metrics, start_date_time) + list_intent_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) -Get a list of intents that meet the specified criteria. +Retrieves summary metrics for the intents in your bot. The following fields are required: + metrics – A list of AnalyticsIntentMetric objects. In each object, use the name field to +specify the metric to calculate, the statistic field to specify whether to calculate the +Sum, Average, or Max number, and the order field to specify whether to sort the results in +Ascending or Descending order. startDateTime and endDateTime – Define a time range for +which you want to retrieve results. Of the optional fields, you can organize the results +in the following ways: Use the filters field to filter the results, the groupBy field to +specify categories by which to group the results, and the binBy field to specify time +intervals by which to group the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. Note that an order field +exists in both binBy and metrics. You can specify only one order in a given request. # Arguments -- `bot_id`: The unique identifier of the bot that contains the intent. -- `bot_version`: The version of the bot that contains the intent. -- `locale_id`: The identifier of the language and locale of the intents to list. The string - must match one of the supported locales. For more information, see Supported languages. +- `bot_id`: The identifier for the bot for which you want to retrieve intent metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see intent metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the order by which to organize the results. +- `start_date_time`: The timestamp that marks the beginning of the range of time for which + you want to see intent metrics. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"filters"`: Provides the specification of a filter used to limit the intents in the - response to only those that match the filter specification. You can only specify one filter - and only one string to filter on. -- `"maxResults"`: The maximum number of intents to return in each page of results. If there - are fewer results than the max page size, only the actual number of results are returned. -- `"nextToken"`: If the response from the ListIntents operation contains more results than - specified in the maxResults parameter, a token is returned in the response. Use the +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. You can + group by the following criteria: IntentName – The name of the intent. + IntentEndState – The final state of the intent. The possible end states are detailed in + Key definitions in the user guide. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListIntentMetrics operation contains more results + than specified in the maxResults parameter, a token is returned in the response. Use the + returned token in the nextToken parameter of a ListIntentMetrics request to return the next + page of results. For a complete set of results, call the ListIntentMetrics operation until + the nextToken returned in the response is null. +""" +function list_intent_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentmetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_intent_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentmetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_intent_paths(bot_id, end_date_time, intent_path, start_date_time) + list_intent_paths(bot_id, end_date_time, intent_path, start_date_time, params::Dict{String,<:Any}) + +Retrieves summary statistics for a path of intents that users take over sessions with your +bot. The following fields are required: startDateTime and endDateTime – Define a time +range for which you want to retrieve results. intentPath – Define an order of intents +for which you want to retrieve metrics. Separate intents in the path with a forward slash. +For example, populate the intentPath field with /BookCar/BookHotel to see details about how +many times users invoked the BookCar and BookHotel intents in that order. Use the +optional filters field to filter the results. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve intent path metrics. 
+- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see intent path metrics. +- `intent_path`: The intent path for which you want to retrieve metrics. Use a forward + slash to separate intents in the path. For example: /BookCar /BookCar/BookHotel + /BookHotel/BookCar +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see intent path metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: A list of objects, each describes a condition by which you want to filter + the results. +""" +function list_intent_paths( + botId, + endDateTime, + intentPath, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentpaths", + Dict{String,Any}( + "endDateTime" => endDateTime, + "intentPath" => intentPath, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_intent_paths( + botId, + endDateTime, + intentPath, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentpaths", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "intentPath" => intentPath, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_intent_stage_metrics(bot_id, end_date_time, metrics, start_date_time) + list_intent_stage_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) + +Retrieves summary metrics for the stages within intents in your bot. The following fields +are required: metrics – A list of AnalyticsIntentStageMetric objects. In each object, +use the name field to specify the metric to calculate, the statistic field to specify +whether to calculate the Sum, Average, or Max number, and the order field to specify +whether to sort the results in Ascending or Descending order. startDateTime and +endDateTime – Define a time range for which you want to retrieve results. Of the +optional fields, you can organize the results in the following ways: Use the filters +field to filter the results, the groupBy field to specify categories by which to group the +results, and the binBy field to specify time intervals by which to group the results. Use +the maxResults field to limit the number of results to return in a single response and the +nextToken field to return the next batch of results if the response does not return the +full set of results. Note that an order field exists in both binBy and metrics. You can +only specify one order in a given request. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve intent stage metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see intent stage metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the method by which to organize the + results. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see intent stage metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. You can + group by the following criteria: IntentStageName – The name of the intent stage. + SwitchedToIntent – The intent to which the conversation was switched (if any). +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListIntentStageMetrics operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListIntentStageMetrics request to + return the next page of results. For a complete set of results, call the + ListIntentStageMetrics operation until the nextToken returned in the response is null. +""" +function list_intent_stage_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentstagemetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_intent_stage_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentstagemetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_intents(bot_id, bot_version, locale_id) + list_intents(bot_id, bot_version, locale_id, params::Dict{String,<:Any}) + +Get a list of intents that meet the specified criteria. + +# Arguments +- `bot_id`: The unique identifier of the bot that contains the intent. +- `bot_version`: The version of the bot that contains the intent. +- `locale_id`: The identifier of the language and locale of the intents to list. The string + must match one of the supported locales. For more information, see Supported languages. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: Provides the specification of a filter used to limit the intents in the + response to only those that match the filter specification. You can only specify one filter + and only one string to filter on. +- `"maxResults"`: The maximum number of intents to return in each page of results. If there + are fewer results than the max page size, only the actual number of results are returned. +- `"nextToken"`: If the response from the ListIntents operation contains more results than + specified in the maxResults parameter, a token is returned in the response. Use the returned token in the nextToken parameter of a ListIntents request to return the next page of results. For a complete set of results, call the ListIntents operation until the nextToken returned in the response is null. 
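
The new analytics operations above share a common calling pattern; the sketch below shows
ListIntentMetrics. The bot ID is a placeholder, the Count metric name and the ISO-8601
timestamp strings are assumptions, and the statistic and order values follow the field
descriptions above:

    list_intent_metrics(
        "EXAMPLEB0T",
        "2024-06-30T00:00:00Z",   # endDateTime
        [Dict("name" => "Count", "statistic" => "Sum", "order" => "Descending")],
        "2024-06-01T00:00:00Z",   # startDateTime
    )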
@@ -2813,6 +3440,168 @@ function list_recommended_intents( ) end +""" + list_session_analytics_data(bot_id, end_date_time, start_date_time) + list_session_analytics_data(bot_id, end_date_time, start_date_time, params::Dict{String,<:Any}) + +Retrieves a list of metadata for individual user sessions with your bot. The startDateTime +and endDateTime fields are required. These fields define a time range for which you want to +retrieve results. Of the optional fields, you can organize the results in the following +ways: Use the filters field to filter the results and the sortBy field to specify the +values by which to sort the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve session analytics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see session analytics. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see session analytics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListSessionAnalyticsData operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListSessionAnalyticsData request to + return the next page of results. For a complete set of results, call the + ListSessionAnalyticsData operation until the nextToken returned in the response is null. +- `"sortBy"`: An object specifying the measure and method by which to sort the session + analytics data. +""" +function list_session_analytics_data( + botId, endDateTime, startDateTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessions", + Dict{String,Any}("endDateTime" => endDateTime, "startDateTime" => startDateTime); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_session_analytics_data( + botId, + endDateTime, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, "startDateTime" => startDateTime + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_session_metrics(bot_id, end_date_time, metrics, start_date_time) + list_session_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) + +Retrieves summary metrics for the user sessions with your bot. The following fields are +required: metrics – A list of AnalyticsSessionMetric objects. 
In each object, use the +name field to specify the metric to calculate, the statistic field to specify whether to +calculate the Sum, Average, or Max number, and the order field to specify whether to sort +the results in Ascending or Descending order. startDateTime and endDateTime – Define a +time range for which you want to retrieve results. Of the optional fields, you can +organize the results in the following ways: Use the filters field to filter the results, +the groupBy field to specify categories by which to group the results, and the binBy field +to specify time intervals by which to group the results. Use the maxResults field to +limit the number of results to return in a single response and the nextToken field to +return the next batch of results if the response does not return the full set of results. +Note that an order field exists in both binBy and metrics. Currently, you can specify it in +either field, but not in both. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve session metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see session metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the method by which to organize the + results. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see session metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. You can + group by the following criteria: ConversationEndState – The final state of the + conversation. The possible end states are detailed in Key definitions in the user guide. + LocaleId – The unique identifier of the bot locale. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListSessionMetrics operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListSessionMetrics request to return + the next page of results. For a complete set of results, call the ListSessionMetrics + operation until the nextToken returned in the response is null. 
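+
+# Example
+An illustrative call grouping session counts by conversation end state; the bot ID is a
+placeholder, the timestamp format is an assumption, and the metric and groupBy object
+shapes follow the field descriptions above, so treat the exact key names as assumptions:
+
+    list_session_metrics(
+        "EXAMPLEB0T",
+        "2024-06-30T00:00:00Z",   # endDateTime
+        [Dict("name" => "Count", "statistic" => "Sum", "order" => "Descending")],
+        "2024-06-01T00:00:00Z",   # startDateTime
+        Dict{String,Any}("groupBy" => [Dict("name" => "ConversationEndState")]),
+    )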
+""" +function list_session_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessionmetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_session_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessionmetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_slot_types(bot_id, bot_version, locale_id) list_slot_types(bot_id, bot_version, locale_id, params::Dict{String,<:Any}) @@ -3102,6 +3891,177 @@ function list_test_sets( ) end +""" + list_utterance_analytics_data(bot_id, end_date_time, start_date_time) + list_utterance_analytics_data(bot_id, end_date_time, start_date_time, params::Dict{String,<:Any}) + + To use this API operation, your IAM role must have permissions to perform the +ListAggregatedUtterances operation, which provides access to utterance-related analytics. +See Viewing utterance statistics for the IAM policy to apply to the IAM role. Retrieves a +list of metadata for individual user utterances to your bot. The following fields are +required: startDateTime and endDateTime – Define a time range for which you want to +retrieve results. Of the optional fields, you can organize the results in the following +ways: Use the filters field to filter the results and the sortBy field to specify the +values by which to sort the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve utterance analytics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see utterance analytics. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see utterance analytics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListUtteranceAnalyticsData operation contains + more results than specified in the maxResults parameter, a token is returned in the + response. Use the returned token in the nextToken parameter of a ListUtteranceAnalyticsData + request to return the next page of results. For a complete set of results, call the + ListUtteranceAnalyticsData operation until the nextToken returned in the response is null. +- `"sortBy"`: An object specifying the measure and method by which to sort the utterance + analytics data. 
+""" +function list_utterance_analytics_data( + botId, endDateTime, startDateTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterances", + Dict{String,Any}("endDateTime" => endDateTime, "startDateTime" => startDateTime); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_utterance_analytics_data( + botId, + endDateTime, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterances", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, "startDateTime" => startDateTime + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_utterance_metrics(bot_id, end_date_time, metrics, start_date_time) + list_utterance_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) + + To use this API operation, your IAM role must have permissions to perform the +ListAggregatedUtterances operation, which provides access to utterance-related analytics. +See Viewing utterance statistics for the IAM policy to apply to the IAM role. Retrieves +summary metrics for the utterances in your bot. The following fields are required: +metrics – A list of AnalyticsUtteranceMetric objects. In each object, use the name field +to specify the metric to calculate, the statistic field to specify whether to calculate the +Sum, Average, or Max number, and the order field to specify whether to sort the results in +Ascending or Descending order. startDateTime and endDateTime – Define a time range for +which you want to retrieve results. Of the optional fields, you can organize the results +in the following ways: Use the filters field to filter the results, the groupBy field to +specify categories by which to group the results, and the binBy field to specify time +intervals by which to group the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. Note that an order field +exists in both binBy and metrics. Currently, you can specify it in either field, but not in +both. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve utterance metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see utterance metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the method by which to organize the + results. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see utterance metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attributes"`: A list containing attributes related to the utterance that you want the + response to return. The following attributes are possible: LastUsedIntent – The last + used intent at the time of the utterance. +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. 
You can + group by the following criteria: UtteranceText – The transcription of the utterance. + UtteranceState – The state of the utterance. The possible states are detailed in Key + definitions in the user guide. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListUtteranceMetrics operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListUtteranceMetrics request to + return the next page of results. For a complete set of results, call the + ListUtteranceMetrics operation until the nextToken returned in the response is null. +""" +function list_utterance_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterancemetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_utterance_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterancemetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_associated_transcripts(bot_id, bot_recommendation_id, bot_version, filters, locale_id) search_associated_transcripts(bot_id, bot_recommendation_id, bot_version, filters, locale_id, params::Dict{String,<:Any}) @@ -3223,6 +4183,65 @@ function start_bot_recommendation( ) end +""" + start_bot_resource_generation(bot_id, bot_version, generation_input_prompt, locale_id) + start_bot_resource_generation(bot_id, bot_version, generation_input_prompt, locale_id, params::Dict{String,<:Any}) + +Starts a request for the descriptive bot builder to generate a bot locale configuration +based on the prompt you provide it. After you make this call, use the +DescribeBotResourceGeneration operation to check on the status of the generation and for +the generatedBotLocaleUrl when the generation is complete. Use that value to retrieve the +Amazon S3 object containing the bot locale configuration. You can then modify and import +this configuration. + +# Arguments +- `bot_id`: The unique identifier of the bot for which to generate intents and slot types. +- `bot_version`: The version of the bot for which to generate intents and slot types. +- `generation_input_prompt`: The prompt to generate intents and slot types for the bot + locale. Your description should be both detailed and precise to help generate appropriate + and sufficient intents for your bot. Include a list of actions to improve the intent + creation process. +- `locale_id`: The locale of the bot for which to generate intents and slot types. 
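+
+# Example
+A short sketch, not part of the generated API surface: it assumes the wrapper is loaded
+with AWS.jl's `@service` macro, and the bot ID, version, locale, prompt, and the
+`generationId` response key are illustrative assumptions.
+
+```julia
+using AWS
+@service Lex_Models_V2
+
+prompt = "A bot that books hotel rooms and answers questions about existing reservations."
+resp = Lex_Models_V2.start_bot_resource_generation("EXAMPLEBOTID", "DRAFT", prompt, "en_US")
+
+# Keep the generation identifier so DescribeBotResourceGeneration can be polled later for
+# the generatedBotLocaleUrl once the generation completes (response key is an assumption).
+generation_id = get(resp, "generationId", nothing)
+```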
+ +""" +function start_bot_resource_generation( + botId, + botVersion, + generationInputPrompt, + localeId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "PUT", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/startgeneration", + Dict{String,Any}("generationInputPrompt" => generationInputPrompt); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_bot_resource_generation( + botId, + botVersion, + generationInputPrompt, + localeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "PUT", + "/bots/$(botId)/botversions/$(botVersion)/botlocales/$(localeId)/startgeneration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("generationInputPrompt" => generationInputPrompt), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_import(import_id, merge_strategy, resource_specification) start_import(import_id, merge_strategy, resource_specification, params::Dict{String,<:Any}) @@ -3674,6 +4693,9 @@ Updates the settings that a bot has for a specific locale. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: The new description of the locale. +- `"generativeAISettings"`: Contains settings for generative AI features powered by Amazon + Bedrock for your bot locale. Use this object to turn generative AI features on and off. + Pricing may differ if you turn a feature on. For more information, see LINK. - `"voiceSettings"`: The new Amazon Polly voice Amazon Lex should use for voice interaction with the user. """ @@ -3848,6 +4870,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys fulfilled. - `"parentIntentSignature"`: The signature of the new built-in intent to use as the parent of this intent. +- `"qnAIntentConfiguration"`: Specifies the configuration of the built-in Amazon.QnAIntent. + The AMAZON.QnAIntent intent is called when Amazon Lex can't determine another intent to + invoke. If you specify this field, you can't specify the kendraConfiguration field. - `"sampleUtterances"`: New utterances used to invoke the intent. - `"slotPriorities"`: A new list of slots and their priorities that are contained by the intent. diff --git a/src/services/lightsail.jl b/src/services/lightsail.jl index c5c80bee24..47a2468631 100644 --- a/src/services/lightsail.jl +++ b/src/services/lightsail.jl @@ -108,8 +108,8 @@ access control via resource tags applied to the resource identified by disk name information, see the Amazon Lightsail Developer Guide. # Arguments -- `disk_name`: The unique Lightsail disk name (e.g., my-disk). -- `disk_path`: The disk path to expose to the instance (e.g., /dev/xvdf). +- `disk_name`: The unique Lightsail disk name (my-disk). +- `disk_path`: The disk path to expose to the instance (/dev/xvdf). - `instance_name`: The name of the Lightsail instance where you want to utilize the storage disk. @@ -540,14 +540,14 @@ Region. # Arguments - `certificate_name`: The name for the certificate. -- `domain_name`: The domain name (e.g., example.com) for the certificate. +- `domain_name`: The domain name (example.com) for the certificate. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"subjectAlternativeNames"`: An array of strings that specify the alternate domains - (e.g., example2.com) and subdomains (e.g., blog.example.com) for the certificate. You can - specify a maximum of nine alternate domains (in addition to the primary domain name). - Wildcard domain entries (e.g., *.example.com) are not supported. + (example2.com) and subdomains (blog.example.com) for the certificate. You can specify a + maximum of nine alternate domains (in addition to the primary domain name). Wildcard domain + entries (*.example.com) are not supported. - `"tags"`: The tag keys and optional values to add to the certificate during create. Use the TagResource action to tag a resource after it's created. """ @@ -872,17 +872,16 @@ end create_disk(availability_zone, disk_name, size_in_gb, params::Dict{String,<:Any}) Creates a block storage disk that can be attached to an Amazon Lightsail instance in the -same Availability Zone (e.g., us-east-2a). The create disk operation supports tag-based -access control via request tags. For more information, see the Amazon Lightsail Developer -Guide. +same Availability Zone (us-east-2a). The create disk operation supports tag-based access +control via request tags. For more information, see the Amazon Lightsail Developer Guide. # Arguments -- `availability_zone`: The Availability Zone where you want to create the disk (e.g., - us-east-2a). Use the same Availability Zone as the Lightsail instance to which you want to +- `availability_zone`: The Availability Zone where you want to create the disk + (us-east-2a). Use the same Availability Zone as the Lightsail instance to which you want to attach the disk. Use the get regions operation to list the Availability Zones where Lightsail is currently available. -- `disk_name`: The unique Lightsail disk name (e.g., my-disk). -- `size_in_gb`: The size of the disk in GB (e.g., 32). +- `disk_name`: The unique Lightsail disk name (my-disk). +- `size_in_gb`: The size of the disk in GB (32). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -934,25 +933,25 @@ end create_disk_from_snapshot(availability_zone, disk_name, size_in_gb, params::Dict{String,<:Any}) Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting -disk can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., -us-east-2a). The create disk from snapshot operation supports tag-based access control via +disk can be attached to an Amazon Lightsail instance in the same Availability Zone +(us-east-2a). The create disk from snapshot operation supports tag-based access control via request tags and resource tags applied to the resource identified by disk snapshot name. For more information, see the Amazon Lightsail Developer Guide. # Arguments -- `availability_zone`: The Availability Zone where you want to create the disk (e.g., - us-east-2a). Choose the same Availability Zone as the Lightsail instance where you want to +- `availability_zone`: The Availability Zone where you want to create the disk + (us-east-2a). Choose the same Availability Zone as the Lightsail instance where you want to create the disk. Use the GetRegions operation to list the Availability Zones where Lightsail is currently available. -- `disk_name`: The unique Lightsail disk name (e.g., my-disk). -- `size_in_gb`: The size of the disk in GB (e.g., 32). +- `disk_name`: The unique Lightsail disk name (my-disk). +- `size_in_gb`: The size of the disk in GB (32). 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"addOns"`: An array of objects that represent the add-ons to enable for the new disk. -- `"diskSnapshotName"`: The name of the disk snapshot (e.g., my-snapshot) from which to - create the new storage disk. Constraint: This parameter cannot be defined together with - the source disk name parameter. The disk snapshot name and source disk name parameters are +- `"diskSnapshotName"`: The name of the disk snapshot (my-snapshot) from which to create + the new storage disk. Constraint: This parameter cannot be defined together with the + source disk name parameter. The disk snapshot name and source disk name parameters are mutually exclusive. - `"restoreDate"`: The date of the automatic snapshot to use for the new disk. Use the get auto snapshots operation to identify the dates of the available automatic snapshots. @@ -1039,18 +1038,18 @@ access control via request tags. For more information, see the Amazon Lightsail Guide. # Arguments -- `disk_snapshot_name`: The name of the destination disk snapshot (e.g., my-disk-snapshot) - based on the source disk. +- `disk_snapshot_name`: The name of the destination disk snapshot (my-disk-snapshot) based + on the source disk. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"diskName"`: The unique name of the source disk (e.g., Disk-Virginia-1). This parameter +- `"diskName"`: The unique name of the source disk (Disk-Virginia-1). This parameter cannot be defined together with the instance name parameter. The disk name and instance name parameters are mutually exclusive. -- `"instanceName"`: The unique name of the source instance (e.g., - Amazon_Linux-512MB-Virginia-1). When this is defined, a snapshot of the instance's system - volume is created. This parameter cannot be defined together with the disk name parameter. - The instance name and disk name parameters are mutually exclusive. +- `"instanceName"`: The unique name of the source instance (Amazon_Linux-512MB-Virginia-1). + When this is defined, a snapshot of the instance's system volume is created. This + parameter cannot be defined together with the disk name parameter. The instance name and + disk name parameters are mutually exclusive. - `"tags"`: The tag keys and optional values to add to the resource during create. Use the TagResource action to tag a resource after it's created. """ @@ -1108,10 +1107,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys distribution. - `"cacheBehaviors"`: An array of objects that describe the per-path cache behavior for the distribution. +- `"certificateName"`: The name of the SSL/TLS certificate that you want to attach to the + distribution. Use the GetCertificates action to get a list of certificate names that you + can specify. - `"ipAddressType"`: The IP address type for the distribution. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack. - `"tags"`: The tag keys and optional values to add to the distribution during create. Use the TagResource action to tag a resource after it's created. +- `"viewerMinimumTlsProtocolVersion"`: The minimum TLS protocol version for the SSL/TLS + certificate. 
""" function create_distribution( bundleId, @@ -1163,12 +1167,12 @@ end create_domain(domain_name) create_domain(domain_name, params::Dict{String,<:Any}) -Creates a domain resource for the specified domain (e.g., example.com). The create domain +Creates a domain resource for the specified domain (example.com). The create domain operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide. # Arguments -- `domain_name`: The domain name to manage (e.g., example.com). +- `domain_name`: The domain name to manage (example.com). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1211,8 +1215,8 @@ name. For more information, see the Amazon Lightsail Developer Guide. # Arguments - `domain_entry`: An array of key-value pairs containing information about the domain entry request. -- `domain_name`: The domain name (e.g., example.com) for which you want to create the - domain entry. +- `domain_name`: The domain name (example.com) for which you want to create the domain + entry. """ function create_domain_entry( @@ -1349,14 +1353,14 @@ Developer Guide. following format: us-east-2a (case sensitive). You can get a list of Availability Zones by using the get regions operation. Be sure to add the include Availability Zones parameter to your request. -- `blueprint_id`: The ID for a virtual private server image (e.g., app_wordpress_4_4 or - app_lamp_7_0). Use the get blueprints operation to return a list of available images (or +- `blueprint_id`: The ID for a virtual private server image (app_wordpress_x_x or + app_lamp_x_x). Use the get blueprints operation to return a list of available images (or blueprints). Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases. - `bundle_id`: The bundle of specification information for your virtual private server (or - instance), including the pricing plan (e.g., micro_1_0). + instance), including the pricing plan (medium_x_x). - `instance_names`: The names to use for your new Lightsail instances. Separate multiple values using quotation marks and commas, for example: [\"MyFirstInstance\",\"MySecondInstance\"] @@ -1364,10 +1368,11 @@ Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"addOns"`: An array of objects representing the add-ons to enable for the new instance. -- `"customImageName"`: (Deprecated) The name for your custom image. In releases prior to - June 12, 2017, this parameter was ignored by the API. It is now deprecated. +- `"customImageName"`: (Discontinued) The name for your custom image. In releases prior to + June 12, 2017, this parameter was ignored by the API. It is now discontinued. - `"ipAddressType"`: The IP address type for the instance. The possible values are ipv4 for - IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack. + IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is + dualstack. - `"keyPairName"`: The name of your key pair. - `"tags"`: The tag keys and optional values to add to the resource during create. Use the TagResource action to tag a resource after it's created. 
@@ -1438,7 +1443,7 @@ information, see the Amazon Lightsail Developer Guide. Zones by using the get regions operation. Be sure to add the include Availability Zones parameter to your request. - `bundle_id`: The bundle of specification information for your virtual private server (or - instance), including the pricing plan (e.g., micro_1_0). + instance), including the pricing plan (micro_x_x). - `instance_names`: The names for your new instances. # Optional Parameters @@ -1451,7 +1456,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance name parameter. The instance snapshot name and source instance name parameters are mutually exclusive. - `"ipAddressType"`: The IP address type for the instance. The possible values are ipv4 for - IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack. + IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is + dualstack. - `"keyPairName"`: The name for your key pair. - `"restoreDate"`: The date of the automatic snapshot to use for the new instance. Use the get auto snapshots operation to identify the dates of the available automatic snapshots. @@ -1582,19 +1588,20 @@ Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"certificateAlternativeNames"`: The optional alternative domains and subdomains to use - with your SSL/TLS certificate (e.g., www.example.com, example.com, m.example.com, + with your SSL/TLS certificate (www.example.com, example.com, m.example.com, blog.example.com). - `"certificateDomainName"`: The domain name with which your certificate is associated - (e.g., example.com). If you specify certificateDomainName, then certificateName is required - (and vice-versa). + (example.com). If you specify certificateDomainName, then certificateName is required (and + vice-versa). - `"certificateName"`: The name of the SSL/TLS certificate. If you specify certificateName, then certificateDomainName is required (and vice-versa). - `"healthCheckPath"`: The path you provided to perform the load balancer health check. If - you didn't specify a health check path, Lightsail uses the root path of your website (e.g., - \"/\"). You may want to specify a custom health check path other than the root of your + you didn't specify a health check path, Lightsail uses the root path of your website + (\"/\"). You may want to specify a custom health check path other than the root of your application if your home page loads slowly or has a lot of media or scripting on it. - `"ipAddressType"`: The IP address type for the load balancer. The possible values are - ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack. + ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value + is dualstack. - `"tags"`: The tag keys and optional values to add to the resource during create. Use the TagResource action to tag a resource after it's created. - `"tlsPolicyName"`: The name of the TLS policy to apply to the load balancer. Use the @@ -1647,8 +1654,7 @@ tags applied to the resource identified by load balancer name. For more informat the Amazon Lightsail Developer Guide. # Arguments -- `certificate_domain_name`: The domain name (e.g., example.com) for your SSL/TLS - certificate. +- `certificate_domain_name`: The domain name (example.com) for your SSL/TLS certificate. - `certificate_name`: The SSL/TLS certificate name. 
You can have up to 10 certificates in your account at one time. Each Lightsail load balancer can have up to 2 certificates associated with it at one time. There is also an overall limit to the number of @@ -1661,7 +1667,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"certificateAlternativeNames"`: An array of strings listing alternative domains and subdomains for your SSL/TLS certificate. Lightsail will de-dupe the names for you. You can have a maximum of 9 alternative names (in addition to the 1 primary domain). We do not - support wildcards (e.g., *.example.com). + support wildcards (*.example.com). - `"tags"`: The tag keys and optional values to add to the resource during create. Use the TagResource action to tag a resource after it's created. """ @@ -2304,7 +2310,7 @@ applied to the resource identified by disk name. For more information, see the A Lightsail Developer Guide. # Arguments -- `disk_name`: The unique name of the disk you want to delete (e.g., my-disk). +- `disk_name`: The unique name of the disk you want to delete (my-disk). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2348,8 +2354,7 @@ tags applied to the resource identified by disk snapshot name. For more informat the Amazon Lightsail Developer Guide. # Arguments -- `disk_snapshot_name`: The name of the disk snapshot you want to delete (e.g., - my-disk-snapshot). +- `disk_snapshot_name`: The name of the disk snapshot you want to delete (my-disk-snapshot). """ function delete_disk_snapshot( @@ -2896,8 +2901,7 @@ resource tags applied to the resource identified by disk name. For more informat the Amazon Lightsail Developer Guide. # Arguments -- `disk_name`: The unique name of the disk you want to detach from your instance (e.g., - my-disk). +- `disk_name`: The unique name of the disk you want to detach from your instance (my-disk). """ function detach_disk(diskName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -4014,7 +4018,7 @@ end Returns information about a specific block storage disk. # Arguments -- `disk_name`: The name of the disk (e.g., my-disk). +- `disk_name`: The name of the disk (my-disk). """ function get_disk(diskName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -4047,7 +4051,7 @@ end Returns information about a specific block storage disk snapshot. # Arguments -- `disk_snapshot_name`: The name of the disk snapshot (e.g., my-disk-snapshot). +- `disk_snapshot_name`: The name of the disk snapshot (my-disk-snapshot). """ function get_disk_snapshot( @@ -5189,7 +5193,7 @@ end get_operations_for_resource(resource_name) get_operations_for_resource(resource_name, params::Dict{String,<:Any}) -Gets operations for a specific resource (e.g., an instance or a static IP). +Gets operations for a specific resource (an instance or a static IP). # Arguments - `resource_name`: The name of the resource for which you are requesting information. @@ -5237,10 +5241,10 @@ zones parameter to also return the Availability Zones in a region. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"includeAvailabilityZones"`: A Boolean value indicating whether to also include Availability Zones in your get regions request. Availability Zones are indicated with a - letter: e.g., us-east-2a. + letter: us-east-2a. - `"includeRelationalDatabaseAvailabilityZones"`: A Boolean value indicating whether to also include Availability Zones for databases in your get regions request. 
Availability
-  Zones are indicated with a letter (e.g., us-east-2a).
+  Zones are indicated with a letter (us-east-2a).
 """
 function get_regions(; aws_config::AbstractAWSConfig=global_aws_config())
     return lightsail("GetRegions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET)
@@ -5833,6 +5837,46 @@ function get_relational_databases(
     )
 end

+"""
+    get_setup_history(resource_name)
+    get_setup_history(resource_name, params::Dict{String,<:Any})
+
+Returns detailed information for five of the most recent SetupInstanceHttps requests that
+were run on the target instance.
+
+# Arguments
+- `resource_name`: The name of the resource for which you are requesting information.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"pageToken"`: The token to advance to the next page of results from your request. To get
+  a page token, perform an initial GetSetupHistory request. If your results are paginated,
+  the response will return a next page token that you can specify as the page token in a
+  subsequent request.
+"""
+function get_setup_history(resourceName; aws_config::AbstractAWSConfig=global_aws_config())
+    return lightsail(
+        "GetSetupHistory",
+        Dict{String,Any}("resourceName" => resourceName);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_setup_history(
+    resourceName,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return lightsail(
+        "GetSetupHistory",
+        Dict{String,Any}(
+            mergewith(_merge, Dict{String,Any}("resourceName" => resourceName), params)
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_static_ip(static_ip_name)
     get_static_ip(static_ip_name, params::Dict{String,<:Any})
@@ -6451,13 +6495,21 @@ Alternately, you can use this action to disable dual-stack, and enable IPv4 only

 # Arguments
 - `ip_address_type`: The IP address type to set for the specified resource. The possible
-  values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+  values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.
 - `resource_name`: The name of the resource for which to set the IP address type.
-- `resource_type`: The resource type. The possible values are Distribution, Instance, and
+- `resource_type`: The resource type. The resource values are Distribution, Instance, and
   LoadBalancer. Distribution-related APIs are available only in the N. Virginia (us-east-1)
   Amazon Web Services Region. Set your Amazon Web Services Region configuration to us-east-1
   to create, view, or edit distributions.

+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"acceptBundleUpdate"`: Required parameter to accept the instance bundle update when
+  changing to, and from, IPv6-only. An instance bundle will change when switching from
+  dual-stack or ipv4, to ipv6. It also changes when switching from ipv6, to dual-stack or
+  ipv4. You must include this parameter in the command to update the bundle. For example, if
+  you switch from dual-stack to ipv6, the bundle will be updated, and billing for the
+  IPv6-only instance bundle begins immediately.
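+
+# Example
+A minimal sketch, not part of the generated API surface: it assumes the wrapper is loaded
+with AWS.jl's `@service` macro, that the positional arguments follow the order
+ipAddressType, resourceName, resourceType, and the instance name is a placeholder.
+
+```julia
+using AWS
+@service Lightsail
+
+# Move an instance to IPv6 only and accept the bundle change that accompanies the switch.
+Lightsail.set_ip_address_type(
+    "ipv6", "my-instance", "Instance", Dict("acceptBundleUpdate" => true)
+)
+```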
""" function set_ip_address_type( ipAddressType, @@ -6556,6 +6608,70 @@ function set_resource_access_for_bucket( ) end +""" + setup_instance_https(certificate_provider, domain_names, email_address, instance_name) + setup_instance_https(certificate_provider, domain_names, email_address, instance_name, params::Dict{String,<:Any}) + +Creates an SSL/TLS certificate that secures traffic for your website. After the certificate +is created, it is installed on the specified Lightsail instance. If you provide more than +one domain name in the request, at least one name must be less than or equal to 63 +characters in length. + +# Arguments +- `certificate_provider`: The certificate authority that issues the SSL/TLS certificate. +- `domain_names`: The name of the domain and subdomains that were specified for the SSL/TLS + certificate. +- `email_address`: The contact method for SSL/TLS certificate renewal alerts. You can enter + one email address. +- `instance_name`: The name of the Lightsail instance. + +""" +function setup_instance_https( + certificateProvider, + domainNames, + emailAddress, + instanceName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lightsail( + "SetupInstanceHttps", + Dict{String,Any}( + "certificateProvider" => certificateProvider, + "domainNames" => domainNames, + "emailAddress" => emailAddress, + "instanceName" => instanceName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function setup_instance_https( + certificateProvider, + domainNames, + emailAddress, + instanceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lightsail( + "SetupInstanceHttps", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "certificateProvider" => certificateProvider, + "domainNames" => domainNames, + "emailAddress" => emailAddress, + "instanceName" => instanceName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_guisession(resource_name) start_guisession(resource_name, params::Dict{String,<:Any}) @@ -7138,12 +7254,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys replace your distribution's existing settings. - `"cacheBehaviors"`: An array of objects that describe the per-path cache behavior for the distribution. +- `"certificateName"`: The name of the SSL/TLS certificate that you want to attach to the + distribution. Only certificates with a status of ISSUED can be attached to a distribution. + Use the GetCertificates action to get a list of certificate names that you can specify. - `"defaultCacheBehavior"`: An object that describes the default cache behavior for the distribution. - `"isEnabled"`: Indicates whether to enable the distribution. - `"origin"`: An object that describes the origin resource for the distribution, such as a Lightsail instance, bucket, or load balancer. The distribution pulls, caches, and serves content from the origin. +- `"useDefaultCertificate"`: Indicates whether the default SSL/TLS certificate is attached + to the distribution. The default value is true. When true, the distribution uses the + default domain name such as d111111abcdef8.cloudfront.net. Set this value to false to + attach a new certificate to the distribution. +- `"viewerMinimumTlsProtocolVersion"`: Use this parameter to update the minimum TLS + protocol version for the SSL/TLS certificate that's attached to the distribution. 
""" function update_distribution( distributionName; aws_config::AbstractAWSConfig=global_aws_config() @@ -7338,8 +7463,8 @@ information, see the Amazon Lightsail Developer Guide. TlsPolicyName for the attributeName request parameter, then the attributeValue request parameter must be the name of the TLS policy. Use the GetLoadBalancerTlsPolicies action to get a list of TLS policy names that you can specify. -- `load_balancer_name`: The name of the load balancer that you want to modify (e.g., - my-load-balancer. +- `load_balancer_name`: The name of the load balancer that you want to modify + (my-load-balancer. """ function update_load_balancer_attribute( @@ -7430,6 +7555,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys true specifies a database that is available to resources outside of your Lightsail account. A value of false specifies a database that is available only to your Lightsail resources in the same region as your database. +- `"relationalDatabaseBlueprintId"`: This parameter is used to update the major version of + the database. Enter the blueprintId for the major version that you want to update to. Use + the GetRelationalDatabaseBlueprints action to get a list of available blueprint IDs. - `"rotateMasterUserPassword"`: When true, the master user password is changed to a new strong password generated by Lightsail. Use the get relational database master user password operation to get the new password. diff --git a/src/services/location.jl b/src/services/location.jl index fb4a0ae3bc..7789cfb052 100644 --- a/src/services/location.jl +++ b/src/services/location.jl @@ -269,26 +269,26 @@ end batch_update_device_position(tracker_name, updates) batch_update_device_position(tracker_name, updates, params::Dict{String,<:Any}) -Uploads position update data for one or more devices to a tracker resource. Amazon Location -uses the data when it reports the last known device position and position history. Amazon -Location retains location data for 30 days. Position updates are handled based on the -PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased, -updates are evaluated against linked geofence collections, and location data is stored at a -maximum of one position per 30 second interval. If your update frequency is more often than -every 30 seconds, only one update per 30 seconds is stored for each unique device ID. When -PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated -against linked geofence collections only if the device has moved more than 30 m (98.4 ft). -When PositionFiltering is set to AccuracyBased filtering, location data is stored and -evaluated against linked geofence collections only if the device has moved more than the -measured accuracy. For example, if two consecutive updates from a device have a horizontal -accuracy of 5 m and 10 m, the second update is neither stored or evaluated if the device -has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon -Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a -DevicePositionUpdate. +Uploads position update data for one or more devices to a tracker resource (up to 10 +devices per batch). Amazon Location uses the data when it reports the last known device +position and position history. Amazon Location retains location data for 30 days. Position +updates are handled based on the PositionFiltering property of the tracker. 
When
+PositionFiltering is set to TimeBased, updates are evaluated against linked geofence
+collections, and location data is stored at a maximum of one position per 30 second
+interval. If your update frequency is more often than every 30 seconds, only one update per
+30 seconds is stored for each unique device ID. When PositionFiltering is set to
+DistanceBased filtering, location data is stored and evaluated against linked geofence
+collections only if the device has moved more than 30 m (98.4 ft). When PositionFiltering
+is set to AccuracyBased filtering, location data is stored and evaluated against linked
+geofence collections only if the device has moved more than the measured accuracy. For
+example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10
+m, the second update is neither stored nor evaluated if the device has moved less than 15 m.
+If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default
+value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate.

 # Arguments
 - `tracker_name`: The name of the tracker resource to update.
-- `updates`: Contains the position update details for each device.
+- `updates`: Contains the position update details for each device, up to 10 devices.

 """
 function batch_update_device_position(
@@ -350,6 +350,9 @@ destination must be within 40km.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"ArrivalTime"`: Specifies the desired time of arrival. Uses the given time to calculate
+  the route. Otherwise, the best time of day to travel with the best traffic conditions is
+  used to calculate the route. ArrivalTime is not supported by Esri.
 - `"CarModeOptions"`: Specifies route preferences when traveling by Car, such as avoiding
   routes that use ferries or tolls. Requirements: TravelMode must be specified as Car.
 - `"DepartNow"`: Sets the time of departure as the current time. Uses the current time to
@@ -357,12 +360,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   conditions is used to calculate the route. Default Value: false Valid Values: false | true
 - `"DepartureTime"`: Specifies the desired time of departure. Uses the given time to
   calculate the route. Otherwise, the best time of day to travel with the best traffic
-  conditions is used to calculate the route. Setting a departure time in the past returns a
-  400 ValidationException error. In ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. For
-  example, 2020–07-2T12:15:20.000Z+01:00
+  conditions is used to calculate the route. In ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.
+  For example, 2020-07-2T12:15:20.000Z+01:00
 - `"DistanceUnit"`: Set the unit system to specify the distance. Default Value: Kilometers
 - `"IncludeLegGeometry"`: Set to include the geometry details in the result for each path
   between a pair of positions. Default Value: false Valid Values: false | true
+- `"OptimizeFor"`: Specifies the distance to optimize for when calculating a route.
 - `"TravelMode"`: Specifies the mode of transport when calculating a route. Used in
   estimating the speed of travel and road compatibility. You can choose Car, Truck, Walking,
   Bicycle or Motorcycle as options for the TravelMode. Bicycle and Motorcycle are only
@@ -384,6 +387,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   ValidationException error.
  If Esri is the provider for your route calculator, specifying a route that is longer than
  400 km returns a 400 RoutesValidationException error. Valid Values: [-180 to 180,-90 to 90]
+- `"key"`: The optional API key to authorize the request.
 """
 function calculate_route(
     CalculatorName,
@@ -496,6 +500,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"TruckModeOptions"`: Specifies route preferences when traveling by Truck, such as
   avoiding routes that use ferries or tolls, and truck specifications to consider when
   choosing an optimal road. Requirements: TravelMode must be specified as Truck.
+- `"key"`: The optional API key to authorize the request.
 """
 function calculate_route_matrix(
     CalculatorName,
@@ -598,9 +603,8 @@ end
     create_key(key_name, restrictions, params::Dict{String,<:Any})

 Creates an API key resource in your Amazon Web Services account, which lets you grant
-geo:GetMap* actions for Amazon Location Map resources to the API key bearer. The API keys
-feature is in preview. We may add, change, or remove features before announcing general
-availability. For more information, see Using API keys.
+actions for Amazon Location resources to the API key bearer. For more information, see
+Using API keys.

 # Arguments
 - `key_name`: A custom name for the API key resource. Requirements: Contain only
@@ -880,6 +884,18 @@ current and historical location of devices.
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"Description"`: An optional description for the tracker resource.
+- `"EventBridgeEnabled"`: Whether to enable position UPDATE events from this tracker to be
+  sent to EventBridge. You do not need to enable this feature to get ENTER and EXIT events
+  for geofences with this tracker. Those events are always sent to EventBridge.
+- `"KmsKeyEnableGeospatialQueries"`: Enables GeospatialQueries for a tracker that uses an
+  Amazon Web Services KMS customer managed key. This parameter is only used if you are using
+  a KMS customer managed key. If you wish to encrypt your data using your own KMS customer
+  managed key, then the Bounding Polygon Queries feature will be disabled by default. This is
+  because by using this feature, a representation of your device positions will not be
+  encrypted using your KMS managed key. The exact device position, however, is still
+  encrypted using your managed key. You can choose to opt in to the Bounding Polygon Queries
+  feature. This is done by setting the KmsKeyEnableGeospatialQueries parameter to true when
+  creating or updating a Tracker.
 - `"KmsKeyId"`: A key identifier for an Amazon Web Services KMS customer managed key. Enter
   a key ID, key ARN, alias name, or alias ARN.
 - `"PositionFiltering"`: Specifies the position filtering for the tracker resource. Valid
@@ -979,6 +995,12 @@ previously.

 # Arguments
 - `key_name`: The name of the API key to delete.

+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"forceDelete"`: ForceDelete bypasses an API key's expiry conditions and deletes the key.
+  Set the parameter true to delete the key or to false to not preemptively delete the API
+  key. Valid values: true, or false. Required: No. This action is irreversible. Only use
+  ForceDelete if you are certain the key is no longer in use.
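+
+# Example
+A minimal sketch, not part of the generated API surface: it assumes the wrapper is loaded
+with AWS.jl's `@service` macro; the key name is a placeholder and the call is irreversible.
+
+```julia
+using AWS
+@service Location
+
+# Delete an API key immediately, bypassing its expiry conditions.
+Location.delete_key("ExampleKey-1", Dict("forceDelete" => true))
+```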
""" function delete_key(KeyName; aws_config::AbstractAWSConfig=global_aws_config()) return location( @@ -1171,9 +1193,7 @@ end describe_key(key_name) describe_key(key_name, params::Dict{String,<:Any}) -Retrieves the API key resource details. The API keys feature is in preview. We may add, -change, or remove features before announcing general availability. For more information, -see Using API keys. +Retrieves the API key resource details. # Arguments - `key_name`: The name of the API key resource. @@ -1368,6 +1388,66 @@ function disassociate_tracker_consumer( ) end +""" + forecast_geofence_events(collection_name, device_state) + forecast_geofence_events(collection_name, device_state, params::Dict{String,<:Any}) + +Evaluates device positions against geofence geometries from a given geofence collection. +The event forecasts three states for which a device can be in relative to a geofence: +ENTER: If a device is outside of a geofence, but would breach the fence if the device is +moving at its current speed within time horizon window. EXIT: If a device is inside of a +geofence, but would breach the fence if the device is moving at its current speed within +time horizon window. IDLE: If a device is inside of a geofence, and the device is not +moving. + +# Arguments +- `collection_name`: The name of the geofence collection. +- `device_state`: The device's state, including current position and speed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DistanceUnit"`: The distance unit used for the NearestDistance property returned in a + forecasted event. The measurement system must match for DistanceUnit and SpeedUnit; if + Kilometers is specified for DistanceUnit, then SpeedUnit must be KilometersPerHour. + Default Value: Kilometers +- `"MaxResults"`: An optional limit for the number of resources returned in a single call. + Default value: 20 +- `"NextToken"`: The pagination token specifying which page of results to return in the + response. If no token is provided, the default page is the first page. Default value: null +- `"SpeedUnit"`: The speed unit for the device captured by the device state. The + measurement system must match for DistanceUnit and SpeedUnit; if Kilometers is specified + for DistanceUnit, then SpeedUnit must be KilometersPerHour. Default Value: + KilometersPerHour. +- `"TimeHorizonMinutes"`: Specifies the time horizon in minutes for the forecasted events. 
+""" +function forecast_geofence_events( + CollectionName, DeviceState; aws_config::AbstractAWSConfig=global_aws_config() +) + return location( + "POST", + "/geofencing/v0/collections/$(CollectionName)/forecast-geofence-events", + Dict{String,Any}("DeviceState" => DeviceState); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function forecast_geofence_events( + CollectionName, + DeviceState, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return location( + "POST", + "/geofencing/v0/collections/$(CollectionName)/forecast-geofence-events", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("DeviceState" => DeviceState), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_device_position(device_id, tracker_name) get_device_position(device_id, tracker_name, params::Dict{String,<:Any}) @@ -1461,7 +1541,8 @@ end get_geofence(collection_name, geofence_id) get_geofence(collection_name, geofence_id, params::Dict{String,<:Any}) -Retrieves the geofence details from a geofence collection. +Retrieves the geofence details from a geofence collection. The returned geometry will +always match the geometry format used when the geofence was created. # Arguments - `collection_name`: The geofence collection storing the target geofence. @@ -1501,7 +1582,7 @@ Retrieves glyphs used to display labels on a map. # Arguments - `font_stack`: A comma-separated list of fonts to load glyphs from in order of preference. - For example, Noto Sans Regular, Arial Unicode. Valid fonts stacks for Esri styles: + For example, Noto Sans Regular, Arial Unicode. Valid font stacks for Esri styles: VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold VectorEsriTopographic – Noto Sans Italic | Noto Sans @@ -1702,6 +1783,7 @@ Region Data provider specified in the place index resource # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"key"`: The optional API key to authorize the request. - `"language"`: The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English. This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not @@ -1746,6 +1828,7 @@ A batch request to retrieve all device positions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"FilterGeometry"`: The geometry used to filter device positions. - `"MaxResults"`: An optional limit for the number of entries returned in a single call. Default value: 100 - `"NextToken"`: The pagination token specifying which page of results to return in the @@ -1850,9 +1933,7 @@ end list_keys() list_keys(params::Dict{String,<:Any}) -Lists API key resources in your Amazon Web Services account. The API keys feature is in -preview. We may add, change, or remove features before announcing general availability. For -more information, see Using API keys. +Lists API key resources in your Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2097,10 +2178,16 @@ existing geofence if a geofence ID is included in the request. # Arguments - `collection_name`: The geofence collection to store the geofence in. 
- `geofence_id`: An identifier for the geofence. For example, ExampleGeofence-1. -- `geometry`: Contains the details to specify the position of the geofence. Can be either a - polygon or a circle. Including both will return a validation error. Each geofence polygon - can have a maximum of 1,000 vertices. +- `geometry`: Contains the details to specify the position of the geofence. Can be a + polygon, a circle or a polygon encoded in Geobuf format. Including multiple selections will + return a validation error. The geofence polygon format supports a maximum of 1,000 + vertices. The Geofence Geobuf format supports a maximum of 100,000 vertices. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"GeofenceProperties"`: Associates one of more properties with the geofence. A property + is a key-value pair stored with the geofence and added to any geofence event triggered with + that geofence. Format: \"key\" : \"value\" """ function put_geofence( CollectionName, GeofenceId, Geometry; aws_config::AbstractAWSConfig=global_aws_config() @@ -2158,6 +2245,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys not have a value for Greek, the result will be in a language that the provider does support. - `"MaxResults"`: An optional parameter. The maximum number of results returned per request. Default value: 50 +- `"key"`: The optional API key to authorize the request. """ function search_place_index_for_position( IndexName, Position; aws_config::AbstractAWSConfig=global_aws_config() @@ -2222,6 +2310,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542. FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error. +- `"FilterCategories"`: A list of one or more Amazon Location categories to filter the + returned places. If you include more than one category, the results will include results + that match any of the categories listed. For more information about using categories, + including a list of Amazon Location categories, see Categories and filtering, in the Amazon + Location Service Developer Guide. - `"FilterCountries"`: An optional parameter that limits the search results by returning only suggestions within the provided list of countries. Use the ISO 3166 3-digit country code. For example, Australia uses three upper-case characters: AUS. @@ -2236,6 +2329,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys will be in a language that the provider does support. - `"MaxResults"`: An optional parameter. The maximum number of results returned per request. The default: 5 +- `"key"`: The optional API key to authorize the request. """ function search_place_index_for_suggestions( IndexName, Text; aws_config::AbstractAWSConfig=global_aws_config() @@ -2297,6 +2391,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542. FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error. +- `"FilterCategories"`: A list of one or more Amazon Location categories to filter the + returned places. If you include more than one category, the results will include results + that match any of the categories listed. 
For more information about using categories, + including a list of Amazon Location categories, see Categories and filtering, in the Amazon + Location Service Developer Guide. - `"FilterCountries"`: An optional parameter that limits the search results by returning only places that are in a specified list of countries. Valid values include ISO 3166 3-digit country codes. For example, Australia uses three upper-case characters: AUS. @@ -2311,6 +2410,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys will be in a language that the provider does support. - `"MaxResults"`: An optional parameter. The maximum number of results returned per request. The default: 50 +- `"key"`: The optional API key to authorize the request. """ function search_place_index_for_text( IndexName, Text; aws_config::AbstractAWSConfig=global_aws_config() @@ -2468,9 +2568,7 @@ end update_key(key_name) update_key(key_name, params::Dict{String,<:Any}) -Updates the specified properties of a given API key resource. The API keys feature is in -preview. We may add, change, or remove features before announcing general availability. For -more information, see Using API keys. +Updates the specified properties of a given API key resource. # Arguments - `key_name`: The name of the API key resource to update. @@ -2630,6 +2728,12 @@ Updates the specified properties of a given tracker resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: Updates the description for the tracker resource. +- `"EventBridgeEnabled"`: Whether to enable position UPDATE events from this tracker to be + sent to EventBridge. You do not need enable this feature to get ENTER and EXIT events for + geofences with this tracker. Those events are always sent to EventBridge. +- `"KmsKeyEnableGeospatialQueries"`: Enables GeospatialQueries for a tracker that uses a + Amazon Web Services KMS customer managed key. This parameter is only used if you are using + a KMS customer managed key. - `"PositionFiltering"`: Updates the position filtering for the tracker resource. Valid values: TimeBased - Location updates are evaluated against linked geofence collections, but not every location update is stored. If your update frequency is more often than 30 @@ -2670,3 +2774,49 @@ function update_tracker( feature_set=SERVICE_FEATURE_SET, ) end + +""" + verify_device_position(device_state, tracker_name) + verify_device_position(device_state, tracker_name, params::Dict{String,<:Any}) + +Verifies the integrity of the device's position by determining if it was reported behind a +proxy, and by comparing it to an inferred position estimated based on the device's state. + +# Arguments +- `device_state`: The device's state, including position, IP address, cell signals and + Wi-Fi access points. +- `tracker_name`: The name of the tracker resource to be associated with verification + request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DistanceUnit"`: The distance unit for the verification request. 
Default Value: + Kilometers +""" +function verify_device_position( + DeviceState, TrackerName; aws_config::AbstractAWSConfig=global_aws_config() +) + return location( + "POST", + "/tracking/v0/trackers/$(TrackerName)/positions/verify", + Dict{String,Any}("DeviceState" => DeviceState); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function verify_device_position( + DeviceState, + TrackerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return location( + "POST", + "/tracking/v0/trackers/$(TrackerName)/positions/verify", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("DeviceState" => DeviceState), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/lookoutequipment.jl b/src/services/lookoutequipment.jl index 9990e5d1fe..d4db5b10a8 100644 --- a/src/services/lookoutequipment.jl +++ b/src/services/lookoutequipment.jl @@ -10,7 +10,7 @@ using AWS.UUIDs Creates a container for a collection of data being ingested for analysis. The dataset contains the metadata describing where the data is and what the data actually looks like. -In other words, it contains the location of the data source, the data schema, and other +For example, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data. # Arguments @@ -82,8 +82,8 @@ You must also provide an S3 bucket location for the output data. for Equipment runs inference on your data. For more information, see Understanding the inference process. - `inference_scheduler_name`: The name of the inference scheduler being created. -- `model_name`: The name of the previously trained ML model being used to create the - inference scheduler. +- `model_name`: The name of the previously trained machine learning model being used to + create the inference scheduler. - `role_arn`: The Amazon Resource Name (ARN) of a role with permission to access the data source being used for the inference. @@ -293,20 +293,20 @@ end create_model(client_token, dataset_name, model_name) create_model(client_token, dataset_name, model_name, params::Dict{String,<:Any}) -Creates an ML model for data inference. A machine-learning (ML) model is a mathematical -model that finds patterns in your data. In Amazon Lookout for Equipment, the model learns -the patterns of normal behavior and detects abnormal behavior that could be potential -equipment failure (or maintenance events). The models are made by analyzing normal data and -abnormalities in machine behavior that have already occurred. Your model is trained using a -portion of the data from your dataset and uses that data to learn patterns of normal -behavior and abnormal patterns that lead to equipment failure. Another portion of the data -is used to evaluate the model's accuracy. +Creates a machine learning model for data inference. A machine-learning (ML) model is a +mathematical model that finds patterns in your data. In Amazon Lookout for Equipment, the +model learns the patterns of normal behavior and detects abnormal behavior that could be +potential equipment failure (or maintenance events). The models are made by analyzing +normal data and abnormalities in machine behavior that have already occurred. Your model is +trained using a portion of the data from your dataset and uses that data to learn patterns +of normal behavior and abnormal patterns that lead to equipment failure. 
Another portion of +the data is used to evaluate the model's accuracy. # Arguments - `client_token`: A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. -- `dataset_name`: The name of the dataset for the ML model being created. -- `model_name`: The name for the ML model to be created. +- `dataset_name`: The name of the dataset for the machine learning model being created. +- `model_name`: The name for the machine learning model to be created. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -317,25 +317,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys minute. When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H -- `"DatasetSchema"`: The data schema for the ML model being created. +- `"DatasetSchema"`: The data schema for the machine learning model being created. - `"EvaluationDataEndTime"`: Indicates the time reference in the dataset that should be - used to end the subset of evaluation data for the ML model. + used to end the subset of evaluation data for the machine learning model. - `"EvaluationDataStartTime"`: Indicates the time reference in the dataset that should be - used to begin the subset of evaluation data for the ML model. + used to begin the subset of evaluation data for the machine learning model. - `"LabelsInputConfiguration"`: The input configuration for the labels being used for the - ML model that's being created. + machine learning model that's being created. +- `"ModelDiagnosticsOutputConfiguration"`: The Amazon S3 location where you want Amazon + Lookout for Equipment to save the pointwise model diagnostics. You must also specify the + RoleArn request parameter. - `"OffCondition"`: Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference. - `"RoleArn"`: The Amazon Resource Name (ARN) of a role with permission to access the data - source being used to create the ML model. + source being used to create the machine learning model. - `"ServerSideKmsKeyId"`: Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment. -- `"Tags"`: Any tags associated with the ML model being created. +- `"Tags"`: Any tags associated with the machine learning model being created. - `"TrainingDataEndTime"`: Indicates the time reference in the dataset that should be used - to end the subset of training data for the ML model. + to end the subset of training data for the machine learning model. - `"TrainingDataStartTime"`: Indicates the time reference in the dataset that should be - used to begin the subset of training data for the ML model. + used to begin the subset of training data for the machine learning model. """ function create_model( ClientToken, DatasetName, ModelName; aws_config::AbstractAWSConfig=global_aws_config() @@ -376,6 +379,78 @@ function create_model( ) end +""" + create_retraining_scheduler(client_token, lookback_window, model_name, retraining_frequency) + create_retraining_scheduler(client_token, lookback_window, model_name, retraining_frequency, params::Dict{String,<:Any}) + +Creates a retraining scheduler on the specified model. 
+ +# Arguments +- `client_token`: A unique identifier for the request. If you do not set the client request + token, Amazon Lookout for Equipment generates one. +- `lookback_window`: The number of past days of data that will be used for retraining. +- `model_name`: The name of the model to add the retraining scheduler to. +- `retraining_frequency`: This parameter uses the ISO 8601 standard to set the frequency at + which you want retraining to occur in terms of Years, Months, and/or Days (note: other + parameters like Time are not currently supported). The minimum value is 30 days (P30D) and + the maximum value is 1 year (P1Y). For example, the following values are valid: P3M15D + – Every 3 months and 15 days P2M – Every 2 months P150D – Every 150 days + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"PromoteMode"`: Indicates how the service will use new models. In MANAGED mode, new + models will automatically be used for inference if they have better performance than the + current model. In MANUAL mode, the new models will not be used until they are manually + activated. +- `"RetrainingStartDate"`: The start date for the retraining scheduler. Lookout for + Equipment truncates the time you provide to the nearest UTC day. +""" +function create_retraining_scheduler( + ClientToken, + LookbackWindow, + ModelName, + RetrainingFrequency; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "CreateRetrainingScheduler", + Dict{String,Any}( + "ClientToken" => ClientToken, + "LookbackWindow" => LookbackWindow, + "ModelName" => ModelName, + "RetrainingFrequency" => RetrainingFrequency, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_retraining_scheduler( + ClientToken, + LookbackWindow, + ModelName, + RetrainingFrequency, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "CreateRetrainingScheduler", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientToken" => ClientToken, + "LookbackWindow" => LookbackWindow, + "ModelName" => ModelName, + "RetrainingFrequency" => RetrainingFrequency, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_dataset(dataset_name) delete_dataset(dataset_name, params::Dict{String,<:Any}) @@ -417,8 +492,8 @@ end delete_inference_scheduler(inference_scheduler_name) delete_inference_scheduler(inference_scheduler_name, params::Dict{String,<:Any}) -Deletes an inference scheduler that has been set up. Already processed output results are -not affected. +Deletes an inference scheduler that has been set up. Prior inference results will not be +deleted. # Arguments - `inference_scheduler_name`: The name of the inference scheduler to be deleted. @@ -537,11 +612,12 @@ end delete_model(model_name) delete_model(model_name, params::Dict{String,<:Any}) -Deletes an ML model currently available for Amazon Lookout for Equipment. This will prevent -it from being used with an inference scheduler, even one that is already set up. +Deletes a machine learning model currently available for Amazon Lookout for Equipment. This +will prevent it from being used with an inference scheduler, even one that is already set +up. # Arguments -- `model_name`: The name of the ML model to be deleted. +- `model_name`: The name of the machine learning model to be deleted. 
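# Example
A minimal sketch; the model name is a placeholder and credentials are assumed to be resolved by `global_aws_config()`:

```julia
# Delete a previously trained model by name.
delete_model("my-model")
```

As with every operation in this module, a second method accepts a `params::Dict{String,<:Any}` of additional request members, and both methods take an `aws_config` keyword for a non-default configuration.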
""" function delete_model(ModelName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -567,6 +643,78 @@ function delete_model( ) end +""" + delete_resource_policy(resource_arn) + delete_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Deletes the resource policy attached to the resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which the resource + policy should be deleted. + +""" +function delete_resource_policy( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "DeleteResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "DeleteResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_retraining_scheduler(model_name) + delete_retraining_scheduler(model_name, params::Dict{String,<:Any}) + +Deletes a retraining scheduler from a model. The retraining scheduler must be in the +STOPPED status. + +# Arguments +- `model_name`: The name of the model whose retraining scheduler you want to delete. + +""" +function delete_retraining_scheduler( + ModelName; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "DeleteRetrainingScheduler", + Dict{String,Any}("ModelName" => ModelName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_retraining_scheduler( + ModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "DeleteRetrainingScheduler", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_data_ingestion_job(job_id) describe_data_ingestion_job(job_id, params::Dict{String,<:Any}) @@ -753,11 +901,12 @@ end describe_model(model_name) describe_model(model_name, params::Dict{String,<:Any}) -Provides a JSON containing the overall information about a specific ML model, including -model name and ARN, dataset, training and evaluation information, status, and so on. +Provides a JSON containing the overall information about a specific machine learning model, +including model name and ARN, dataset, training and evaluation information, status, and so +on. # Arguments -- `model_name`: The name of the ML model to be described. +- `model_name`: The name of the machine learning model to be described. """ function describe_model(ModelName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -783,6 +932,244 @@ function describe_model( ) end +""" + describe_model_version(model_name, model_version) + describe_model_version(model_name, model_version, params::Dict{String,<:Any}) + +Retrieves information about a specific machine learning model version. + +# Arguments +- `model_name`: The name of the machine learning model that this version belongs to. +- `model_version`: The version of the machine learning model. 
+ +""" +function describe_model_version( + ModelName, ModelVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "DescribeModelVersion", + Dict{String,Any}("ModelName" => ModelName, "ModelVersion" => ModelVersion); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_model_version( + ModelName, + ModelVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "DescribeModelVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ModelName" => ModelName, "ModelVersion" => ModelVersion), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_resource_policy(resource_arn) + describe_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Provides the details of a resource policy attached to a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that is associated with + the resource policy. + +""" +function describe_resource_policy( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "DescribeResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "DescribeResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_retraining_scheduler(model_name) + describe_retraining_scheduler(model_name, params::Dict{String,<:Any}) + +Provides a description of the retraining scheduler, including information such as the model +name and retraining parameters. + +# Arguments +- `model_name`: The name of the model that the retraining scheduler is attached to. + +""" +function describe_retraining_scheduler( + ModelName; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "DescribeRetrainingScheduler", + Dict{String,Any}("ModelName" => ModelName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_retraining_scheduler( + ModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "DescribeRetrainingScheduler", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_dataset(client_token, source_dataset_arn) + import_dataset(client_token, source_dataset_arn, params::Dict{String,<:Any}) + +Imports a dataset. + +# Arguments +- `client_token`: A unique identifier for the request. If you do not set the client request + token, Amazon Lookout for Equipment generates one. +- `source_dataset_arn`: The Amazon Resource Name (ARN) of the dataset to import. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DatasetName"`: The name of the machine learning dataset to be created. If the dataset + already exists, Amazon Lookout for Equipment overwrites the existing dataset. If you don't + specify this field, it is filled with the name of the source dataset. 
+- `"ServerSideKmsKeyId"`: Provides the identifier of the KMS key key used to encrypt model + data by Amazon Lookout for Equipment. +- `"Tags"`: Any tags associated with the dataset to be created. +""" +function import_dataset( + ClientToken, SourceDatasetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "ImportDataset", + Dict{String,Any}( + "ClientToken" => ClientToken, "SourceDatasetArn" => SourceDatasetArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_dataset( + ClientToken, + SourceDatasetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "ImportDataset", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientToken" => ClientToken, "SourceDatasetArn" => SourceDatasetArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_model_version(client_token, dataset_name, source_model_version_arn) + import_model_version(client_token, dataset_name, source_model_version_arn, params::Dict{String,<:Any}) + +Imports a model that has been trained successfully. + +# Arguments +- `client_token`: A unique identifier for the request. If you do not set the client request + token, Amazon Lookout for Equipment generates one. +- `dataset_name`: The name of the dataset for the machine learning model being imported. +- `source_model_version_arn`: The Amazon Resource Name (ARN) of the model version to import. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InferenceDataImportStrategy"`: Indicates how to import the accumulated inference data + when a model version is imported. The possible values are as follows: NO_IMPORT – Don't + import the data. ADD_WHEN_EMPTY – Only import the data from the source model if there + is no existing data in the target model. OVERWRITE – Import the data from the source + model and overwrite the existing data in the target model. +- `"LabelsInputConfiguration"`: +- `"ModelName"`: The name for the machine learning model to be created. If the model + already exists, Amazon Lookout for Equipment creates a new version. If you do not specify + this field, it is filled with the name of the source model. +- `"RoleArn"`: The Amazon Resource Name (ARN) of a role with permission to access the data + source being used to create the machine learning model. +- `"ServerSideKmsKeyId"`: Provides the identifier of the KMS key key used to encrypt model + data by Amazon Lookout for Equipment. +- `"Tags"`: The tags associated with the machine learning model to be created. 
+""" +function import_model_version( + ClientToken, + DatasetName, + SourceModelVersionArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "ImportModelVersion", + Dict{String,Any}( + "ClientToken" => ClientToken, + "DatasetName" => DatasetName, + "SourceModelVersionArn" => SourceModelVersionArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_model_version( + ClientToken, + DatasetName, + SourceModelVersionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "ImportModelVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientToken" => ClientToken, + "DatasetName" => DatasetName, + "SourceModelVersionArn" => SourceModelVersionArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_data_ingestion_jobs() list_data_ingestion_jobs(params::Dict{String,<:Any}) @@ -850,7 +1237,7 @@ end - `inference_scheduler_name`: The name of the inference scheduler for the inference events listed. - `interval_end_time`: Returns all the inference events with an end start time equal to or - greater than less than the end time given + greater than less than the end time given. - `interval_start_time`: Lookout for Equipment will return all the inference events with an end time equal to or greater than the start time given. @@ -964,10 +1351,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"InferenceSchedulerNameBeginsWith"`: The beginning of the name of the inference schedulers to be listed. - `"MaxResults"`: Specifies the maximum number of inference schedulers to list. -- `"ModelName"`: The name of the ML model used by the inference scheduler to be listed. +- `"ModelName"`: The name of the machine learning model used by the inference scheduler to + be listed. - `"NextToken"`: An opaque pagination token indicating where to continue the listing of inference schedulers. -- `"Status"`: Specifies the current status of the inference schedulers to list. +- `"Status"`: Specifies the current status of the inference schedulers. """ function list_inference_schedulers(; aws_config::AbstractAWSConfig=global_aws_config()) return lookoutequipment( @@ -1019,7 +1407,7 @@ end Provides a list of labels. # Arguments -- `label_group_name`: Retruns the name of the label group. +- `label_group_name`: Returns the name of the label group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1056,6 +1444,57 @@ function list_labels( ) end +""" + list_model_versions(model_name) + list_model_versions(model_name, params::Dict{String,<:Any}) + +Generates a list of all model versions for a given model, including the model version, +model version ARN, and status. To list a subset of versions, use the MaxModelVersion and +MinModelVersion fields. + +# Arguments +- `model_name`: Then name of the machine learning model for which the model versions are to + be listed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreatedAtEndTime"`: Filter results to return all the model versions created before this + time. +- `"CreatedAtStartTime"`: Filter results to return all the model versions created after + this time. +- `"MaxModelVersion"`: Specifies the highest version of the model to return in the list. 
+- `"MaxResults"`: Specifies the maximum number of machine learning model versions to list. +- `"MinModelVersion"`: Specifies the lowest version of the model to return in the list. +- `"NextToken"`: If the total number of results exceeds the limit that the response can + display, the response returns an opaque pagination token indicating where to continue the + listing of machine learning model versions. Use this token in the NextToken field in the + request to list the next page of results. +- `"SourceType"`: Filter the results based on the way the model version was generated. +- `"Status"`: Filter the results based on the current status of the model version. +""" +function list_model_versions(ModelName; aws_config::AbstractAWSConfig=global_aws_config()) + return lookoutequipment( + "ListModelVersions", + Dict{String,Any}("ModelName" => ModelName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_model_versions( + ModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "ListModelVersions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_models() list_models(params::Dict{String,<:Any}) @@ -1065,13 +1504,14 @@ status. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DatasetNameBeginsWith"`: The beginning of the name of the dataset of the ML models to - be listed. -- `"MaxResults"`: Specifies the maximum number of ML models to list. -- `"ModelNameBeginsWith"`: The beginning of the name of the ML models being listed. -- `"NextToken"`: An opaque pagination token indicating where to continue the listing of ML - models. -- `"Status"`: The status of the ML model. +- `"DatasetNameBeginsWith"`: The beginning of the name of the dataset of the machine + learning models to be listed. +- `"MaxResults"`: Specifies the maximum number of machine learning models to list. +- `"ModelNameBeginsWith"`: The beginning of the name of the machine learning models being + listed. +- `"NextToken"`: An opaque pagination token indicating where to continue the listing of + machine learning models. +- `"Status"`: The status of the machine learning model. """ function list_models(; aws_config::AbstractAWSConfig=global_aws_config()) return lookoutequipment( @@ -1086,6 +1526,38 @@ function list_models( ) end +""" + list_retraining_schedulers() + list_retraining_schedulers(params::Dict{String,<:Any}) + +Lists all retraining schedulers in your account, filtering by model name prefix and status. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Specifies the maximum number of retraining schedulers to list. +- `"ModelNameBeginsWith"`: Specify this field to only list retraining schedulers whose + machine learning models begin with the value you specify. +- `"NextToken"`: If the number of results exceeds the maximum, a pagination token is + returned. Use the token in the request to show the next page of retraining schedulers. +- `"Status"`: Specify this field to only list retraining schedulers whose status matches + the value you specify. 
+""" +function list_retraining_schedulers(; aws_config::AbstractAWSConfig=global_aws_config()) + return lookoutequipment( + "ListRetrainingSchedulers"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_retraining_schedulers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "ListRetrainingSchedulers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_sensor_statistics(dataset_name) list_sensor_statistics(dataset_name, params::Dict{String,<:Any}) @@ -1167,6 +1639,65 @@ function list_tags_for_resource( ) end +""" + put_resource_policy(client_token, resource_arn, resource_policy) + put_resource_policy(client_token, resource_arn, resource_policy, params::Dict{String,<:Any}) + +Creates a resource control policy for a given resource. + +# Arguments +- `client_token`: A unique identifier for the request. If you do not set the client request + token, Amazon Lookout for Equipment generates one. +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which the policy is + being created. +- `resource_policy`: The JSON-formatted resource policy to create. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"PolicyRevisionId"`: A unique identifier for a revision of the resource policy. +""" +function put_resource_policy( + ClientToken, + ResourceArn, + ResourcePolicy; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "PutResourcePolicy", + Dict{String,Any}( + "ClientToken" => ClientToken, + "ResourceArn" => ResourceArn, + "ResourcePolicy" => ResourcePolicy, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_resource_policy( + ClientToken, + ResourceArn, + ResourcePolicy, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "PutResourcePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientToken" => ClientToken, + "ResourceArn" => ResourceArn, + "ResourcePolicy" => ResourcePolicy, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_data_ingestion_job(client_token, dataset_name, ingestion_input_configuration, role_arn) start_data_ingestion_job(client_token, dataset_name, ingestion_input_configuration, role_arn, params::Dict{String,<:Any}) @@ -1268,6 +1799,41 @@ function start_inference_scheduler( ) end +""" + start_retraining_scheduler(model_name) + start_retraining_scheduler(model_name, params::Dict{String,<:Any}) + +Starts a retraining scheduler. + +# Arguments +- `model_name`: The name of the model whose retraining scheduler you want to start. 
+ +""" +function start_retraining_scheduler( + ModelName; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "StartRetrainingScheduler", + Dict{String,Any}("ModelName" => ModelName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_retraining_scheduler( + ModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "StartRetrainingScheduler", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_inference_scheduler(inference_scheduler_name) stop_inference_scheduler(inference_scheduler_name, params::Dict{String,<:Any}) @@ -1307,6 +1873,41 @@ function stop_inference_scheduler( ) end +""" + stop_retraining_scheduler(model_name) + stop_retraining_scheduler(model_name, params::Dict{String,<:Any}) + +Stops a retraining scheduler. + +# Arguments +- `model_name`: The name of the model whose retraining scheduler you want to stop. + +""" +function stop_retraining_scheduler( + ModelName; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "StopRetrainingScheduler", + Dict{String,Any}("ModelName" => ModelName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_retraining_scheduler( + ModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "StopRetrainingScheduler", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -1394,6 +1995,49 @@ function untag_resource( ) end +""" + update_active_model_version(model_name, model_version) + update_active_model_version(model_name, model_version, params::Dict{String,<:Any}) + +Sets the active model version for a given machine learning model. + +# Arguments +- `model_name`: The name of the machine learning model for which the active model version + is being set. +- `model_version`: The version of the machine learning model for which the active model + version is being set. + +""" +function update_active_model_version( + ModelName, ModelVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "UpdateActiveModelVersion", + Dict{String,Any}("ModelName" => ModelName, "ModelVersion" => ModelVersion); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_active_model_version( + ModelName, + ModelVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "UpdateActiveModelVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ModelName" => ModelName, "ModelVersion" => ModelVersion), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_inference_scheduler(inference_scheduler_name) update_inference_scheduler(inference_scheduler_name, params::Dict{String,<:Any}) @@ -1493,3 +2137,92 @@ function update_label_group( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_model(model_name) + update_model(model_name, params::Dict{String,<:Any}) + +Updates a model in the account. + +# Arguments +- `model_name`: The name of the model to update. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LabelsInputConfiguration"`: +- `"ModelDiagnosticsOutputConfiguration"`: The Amazon S3 location where you want Amazon + Lookout for Equipment to save the pointwise model diagnostics for the model. You must also + specify the RoleArn request parameter. +- `"RoleArn"`: The ARN of the model to update. +""" +function update_model(ModelName; aws_config::AbstractAWSConfig=global_aws_config()) + return lookoutequipment( + "UpdateModel", + Dict{String,Any}("ModelName" => ModelName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_model( + ModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "UpdateModel", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_retraining_scheduler(model_name) + update_retraining_scheduler(model_name, params::Dict{String,<:Any}) + +Updates a retraining scheduler. + +# Arguments +- `model_name`: The name of the model whose retraining scheduler you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LookbackWindow"`: The number of past days of data that will be used for retraining. +- `"PromoteMode"`: Indicates how the service will use new models. In MANAGED mode, new + models will automatically be used for inference if they have better performance than the + current model. In MANUAL mode, the new models will not be used until they are manually + activated. +- `"RetrainingFrequency"`: This parameter uses the ISO 8601 standard to set the frequency + at which you want retraining to occur in terms of Years, Months, and/or Days (note: other + parameters like Time are not currently supported). The minimum value is 30 days (P30D) and + the maximum value is 1 year (P1Y). For example, the following values are valid: P3M15D + – Every 3 months and 15 days P2M – Every 2 months P150D – Every 150 days +- `"RetrainingStartDate"`: The start date for the retraining scheduler. Lookout for + Equipment truncates the time you provide to the nearest UTC day. +""" +function update_retraining_scheduler( + ModelName; aws_config::AbstractAWSConfig=global_aws_config() +) + return lookoutequipment( + "UpdateRetrainingScheduler", + Dict{String,Any}("ModelName" => ModelName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_retraining_scheduler( + ModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lookoutequipment( + "UpdateRetrainingScheduler", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/m2.jl b/src/services/m2.jl index 501a6daa35..f583d430f8 100644 --- a/src/services/m2.jl +++ b/src/services/m2.jl @@ -62,7 +62,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys service also handles deleting the clientToken after it expires. - `"description"`: The description of the application. - `"kmsKeyId"`: The identifier of a customer managed key. -- `"roleArn"`: The Amazon Resource Name (ARN) of the role associated with the application. 
+- `"roleArn"`: The Amazon Resource Name (ARN) that identifies a role that the application + uses to access Amazon Web Services resources that are not part of the application or are in + a different Amazon Web Services account. - `"tags"`: A list of tags to apply to the application. """ function create_application( @@ -248,9 +250,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"highAvailabilityConfig"`: The details of a high availability configuration for this runtime environment. - `"kmsKeyId"`: The identifier of a customer managed key. -- `"preferredMaintenanceWindow"`: Configures the maintenance window you want for the - runtime environment. If you do not provide a value, a random system-generated value will be - assigned. +- `"preferredMaintenanceWindow"`: Configures the maintenance window that you want for the + runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi + and must be less than 24 hours. The following two examples are valid maintenance windows: + sun:23:45-mon:00:15 or sat:01:00-sat:03:00. If you do not provide a value, a random + system-generated value will be assigned. - `"publiclyAccessible"`: Specifies whether the runtime environment is publicly accessible. - `"securityGroupIds"`: The list of security groups for the VPC associated with this runtime environment. @@ -658,6 +662,30 @@ function get_environment( ) end +""" + get_signed_bluinsights_url() + get_signed_bluinsights_url(params::Dict{String,<:Any}) + +Gets a single sign-on URL that can be used to connect to AWS Blu Insights. + +""" +function get_signed_bluinsights_url(; aws_config::AbstractAWSConfig=global_aws_config()) + return m2( + "GET", "/signed-bi-url"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_signed_bluinsights_url( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return m2( + "GET", + "/signed-bi-url", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_application_versions(application_id) list_application_versions(application_id, params::Dict{String,<:Any}) @@ -820,6 +848,43 @@ function list_batch_job_executions( ) end +""" + list_batch_job_restart_points(application_id, execution_id) + list_batch_job_restart_points(application_id, execution_id, params::Dict{String,<:Any}) + +Lists all the job steps for JCL files to restart a batch job. This is only applicable for +Micro Focus engine with versions 8.0.6 and above. + +# Arguments +- `application_id`: The unique identifier of the application. +- `execution_id`: The unique identifier of each batch job execution. + +""" +function list_batch_job_restart_points( + applicationId, executionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return m2( + "GET", + "/applications/$(applicationId)/batch-job-executions/$(executionId)/steps"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_batch_job_restart_points( + applicationId, + executionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return m2( + "GET", + "/applications/$(applicationId)/batch-job-executions/$(executionId)/steps", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_data_set_import_history(application_id) list_data_set_import_history(application_id, params::Dict{String,<:Any}) @@ -876,6 +941,8 @@ Modernization can import data sets into catalogs using CreateDataSetImportTask. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of objects to return. +- `"nameFilter"`: Filter dataset name matching the specified pattern. Can use * and % as + wild cards. - `"nextToken"`: A pagination token returned from a previous call to this operation. This specifies the next item to return. To return to the beginning of the list, exclude this parameter. @@ -1294,12 +1361,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Mainframe Modernization accepts the engineVersion parameter only if applyDuringMaintenanceWindow is true. If any parameter other than engineVersion is provided in UpdateEnvironmentRequest, it will fail if applyDuringMaintenanceWindow is set to true. -- `"desiredCapacity"`: The desired capacity for the runtime environment to update. +- `"desiredCapacity"`: The desired capacity for the runtime environment to update. The + minimum possible value is 0 and the maximum is 100. - `"engineVersion"`: The version of the runtime engine for the runtime environment. +- `"forceUpdate"`: Forces the updates on the environment. This option is needed if the + applications in the environment are not stopped or if there are ongoing application-related + activities in the environment. If you use this option, be aware that it could lead to data + corruption in the applications, and that you might need to perform repair and recovery + procedures for the applications. This option is not needed if the attribute being updated + is preferredMaintenanceWindow. - `"instanceType"`: The instance type for the runtime environment to update. -- `"preferredMaintenanceWindow"`: Configures the maintenance window you want for the - runtime environment. If you do not provide a value, a random system-generated value will be - assigned. +- `"preferredMaintenanceWindow"`: Configures the maintenance window that you want for the + runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi + and must be less than 24 hours. The following two examples are valid maintenance windows: + sun:23:45-mon:00:15 or sat:01:00-sat:03:00. If you do not provide a value, a random + system-generated value will be assigned. """ function update_environment( environmentId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/macie.jl b/src/services/macie.jl deleted file mode 100644 index 2006ffe136..0000000000 --- a/src/services/macie.jl +++ /dev/null @@ -1,281 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: macie -using AWS.Compat -using AWS.UUIDs - -""" - associate_member_account(member_account_id) - associate_member_account(member_account_id, params::Dict{String,<:Any}) - -(Discontinued) Associates a specified Amazon Web Services account with Amazon Macie Classic -as a member account. - -# Arguments -- `member_account_id`: (Discontinued) The ID of the Amazon Web Services account that you - want to associate with Amazon Macie Classic as a member account. 
- -""" -function associate_member_account( - memberAccountId; aws_config::AbstractAWSConfig=global_aws_config() -) - return macie( - "AssociateMemberAccount", - Dict{String,Any}("memberAccountId" => memberAccountId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_member_account( - memberAccountId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return macie( - "AssociateMemberAccount", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("memberAccountId" => memberAccountId), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - associate_s3_resources(s3_resources) - associate_s3_resources(s3_resources, params::Dict{String,<:Any}) - -(Discontinued) Associates specified S3 resources with Amazon Macie Classic for monitoring -and data classification. If memberAccountId isn't specified, the action associates -specified S3 resources with Macie Classic for the current Macie Classic administrator -account. If memberAccountId is specified, the action associates specified S3 resources with -Macie Classic for the specified member account. - -# Arguments -- `s3_resources`: (Discontinued) The S3 resources that you want to associate with Amazon - Macie Classic for monitoring and data classification. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"memberAccountId"`: (Discontinued) The ID of the Amazon Macie Classic member account - whose resources you want to associate with Macie Classic. -""" -function associate_s3_resources( - s3Resources; aws_config::AbstractAWSConfig=global_aws_config() -) - return macie( - "AssociateS3Resources", - Dict{String,Any}("s3Resources" => s3Resources); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_s3_resources( - s3Resources, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return macie( - "AssociateS3Resources", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("s3Resources" => s3Resources), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_member_account(member_account_id) - disassociate_member_account(member_account_id, params::Dict{String,<:Any}) - -(Discontinued) Removes the specified member account from Amazon Macie Classic. - -# Arguments -- `member_account_id`: (Discontinued) The ID of the member account that you want to remove - from Amazon Macie Classic. - -""" -function disassociate_member_account( - memberAccountId; aws_config::AbstractAWSConfig=global_aws_config() -) - return macie( - "DisassociateMemberAccount", - Dict{String,Any}("memberAccountId" => memberAccountId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disassociate_member_account( - memberAccountId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return macie( - "DisassociateMemberAccount", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("memberAccountId" => memberAccountId), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_s3_resources(associated_s3_resources) - disassociate_s3_resources(associated_s3_resources, params::Dict{String,<:Any}) - -(Discontinued) Removes specified S3 resources from being monitored by Amazon Macie Classic. 
-If memberAccountId isn't specified, the action removes specified S3 resources from Macie -Classic for the current Macie Classic administrator account. If memberAccountId is -specified, the action removes specified S3 resources from Macie Classic for the specified -member account. - -# Arguments -- `associated_s3_resources`: (Discontinued) The S3 resources (buckets or prefixes) that you - want to remove from being monitored and classified by Amazon Macie Classic. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"memberAccountId"`: (Discontinued) The ID of the Amazon Macie Classic member account - whose resources you want to remove from being monitored by Macie Classic. -""" -function disassociate_s3_resources( - associatedS3Resources; aws_config::AbstractAWSConfig=global_aws_config() -) - return macie( - "DisassociateS3Resources", - Dict{String,Any}("associatedS3Resources" => associatedS3Resources); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disassociate_s3_resources( - associatedS3Resources, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return macie( - "DisassociateS3Resources", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("associatedS3Resources" => associatedS3Resources), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_member_accounts() - list_member_accounts(params::Dict{String,<:Any}) - -(Discontinued) Lists all Amazon Macie Classic member accounts for the current Macie Classic -administrator account. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: (Discontinued) Use this parameter to indicate the maximum number of items - that you want in the response. The default value is 250. -- `"nextToken"`: (Discontinued) Use this parameter when paginating results. Set the value - of this parameter to null on your first call to the ListMemberAccounts action. Subsequent - calls to the action fill nextToken in the request with the value of nextToken from the - previous response to continue listing data. -""" -function list_member_accounts(; aws_config::AbstractAWSConfig=global_aws_config()) - return macie( - "ListMemberAccounts"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_member_accounts( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return macie( - "ListMemberAccounts", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_s3_resources() - list_s3_resources(params::Dict{String,<:Any}) - -(Discontinued) Lists all the S3 resources associated with Amazon Macie Classic. If -memberAccountId isn't specified, the action lists the S3 resources associated with Macie -Classic for the current Macie Classic administrator account. If memberAccountId is -specified, the action lists the S3 resources associated with Macie Classic for the -specified member account. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: (Discontinued) Use this parameter to indicate the maximum number of items - that you want in the response. The default value is 250. -- `"memberAccountId"`: (Discontinued) The Amazon Macie Classic member account ID whose - associated S3 resources you want to list. 
-- `"nextToken"`: (Discontinued) Use this parameter when paginating results. Set its value - to null on your first call to the ListS3Resources action. Subsequent calls to the action - fill nextToken in the request with the value of nextToken from the previous response to - continue listing data. -""" -function list_s3_resources(; aws_config::AbstractAWSConfig=global_aws_config()) - return macie("ListS3Resources"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) -end -function list_s3_resources( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return macie( - "ListS3Resources", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - update_s3_resources(s3_resources_update) - update_s3_resources(s3_resources_update, params::Dict{String,<:Any}) - -(Discontinued) Updates the classification types for the specified S3 resources. If -memberAccountId isn't specified, the action updates the classification types of the S3 -resources associated with Amazon Macie Classic for the current Macie Classic administrator -account. If memberAccountId is specified, the action updates the classification types of -the S3 resources associated with Macie Classic for the specified member account. - -# Arguments -- `s3_resources_update`: (Discontinued) The S3 resources whose classification types you - want to update. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"memberAccountId"`: (Discontinued) The Amazon Web Services account ID of the Amazon - Macie Classic member account whose S3 resources' classification types you want to update. -""" -function update_s3_resources( - s3ResourcesUpdate; aws_config::AbstractAWSConfig=global_aws_config() -) - return macie( - "UpdateS3Resources", - Dict{String,Any}("s3ResourcesUpdate" => s3ResourcesUpdate); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_s3_resources( - s3ResourcesUpdate, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return macie( - "UpdateS3Resources", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("s3ResourcesUpdate" => s3ResourcesUpdate), params - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/macie2.jl b/src/services/macie2.jl index 2873752c14..6711ab5138 100644 --- a/src/services/macie2.jl +++ b/src/services/macie2.jl @@ -79,6 +79,40 @@ function batch_get_custom_data_identifiers( ) end +""" + batch_update_automated_discovery_accounts() + batch_update_automated_discovery_accounts(params::Dict{String,<:Any}) + +Changes the status of automated sensitive data discovery for one or more accounts. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accounts"`: An array of objects, one for each account to change the status of automated + sensitive data discovery for. Each object specifies the Amazon Web Services account ID for + an account and a new status for that account. 
+""" +function batch_update_automated_discovery_accounts(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return macie2( + "PATCH", + "/automated-discovery/accounts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_update_automated_discovery_accounts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return macie2( + "PATCH", + "/automated-discovery/accounts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_allow_list(client_token, criteria, name) create_allow_list(client_token, criteria, name, params::Dict{String,<:Any}) @@ -151,7 +185,7 @@ Creates and defines the settings for a classification job. - `job_type`: The schedule for running the job. Valid values are: ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property. SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this - value, use the scheduleFrequency property to define the recurrence pattern for the job. + value, use the scheduleFrequency property to specify the recurrence pattern for the job. - `name`: A custom name for the job. The name can contain as many as 500 characters. - `s3_job_definition`: The S3 buckets that contain the objects to analyze, and the scope of that analysis. @@ -177,17 +211,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specify for the job (managedDataIdentifierSelector). To retrieve a list of valid values for this property, use the ListManagedDataIdentifiers operation. - `"managedDataIdentifierSelector"`: The selection type to apply when determining which - managed data identifiers the job uses to analyze data. Valid values are: ALL - Use all the - managed data identifiers that Amazon Macie provides. If you specify this value, don't - specify any values for the managedDataIdentifierIds property. EXCLUDE - Use all the managed - data identifiers that Macie provides except the managed data identifiers specified by the - managedDataIdentifierIds property. INCLUDE - Use only the managed data identifiers - specified by the managedDataIdentifierIds property. NONE - Don't use any managed data - identifiers. If you specify this value, specify at least one custom data identifier for the - job (customDataIdentifierIds) and don't specify any values for the managedDataIdentifierIds - property. If you don't specify a value for this property, the job uses all managed data - identifiers. If you don't specify a value for this property or you specify ALL or EXCLUDE - for a recurring job, the job also uses new managed data identifiers as they are released. + managed data identifiers the job uses to analyze data. Valid values are: ALL - Use all + managed data identifiers. If you specify this value, don't specify any values for the + managedDataIdentifierIds property. EXCLUDE - Use all managed data identifiers except the + ones specified by the managedDataIdentifierIds property. INCLUDE - Use only the managed + data identifiers specified by the managedDataIdentifierIds property. NONE - Don't use any + managed data identifiers. If you specify this value, specify at least one value for the + customDataIdentifierIds property and don't specify any values for the + managedDataIdentifierIds property. RECOMMENDED (default) - Use the recommended set of + managed data identifiers. If you specify this value, don't specify any values for the + managedDataIdentifierIds property. 
If you don't specify a value for this property, the job + uses the recommended set of managed data identifiers. If the job is a recurring job and you + specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that + are released. If you don't specify a value for this property or you specify RECOMMENDED for + a recurring job, each job run automatically uses all the managed data identifiers that are + in the recommended set when the run starts. To learn about individual managed data + identifiers or determine which ones are in the recommended set, see Using managed data + identifiers or Recommended managed data identifiers in the Amazon Macie User Guide. - `"samplingPercentage"`: The sampling depth, as a percentage, for the job to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at @@ -285,7 +325,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys matches the pattern and the keyword is within the specified distance, Amazon Macie includes the result. The distance can be 1-300 characters. The default value is 50. - `"severityLevels"`: The severity to assign to findings that the custom data identifier - produces, based on the number of occurrences of text that matches the custom data + produces, based on the number of occurrences of text that match the custom data identifier's detection criteria. You can specify as many as three SeverityLevel objects in this array, one for each severity: LOW, MEDIUM, or HIGH. If you specify more than one, the occurrences thresholds must be in ascending order by severity, moving from LOW to HIGH. For @@ -1104,7 +1144,7 @@ end get_automated_discovery_configuration(params::Dict{String,<:Any}) Retrieves the configuration settings and status of automated sensitive data discovery for -an account. +an organization or standalone account. """ function get_automated_discovery_configuration(; @@ -1253,7 +1293,7 @@ end get_finding_statistics(group_by) get_finding_statistics(group_by, params::Dict{String,<:Any}) - Retrieves (queries) aggregated statistical data about findings. +Retrieves (queries) aggregated statistical data about findings. # Arguments - `group_by`: The finding property to use to group the query results. Valid values are: @@ -1735,6 +1775,45 @@ function list_allow_lists( ) end +""" + list_automated_discovery_accounts() + list_automated_discovery_accounts(params::Dict{String,<:Any}) + +Retrieves the status of automated sensitive data discovery for one or more accounts. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountIds"`: The Amazon Web Services account ID for each account, for as many as 50 + accounts. To retrieve the status for multiple accounts, append the accountIds parameter and + argument for each account, separated by an ampersand (&). To retrieve the status for + all the accounts in an organization, omit this parameter. +- `"maxResults"`: The maximum number of items to include in each page of a paginated + response. +- `"nextToken"`: The nextToken string that specifies which page of results to return in a + paginated response. 
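A hedged sketch of paginating this operation via AWS.jl; the response field names (`items`, `nextToken`) and the parsed-Dict return value are assumptions about the default feature set.

```julia
using AWS
@service Macie2

# Page through the automated discovery status of all accounts in the organization.
params = Dict{String,Any}("maxResults" => 25)
while true
    page = Macie2.list_automated_discovery_accounts(params)  # parsed JSON Dict (assumed)
    foreach(println, get(page, "items", []))                 # response field name assumed
    token = get(page, "nextToken", nothing)
    token === nothing && break
    params["nextToken"] = token
end
```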
+""" +function list_automated_discovery_accounts(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return macie2( + "GET", + "/automated-discovery/accounts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_automated_discovery_accounts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return macie2( + "GET", + "/automated-discovery/accounts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_classification_jobs() list_classification_jobs(params::Dict{String,<:Any}) @@ -1887,8 +1966,8 @@ end list_invitations() list_invitations(params::Dict{String,<:Any}) -Retrieves information about the Amazon Macie membership invitations that were received by -an account. +Retrieves information about Amazon Macie membership invitations that were received by an +account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2006,8 +2085,8 @@ end list_resource_profile_artifacts(resource_arn) list_resource_profile_artifacts(resource_arn, params::Dict{String,<:Any}) -Retrieves information about objects that were selected from an S3 bucket for automated -sensitive data discovery. +Retrieves information about objects that Amazon Macie selected from an S3 bucket for +automated sensitive data discovery. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the S3 bucket that the request applies @@ -2163,7 +2242,7 @@ end put_classification_export_configuration(configuration) put_classification_export_configuration(configuration, params::Dict{String,<:Any}) -Creates or updates the configuration settings for storing data classification results. +Adds or updates the configuration settings for storing data classification results. # Arguments - `configuration`: The location to store data classification results in, and the encryption @@ -2315,7 +2394,7 @@ end test_custom_data_identifier(regex, sample_text) test_custom_data_identifier(regex, sample_text, params::Dict{String,<:Any}) -Tests a custom data identifier. +Tests criteria for a custom data identifier. # Arguments - `regex`: The regular expression (regex) that defines the pattern to match. The expression @@ -2466,19 +2545,26 @@ end update_automated_discovery_configuration(status) update_automated_discovery_configuration(status, params::Dict{String,<:Any}) -Enables or disables automated sensitive data discovery for an account. +Changes the configuration settings and status of automated sensitive data discovery for an +organization or standalone account. # Arguments -- `status`: The new status of automated sensitive data discovery for the account. Valid - values are: ENABLED, start or resume automated sensitive data discovery activities for the - account; and, DISABLED, stop performing automated sensitive data discovery activities for - the account. When you enable automated sensitive data discovery for the first time, Amazon - Macie uses default configuration settings to determine which data sources to analyze and - which managed data identifiers to use. To change these settings, use the - UpdateClassificationScope and UpdateSensitivityInspectionTemplate operations, respectively. - If you change the settings and subsequently disable the configuration, Amazon Macie retains - your changes. +- `status`: The new status of automated sensitive data discovery for the organization or + account. 
Valid values are: ENABLED, start or resume all automated sensitive data discovery + activities; and, DISABLED, stop performing all automated sensitive data discovery + activities. If you specify DISABLED for an administrator account, you also disable + automated sensitive data discovery for all member accounts in the organization. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoEnableOrganizationMembers"`: Specifies whether to automatically enable automated + sensitive data discovery for accounts in the organization. Valid values are: ALL (default), + enable it for all existing accounts and new member accounts; NEW, enable it only for new + member accounts; and, NONE, don't enable it for any accounts. If you specify NEW or NONE, + automated sensitive data discovery continues to be enabled for any existing accounts that + it's currently enabled for. To enable or disable it for individual member accounts, specify + NEW or NONE, and then enable or disable it for each account by using the + BatchUpdateAutomatedDiscoveryAccounts operation. """ function update_automated_discovery_configuration( status; aws_config::AbstractAWSConfig=global_aws_config() @@ -2716,8 +2802,8 @@ end Updates the Amazon Macie configuration settings for an organization in Organizations. # Arguments -- `auto_enable`: Specifies whether to enable Amazon Macie automatically for an account when - the account is added to the organization in Organizations. +- `auto_enable`: Specifies whether to enable Amazon Macie automatically for accounts that + are added to the organization in Organizations. """ function update_organization_configuration( @@ -2843,9 +2929,13 @@ Updates the status and configuration settings for retrieving occurrences of sens reported by findings. # Arguments -- `configuration`: The new configuration settings and the status of the configuration for - the account. +- `configuration`: The KMS key to use to encrypt the sensitive data, and the status of the + configuration for the Amazon Macie account. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"retrievalConfiguration"`: The access method and settings to use when retrieving the + sensitive data. """ function update_reveal_configuration( configuration; aws_config::AbstractAWSConfig=global_aws_config() @@ -2888,12 +2978,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"description"`: A custom description of the template. The description can contain as many as 200 characters. - `"excludes"`: The managed data identifiers to explicitly exclude (not use) when - analyzing data. To exclude an allow list or custom data identifier that's currently - included by the template, update the values for the + performing automated sensitive data discovery. To exclude an allow list or custom data + identifier that's currently included by the template, update the values for the SensitivityInspectionTemplateIncludes.allowListIds and SensitivityInspectionTemplateIncludes.customDataIdentifierIds properties, respectively. - `"includes"`: The allow lists, custom data identifiers, and managed data identifiers to - include (use) when analyzing data. + explicitly include (use) when performing automated sensitive data discovery. 
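For illustration, updating the template's includes might look like the sketch below; the template ID and identifier values are placeholders, and the nested key names mirror the SensitivityInspectionTemplateIncludes properties referenced above.

```julia
using AWS
@service Macie2

# Placeholder template ID and identifiers; nested keys follow the Includes/Excludes
# property names referenced in this docstring.
Macie2.update_sensitivity_inspection_template(
    "template-id-placeholder",
    Dict(
        "includes" => Dict(
            "allowListIds" => ["allow-list-id-placeholder"],
            "customDataIdentifierIds" => ["custom-identifier-id-placeholder"],
        ),
    ),
)
```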
""" function update_sensitivity_inspection_template( id; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/mailmanager.jl b/src/services/mailmanager.jl new file mode 100644 index 0000000000..6a914312b9 --- /dev/null +++ b/src/services/mailmanager.jl @@ -0,0 +1,1858 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: mailmanager +using AWS.Compat +using AWS.UUIDs + +""" + create_addon_instance(addon_subscription_id) + create_addon_instance(addon_subscription_id, params::Dict{String,<:Any}) + +Creates an Add On instance for the subscription indicated in the request. The resulting +Amazon Resource Name (ARN) can be used in a conditional statement for a rule set or traffic +policy. + +# Arguments +- `addon_subscription_id`: The unique ID of a previously created subscription that an Add + On instance is created for. You can only have one instance per subscription. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique token that Amazon SES uses to recognize subsequent retries of + the same request. +- `"Tags"`: The tags used to organize, track, or control access for the resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_addon_instance( + AddonSubscriptionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "CreateAddonInstance", + Dict{String,Any}( + "AddonSubscriptionId" => AddonSubscriptionId, "ClientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_addon_instance( + AddonSubscriptionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateAddonInstance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AddonSubscriptionId" => AddonSubscriptionId, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_addon_subscription(addon_name) + create_addon_subscription(addon_name, params::Dict{String,<:Any}) + +Creates a subscription for an Add On representing the acceptance of its terms of use and +additional pricing. The subscription can then be used to create an instance for use in rule +sets or traffic policies. + +# Arguments +- `addon_name`: The name of the Add On to subscribe to. You can only have one subscription + for each Add On name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique token that Amazon SES uses to recognize subsequent retries of + the same request. +- `"Tags"`: The tags used to organize, track, or control access for the resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. 
+""" +function create_addon_subscription( + AddonName; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "CreateAddonSubscription", + Dict{String,Any}("AddonName" => AddonName, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_addon_subscription( + AddonName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateAddonSubscription", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AddonName" => AddonName, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_archive(archive_name) + create_archive(archive_name, params::Dict{String,<:Any}) + +Creates a new email archive resource for storing and retaining emails. + +# Arguments +- `archive_name`: A unique name for the new archive. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique token Amazon SES uses to recognize retries of this request. +- `"KmsKeyArn"`: The Amazon Resource Name (ARN) of the KMS key for encrypting emails in the + archive. +- `"Retention"`: The period for retaining emails in the archive before automatic deletion. +- `"Tags"`: The tags used to organize, track, or control access for the resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_archive(ArchiveName; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "CreateArchive", + Dict{String,Any}("ArchiveName" => ArchiveName, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_archive( + ArchiveName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateArchive", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ArchiveName" => ArchiveName, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_ingress_point(ingress_point_name, rule_set_id, traffic_policy_id, type) + create_ingress_point(ingress_point_name, rule_set_id, traffic_policy_id, type, params::Dict{String,<:Any}) + +Provision a new ingress endpoint resource. + +# Arguments +- `ingress_point_name`: A user friendly name for an ingress endpoint resource. +- `rule_set_id`: The identifier of an existing rule set that you attach to an ingress + endpoint resource. +- `traffic_policy_id`: The identifier of an existing traffic policy that you attach to an + ingress endpoint resource. +- `type`: The type of the ingress endpoint to create. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique token that Amazon SES uses to recognize subsequent retries of + the same request. +- `"IngressPointConfiguration"`: If you choose an Authenticated ingress endpoint, you must + configure either an SMTP password or a secret ARN. +- `"Tags"`: The tags used to organize, track, or control access for the resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. 
+""" +function create_ingress_point( + IngressPointName, + RuleSetId, + TrafficPolicyId, + Type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateIngressPoint", + Dict{String,Any}( + "IngressPointName" => IngressPointName, + "RuleSetId" => RuleSetId, + "TrafficPolicyId" => TrafficPolicyId, + "Type" => Type, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ingress_point( + IngressPointName, + RuleSetId, + TrafficPolicyId, + Type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateIngressPoint", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IngressPointName" => IngressPointName, + "RuleSetId" => RuleSetId, + "TrafficPolicyId" => TrafficPolicyId, + "Type" => Type, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_relay(authentication, relay_name, server_name, server_port) + create_relay(authentication, relay_name, server_name, server_port, params::Dict{String,<:Any}) + +Creates a relay resource which can be used in rules to relay incoming emails to defined +relay destinations. + +# Arguments +- `authentication`: Authentication for the relay destination server—specify the secretARN + where the SMTP credentials are stored. +- `relay_name`: The unique name of the relay resource. +- `server_name`: The destination relay server address. +- `server_port`: The destination relay server port. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique token that Amazon SES uses to recognize subsequent retries of + the same request. +- `"Tags"`: The tags used to organize, track, or control access for the resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_relay( + Authentication, + RelayName, + ServerName, + ServerPort; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateRelay", + Dict{String,Any}( + "Authentication" => Authentication, + "RelayName" => RelayName, + "ServerName" => ServerName, + "ServerPort" => ServerPort, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_relay( + Authentication, + RelayName, + ServerName, + ServerPort, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateRelay", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Authentication" => Authentication, + "RelayName" => RelayName, + "ServerName" => ServerName, + "ServerPort" => ServerPort, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_rule_set(rule_set_name, rules) + create_rule_set(rule_set_name, rules, params::Dict{String,<:Any}) + +Provision a new rule set. + +# Arguments +- `rule_set_name`: A user-friendly name for the rule set. +- `rules`: Conditional rules that are evaluated for determining actions on email. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique token that Amazon SES uses to recognize subsequent retries of + the same request. +- `"Tags"`: The tags used to organize, track, or control access for the resource. 
For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_rule_set( + RuleSetName, Rules; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "CreateRuleSet", + Dict{String,Any}( + "RuleSetName" => RuleSetName, "Rules" => Rules, "ClientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_rule_set( + RuleSetName, + Rules, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateRuleSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RuleSetName" => RuleSetName, + "Rules" => Rules, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_traffic_policy(default_action, policy_statements, traffic_policy_name) + create_traffic_policy(default_action, policy_statements, traffic_policy_name, params::Dict{String,<:Any}) + +Provision a new traffic policy resource. + +# Arguments +- `default_action`: Default action instructs the traffic policy to either Allow or Deny + (block) messages that fall outside of (or not addressed by) the conditions of your policy + statements +- `policy_statements`: Conditional statements for filtering email traffic. +- `traffic_policy_name`: A user-friendly name for the traffic policy resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique token that Amazon SES uses to recognize subsequent retries of + the same request. +- `"MaxMessageSizeBytes"`: The maximum message size in bytes of email which is allowed in + by this traffic policy—anything larger will be blocked. +- `"Tags"`: The tags used to organize, track, or control access for the resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. +""" +function create_traffic_policy( + DefaultAction, + PolicyStatements, + TrafficPolicyName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateTrafficPolicy", + Dict{String,Any}( + "DefaultAction" => DefaultAction, + "PolicyStatements" => PolicyStatements, + "TrafficPolicyName" => TrafficPolicyName, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_traffic_policy( + DefaultAction, + PolicyStatements, + TrafficPolicyName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "CreateTrafficPolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DefaultAction" => DefaultAction, + "PolicyStatements" => PolicyStatements, + "TrafficPolicyName" => TrafficPolicyName, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_addon_instance(addon_instance_id) + delete_addon_instance(addon_instance_id, params::Dict{String,<:Any}) + +Deletes an Add On instance. + +# Arguments +- `addon_instance_id`: The Add On instance ID to delete. 
+ +""" +function delete_addon_instance( + AddonInstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "DeleteAddonInstance", + Dict{String,Any}("AddonInstanceId" => AddonInstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_addon_instance( + AddonInstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "DeleteAddonInstance", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("AddonInstanceId" => AddonInstanceId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_addon_subscription(addon_subscription_id) + delete_addon_subscription(addon_subscription_id, params::Dict{String,<:Any}) + +Deletes an Add On subscription. + +# Arguments +- `addon_subscription_id`: The Add On subscription ID to delete. + +""" +function delete_addon_subscription( + AddonSubscriptionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "DeleteAddonSubscription", + Dict{String,Any}("AddonSubscriptionId" => AddonSubscriptionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_addon_subscription( + AddonSubscriptionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "DeleteAddonSubscription", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AddonSubscriptionId" => AddonSubscriptionId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_archive(archive_id) + delete_archive(archive_id, params::Dict{String,<:Any}) + +Initiates deletion of an email archive. This changes the archive state to pending deletion. +In this state, no new emails can be added, and existing archived emails become inaccessible +(search, export, download). The archive and all of its contents will be permanently deleted +30 days after entering the pending deletion state, regardless of the configured retention +period. + +# Arguments +- `archive_id`: The identifier of the archive to delete. + +""" +function delete_archive(ArchiveId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "DeleteArchive", + Dict{String,Any}("ArchiveId" => ArchiveId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_archive( + ArchiveId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "DeleteArchive", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ArchiveId" => ArchiveId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_ingress_point(ingress_point_id) + delete_ingress_point(ingress_point_id, params::Dict{String,<:Any}) + +Delete an ingress endpoint resource. + +# Arguments +- `ingress_point_id`: The identifier of the ingress endpoint resource that you want to + delete. 
+ +""" +function delete_ingress_point( + IngressPointId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "DeleteIngressPoint", + Dict{String,Any}("IngressPointId" => IngressPointId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ingress_point( + IngressPointId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "DeleteIngressPoint", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("IngressPointId" => IngressPointId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_relay(relay_id) + delete_relay(relay_id, params::Dict{String,<:Any}) + +Deletes an existing relay resource. + +# Arguments +- `relay_id`: The unique relay identifier. + +""" +function delete_relay(RelayId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "DeleteRelay", + Dict{String,Any}("RelayId" => RelayId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_relay( + RelayId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "DeleteRelay", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("RelayId" => RelayId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_rule_set(rule_set_id) + delete_rule_set(rule_set_id, params::Dict{String,<:Any}) + +Delete a rule set. + +# Arguments +- `rule_set_id`: The identifier of an existing rule set resource to delete. + +""" +function delete_rule_set(RuleSetId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "DeleteRuleSet", + Dict{String,Any}("RuleSetId" => RuleSetId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_rule_set( + RuleSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "DeleteRuleSet", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RuleSetId" => RuleSetId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_traffic_policy(traffic_policy_id) + delete_traffic_policy(traffic_policy_id, params::Dict{String,<:Any}) + +Delete a traffic policy resource. + +# Arguments +- `traffic_policy_id`: The identifier of the traffic policy that you want to delete. + +""" +function delete_traffic_policy( + TrafficPolicyId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "DeleteTrafficPolicy", + Dict{String,Any}("TrafficPolicyId" => TrafficPolicyId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_traffic_policy( + TrafficPolicyId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "DeleteTrafficPolicy", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrafficPolicyId" => TrafficPolicyId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_addon_instance(addon_instance_id) + get_addon_instance(addon_instance_id, params::Dict{String,<:Any}) + +Gets detailed information about an Add On instance. + +# Arguments +- `addon_instance_id`: The Add On instance ID to retrieve information for. 
+ +""" +function get_addon_instance( + AddonInstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetAddonInstance", + Dict{String,Any}("AddonInstanceId" => AddonInstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_addon_instance( + AddonInstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetAddonInstance", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("AddonInstanceId" => AddonInstanceId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_addon_subscription(addon_subscription_id) + get_addon_subscription(addon_subscription_id, params::Dict{String,<:Any}) + +Gets detailed information about an Add On subscription. + +# Arguments +- `addon_subscription_id`: The Add On subscription ID to retrieve information for. + +""" +function get_addon_subscription( + AddonSubscriptionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetAddonSubscription", + Dict{String,Any}("AddonSubscriptionId" => AddonSubscriptionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_addon_subscription( + AddonSubscriptionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetAddonSubscription", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AddonSubscriptionId" => AddonSubscriptionId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_archive(archive_id) + get_archive(archive_id, params::Dict{String,<:Any}) + +Retrieves the full details and current state of a specified email archive. + +# Arguments +- `archive_id`: The identifier of the archive to retrieve. + +""" +function get_archive(ArchiveId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "GetArchive", + Dict{String,Any}("ArchiveId" => ArchiveId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_archive( + ArchiveId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetArchive", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ArchiveId" => ArchiveId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_archive_export(export_id) + get_archive_export(export_id, params::Dict{String,<:Any}) + +Retrieves the details and current status of a specific email archive export job. + +# Arguments +- `export_id`: The identifier of the export job to get details for. + +""" +function get_archive_export(ExportId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "GetArchiveExport", + Dict{String,Any}("ExportId" => ExportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_archive_export( + ExportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetArchiveExport", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ExportId" => ExportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_archive_message(archived_message_id) + get_archive_message(archived_message_id, params::Dict{String,<:Any}) + +Returns a pre-signed URL that provides temporary download access to the specific email +message stored in the archive. 
+ +# Arguments +- `archived_message_id`: The unique identifier of the archived email message. + +""" +function get_archive_message( + ArchivedMessageId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetArchiveMessage", + Dict{String,Any}("ArchivedMessageId" => ArchivedMessageId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_archive_message( + ArchivedMessageId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetArchiveMessage", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ArchivedMessageId" => ArchivedMessageId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_archive_message_content(archived_message_id) + get_archive_message_content(archived_message_id, params::Dict{String,<:Any}) + +Returns the textual content of a specific email message stored in the archive. Attachments +are not included. + +# Arguments +- `archived_message_id`: The unique identifier of the archived email message. + +""" +function get_archive_message_content( + ArchivedMessageId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetArchiveMessageContent", + Dict{String,Any}("ArchivedMessageId" => ArchivedMessageId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_archive_message_content( + ArchivedMessageId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetArchiveMessageContent", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ArchivedMessageId" => ArchivedMessageId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_archive_search(search_id) + get_archive_search(search_id, params::Dict{String,<:Any}) + +Retrieves the details and current status of a specific email archive search job. + +# Arguments +- `search_id`: The identifier of the search job to get details for. + +""" +function get_archive_search(SearchId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "GetArchiveSearch", + Dict{String,Any}("SearchId" => SearchId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_archive_search( + SearchId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetArchiveSearch", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SearchId" => SearchId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_archive_search_results(search_id) + get_archive_search_results(search_id, params::Dict{String,<:Any}) + +Returns the results of a completed email archive search job. + +# Arguments +- `search_id`: The identifier of the completed search job. 
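A small sketch of fetching results once a search has completed; the search ID is a placeholder and would normally come from StartArchiveSearch.

```julia
using AWS
@service MailManager

# Placeholder search ID, normally obtained from a prior StartArchiveSearch call.
results = MailManager.get_archive_search_results("search-id-placeholder")
```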
+ +""" +function get_archive_search_results( + SearchId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetArchiveSearchResults", + Dict{String,Any}("SearchId" => SearchId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_archive_search_results( + SearchId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetArchiveSearchResults", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SearchId" => SearchId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_ingress_point(ingress_point_id) + get_ingress_point(ingress_point_id, params::Dict{String,<:Any}) + +Fetch ingress endpoint resource attributes. + +# Arguments +- `ingress_point_id`: The identifier of an ingress endpoint. + +""" +function get_ingress_point( + IngressPointId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetIngressPoint", + Dict{String,Any}("IngressPointId" => IngressPointId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ingress_point( + IngressPointId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetIngressPoint", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("IngressPointId" => IngressPointId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_relay(relay_id) + get_relay(relay_id, params::Dict{String,<:Any}) + +Fetch the relay resource and it's attributes. + +# Arguments +- `relay_id`: A unique relay identifier. + +""" +function get_relay(RelayId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "GetRelay", + Dict{String,Any}("RelayId" => RelayId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_relay( + RelayId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetRelay", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("RelayId" => RelayId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_rule_set(rule_set_id) + get_rule_set(rule_set_id, params::Dict{String,<:Any}) + +Fetch attributes of a rule set. + +# Arguments +- `rule_set_id`: The identifier of an existing rule set to be retrieved. + +""" +function get_rule_set(RuleSetId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "GetRuleSet", + Dict{String,Any}("RuleSetId" => RuleSetId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_rule_set( + RuleSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetRuleSet", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RuleSetId" => RuleSetId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_traffic_policy(traffic_policy_id) + get_traffic_policy(traffic_policy_id, params::Dict{String,<:Any}) + +Fetch attributes of a traffic policy resource. + +# Arguments +- `traffic_policy_id`: The identifier of the traffic policy resource. 
+ +""" +function get_traffic_policy( + TrafficPolicyId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "GetTrafficPolicy", + Dict{String,Any}("TrafficPolicyId" => TrafficPolicyId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_traffic_policy( + TrafficPolicyId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "GetTrafficPolicy", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrafficPolicyId" => TrafficPolicyId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_addon_instances() + list_addon_instances(params::Dict{String,<:Any}) + +Lists all Add On instances in your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If you received a pagination token from a previous call to this API, you + can provide it here to continue paginating through the next page of results. +- `"PageSize"`: The maximum number of ingress endpoint resources that are returned per + call. You can use NextToken to obtain further ingress endpoints. +""" +function list_addon_instances(; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListAddonInstances"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_addon_instances( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListAddonInstances", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_addon_subscriptions() + list_addon_subscriptions(params::Dict{String,<:Any}) + +Lists all Add On subscriptions in your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If you received a pagination token from a previous call to this API, you + can provide it here to continue paginating through the next page of results. +- `"PageSize"`: The maximum number of ingress endpoint resources that are returned per + call. You can use NextToken to obtain further ingress endpoints. +""" +function list_addon_subscriptions(; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListAddonSubscriptions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_addon_subscriptions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListAddonSubscriptions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_archive_exports(archive_id) + list_archive_exports(archive_id, params::Dict{String,<:Any}) + +Returns a list of email archive export jobs. + +# Arguments +- `archive_id`: The identifier of the archive. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If NextToken is returned, there are more results available. The value of + NextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. +- `"PageSize"`: The maximum number of archive export jobs that are returned per call. You + can use NextToken to obtain further pages of archives. 
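For illustration, listing the first page of export jobs for an archive; the archive ID is a placeholder and the `NextToken` response field name is an assumption.

```julia
using AWS
@service MailManager

page = MailManager.list_archive_exports("archive-id-placeholder", Dict("PageSize" => 10))
# A "NextToken" value in the response (field name assumed) can be passed back in the
# params Dict to retrieve the next page.
```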
+""" +function list_archive_exports(ArchiveId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListArchiveExports", + Dict{String,Any}("ArchiveId" => ArchiveId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_archive_exports( + ArchiveId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "ListArchiveExports", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ArchiveId" => ArchiveId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_archive_searches(archive_id) + list_archive_searches(archive_id, params::Dict{String,<:Any}) + +Returns a list of email archive search jobs. + +# Arguments +- `archive_id`: The identifier of the archive. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If NextToken is returned, there are more results available. The value of + NextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. +- `"PageSize"`: The maximum number of archive search jobs that are returned per call. You + can use NextToken to obtain further pages of archives. +""" +function list_archive_searches(ArchiveId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListArchiveSearches", + Dict{String,Any}("ArchiveId" => ArchiveId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_archive_searches( + ArchiveId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "ListArchiveSearches", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ArchiveId" => ArchiveId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_archives() + list_archives(params::Dict{String,<:Any}) + +Returns a list of all email archives in your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If NextToken is returned, there are more results available. The value of + NextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. +- `"PageSize"`: The maximum number of archives that are returned per call. You can use + NextToken to obtain further pages of archives. +""" +function list_archives(; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListArchives"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_archives( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListArchives", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_ingress_points() + list_ingress_points(params::Dict{String,<:Any}) + +List all ingress endpoint resources. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If you received a pagination token from a previous call to this API, you + can provide it here to continue paginating through the next page of results. +- `"PageSize"`: The maximum number of ingress endpoint resources that are returned per + call. You can use NextToken to obtain further ingress endpoints. 
+""" +function list_ingress_points(; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListIngressPoints"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_ingress_points( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListIngressPoints", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_relays() + list_relays(params::Dict{String,<:Any}) + +Lists all the existing relay resources. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If you received a pagination token from a previous call to this API, you + can provide it here to continue paginating through the next page of results. +- `"PageSize"`: The number of relays to be returned in one request. +""" +function list_relays(; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager("ListRelays"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_relays( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListRelays", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_rule_sets() + list_rule_sets(params::Dict{String,<:Any}) + +List rule sets for this account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If you received a pagination token from a previous call to this API, you + can provide it here to continue paginating through the next page of results. +- `"PageSize"`: The maximum number of rule set resources that are returned per call. You + can use NextToken to obtain further rule sets. +""" +function list_rule_sets(; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListRuleSets"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_rule_sets( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListRuleSets", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + + Retrieves the list of tags (keys and values) assigned to the resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to retrieve tags from. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListTagsForResource", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_traffic_policies() + list_traffic_policies(params::Dict{String,<:Any}) + +List traffic policy resources. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: If you received a pagination token from a previous call to this API, you + can provide it here to continue paginating through the next page of results. 
+- `"PageSize"`: The maximum number of traffic policy resources that are returned per call. + You can use NextToken to obtain further traffic policies. +""" +function list_traffic_policies(; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "ListTrafficPolicies"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_traffic_policies( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "ListTrafficPolicies", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_archive_export(archive_id, export_destination_configuration, from_timestamp, to_timestamp) + start_archive_export(archive_id, export_destination_configuration, from_timestamp, to_timestamp, params::Dict{String,<:Any}) + +Initiates an export of emails from the specified archive. + +# Arguments +- `archive_id`: The identifier of the archive to export emails from. +- `export_destination_configuration`: Details on where to deliver the exported email data. +- `from_timestamp`: The start of the timestamp range to include emails from. +- `to_timestamp`: The end of the timestamp range to include emails from. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Criteria to filter which emails are included in the export. +- `"MaxResults"`: The maximum number of email items to include in the export. +""" +function start_archive_export( + ArchiveId, + ExportDestinationConfiguration, + FromTimestamp, + ToTimestamp; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "StartArchiveExport", + Dict{String,Any}( + "ArchiveId" => ArchiveId, + "ExportDestinationConfiguration" => ExportDestinationConfiguration, + "FromTimestamp" => FromTimestamp, + "ToTimestamp" => ToTimestamp, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_archive_export( + ArchiveId, + ExportDestinationConfiguration, + FromTimestamp, + ToTimestamp, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "StartArchiveExport", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ArchiveId" => ArchiveId, + "ExportDestinationConfiguration" => ExportDestinationConfiguration, + "FromTimestamp" => FromTimestamp, + "ToTimestamp" => ToTimestamp, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_archive_search(archive_id, from_timestamp, max_results, to_timestamp) + start_archive_search(archive_id, from_timestamp, max_results, to_timestamp, params::Dict{String,<:Any}) + +Initiates a search across emails in the specified archive. + +# Arguments +- `archive_id`: The identifier of the archive to search emails in. +- `from_timestamp`: The start timestamp of the range to search emails from. +- `max_results`: The maximum number of search results to return. +- `to_timestamp`: The end timestamp of the range to search emails from. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Criteria to filter which emails are included in the search results. 
+""" +function start_archive_search( + ArchiveId, + FromTimestamp, + MaxResults, + ToTimestamp; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "StartArchiveSearch", + Dict{String,Any}( + "ArchiveId" => ArchiveId, + "FromTimestamp" => FromTimestamp, + "MaxResults" => MaxResults, + "ToTimestamp" => ToTimestamp, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_archive_search( + ArchiveId, + FromTimestamp, + MaxResults, + ToTimestamp, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "StartArchiveSearch", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ArchiveId" => ArchiveId, + "FromTimestamp" => FromTimestamp, + "MaxResults" => MaxResults, + "ToTimestamp" => ToTimestamp, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_archive_export(export_id) + stop_archive_export(export_id, params::Dict{String,<:Any}) + +Stops an in-progress export of emails from an archive. + +# Arguments +- `export_id`: The identifier of the export job to stop. + +""" +function stop_archive_export(ExportId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "StopArchiveExport", + Dict{String,Any}("ExportId" => ExportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_archive_export( + ExportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "StopArchiveExport", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ExportId" => ExportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_archive_search(search_id) + stop_archive_search(search_id, params::Dict{String,<:Any}) + +Stops an in-progress archive search job. + +# Arguments +- `search_id`: The identifier of the search job to stop. + +""" +function stop_archive_search(SearchId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "StopArchiveSearch", + Dict{String,Any}("SearchId" => SearchId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_archive_search( + SearchId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "StopArchiveSearch", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SearchId" => SearchId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + + Adds one or more tags (keys and values) to a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to tag. +- `tags`: The tags used to organize, track, or control access for the resource. For + example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }. 
+ +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "TagResource", + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + + Remove one or more tags (keys and values) from a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to untag. +- `tag_keys`: The keys of the key-value pairs for the tag or tags you want to remove from + the specified resource. + +""" +function untag_resource( + ResourceArn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "UntagResource", + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_archive(archive_id) + update_archive(archive_id, params::Dict{String,<:Any}) + +Updates the attributes of an existing email archive. + +# Arguments +- `archive_id`: The identifier of the archive to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ArchiveName"`: A new, unique name for the archive. +- `"Retention"`: A new retention period for emails in the archive. +""" +function update_archive(ArchiveId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "UpdateArchive", + Dict{String,Any}("ArchiveId" => ArchiveId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_archive( + ArchiveId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "UpdateArchive", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ArchiveId" => ArchiveId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_ingress_point(ingress_point_id) + update_ingress_point(ingress_point_id, params::Dict{String,<:Any}) + +Update attributes of a provisioned ingress endpoint resource. + +# Arguments +- `ingress_point_id`: The identifier for the ingress endpoint you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IngressPointConfiguration"`: If you choose an Authenticated ingress endpoint, you must + configure either an SMTP password or a secret ARN. +- `"IngressPointName"`: A user friendly name for the ingress endpoint resource. +- `"RuleSetId"`: The identifier of an existing rule set that you attach to an ingress + endpoint resource. 
+- `"StatusToUpdate"`: The update status of an ingress endpoint. +- `"TrafficPolicyId"`: The identifier of an existing traffic policy that you attach to an + ingress endpoint resource. +""" +function update_ingress_point( + IngressPointId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "UpdateIngressPoint", + Dict{String,Any}("IngressPointId" => IngressPointId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_ingress_point( + IngressPointId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "UpdateIngressPoint", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("IngressPointId" => IngressPointId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_relay(relay_id) + update_relay(relay_id, params::Dict{String,<:Any}) + +Updates the attributes of an existing relay resource. + +# Arguments +- `relay_id`: The unique relay identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Authentication"`: Authentication for the relay destination server—specify the + secretARN where the SMTP credentials are stored. +- `"RelayName"`: The name of the relay resource. +- `"ServerName"`: The destination relay server address. +- `"ServerPort"`: The destination relay server port. +""" +function update_relay(RelayId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "UpdateRelay", + Dict{String,Any}("RelayId" => RelayId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_relay( + RelayId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "UpdateRelay", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("RelayId" => RelayId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_rule_set(rule_set_id) + update_rule_set(rule_set_id, params::Dict{String,<:Any}) + +>Update attributes of an already provisioned rule set. + +# Arguments +- `rule_set_id`: The identifier of a rule set you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"RuleSetName"`: A user-friendly name for the rule set resource. +- `"Rules"`: A new set of rules to replace the current rules of the rule set—these rules + will override all the rules of the rule set. +""" +function update_rule_set(RuleSetId; aws_config::AbstractAWSConfig=global_aws_config()) + return mailmanager( + "UpdateRuleSet", + Dict{String,Any}("RuleSetId" => RuleSetId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_rule_set( + RuleSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "UpdateRuleSet", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RuleSetId" => RuleSetId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_traffic_policy(traffic_policy_id) + update_traffic_policy(traffic_policy_id, params::Dict{String,<:Any}) + +Update attributes of an already provisioned traffic policy resource. + +# Arguments +- `traffic_policy_id`: The identifier of the traffic policy that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"DefaultAction"`: Default action instructs the traffic policy to either Allow or Deny + (block) messages that fall outside of (or not addressed by) the conditions of your policy + statements +- `"MaxMessageSizeBytes"`: The maximum message size in bytes of email which is allowed in + by this traffic policy—anything larger will be blocked. +- `"PolicyStatements"`: The list of conditions to be updated for filtering email traffic. +- `"TrafficPolicyName"`: A user-friendly name for the traffic policy resource. +""" +function update_traffic_policy( + TrafficPolicyId; aws_config::AbstractAWSConfig=global_aws_config() +) + return mailmanager( + "UpdateTrafficPolicy", + Dict{String,Any}("TrafficPolicyId" => TrafficPolicyId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_traffic_policy( + TrafficPolicyId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mailmanager( + "UpdateTrafficPolicy", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrafficPolicyId" => TrafficPolicyId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/managedblockchain.jl b/src/services/managedblockchain.jl index c3effc2cf9..314833385e 100644 --- a/src/services/managedblockchain.jl +++ b/src/services/managedblockchain.jl @@ -8,8 +8,8 @@ using AWS.UUIDs create_accessor(accessor_type, client_request_token) create_accessor(accessor_type, client_request_token, params::Dict{String,<:Any}) -Creates a new accessor for use with Managed Blockchain Ethereum nodes. An accessor contains -information required for token based access to your Ethereum nodes. +Creates a new accessor for use with Amazon Managed Blockchain service that supports token +based access. The accessor contains information required for token based access. # Arguments - `accessor_type`: The type of accessor. Currently, accessor type is restricted to @@ -22,6 +22,12 @@ information required for token based access to your Ethereum nodes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NetworkType"`: The blockchain network that the Accessor token is created for. Use + the actual networkType value for the blockchain network that you are creating the Accessor + token for. With the shut down of the Ethereum Goerli and Polygon Mumbai Testnet networks + the following networkType values are no longer available for selection and use. + ETHEREUM_MAINNET_AND_GOERLI ETHEREUM_GOERLI POLYGON_MUMBAI However, your + existing Accessor tokens with these networkType values will remain unchanged. - `"Tags"`: Tags to assign to the Accessor. Each tag consists of a key and an optional value. You can specify multiple key-value pairs in a single request with an overall maximum of 50 tags allowed per resource. For more information about tags, see Tagging Resources in @@ -229,8 +235,7 @@ Ethereum. client. It is generated automatically if you use an Amazon Web Services SDK or the CLI. - `node_configuration`: The properties of a node configuration. - `network_id`: The unique identifier of the network for the node. Ethereum public networks - have the following NetworkIds: n-ethereum-mainnet n-ethereum-goerli - n-ethereum-rinkeby + have the following NetworkIds: n-ethereum-mainnet # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -450,8 +455,7 @@ cannot be recovered. 
Applies to Hyperledger Fabric and Ethereum. # Arguments - `network_id`: The unique identifier of the network that the node is on. Ethereum public - networks have the following NetworkIds: n-ethereum-mainnet n-ethereum-goerli - n-ethereum-rinkeby + networks have the following NetworkIds: n-ethereum-mainnet - `node_id`: The unique identifier of the node. # Optional Parameters @@ -665,6 +669,9 @@ have the information required for token based access to your Ethereum nodes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of accessors to list. +- `"networkType"`: The blockchain network that the Accessor token is created for. Use the + value ETHEREUM_MAINNET_AND_GOERLI for all existing Accessors tokens that were created + before the networkType property was introduced. - `"nextToken"`: The pagination token that indicates the next set of results to retrieve. """ function list_accessors(; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/managedblockchain_query.jl b/src/services/managedblockchain_query.jl new file mode 100644 index 0000000000..589166a2d9 --- /dev/null +++ b/src/services/managedblockchain_query.jl @@ -0,0 +1,440 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: managedblockchain_query +using AWS.Compat +using AWS.UUIDs + +""" + batch_get_token_balance() + batch_get_token_balance(params::Dict{String,<:Any}) + +Gets the token balance for a batch of tokens by using the BatchGetTokenBalance action for +every token in the request. Only the native tokens BTC and ETH, and the ERC-20, ERC-721, +and ERC 1155 token standards are supported. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"getTokenBalanceInputs"`: An array of BatchGetTokenBalanceInputItem objects whose + balance is being requested. +""" +function batch_get_token_balance(; aws_config::AbstractAWSConfig=global_aws_config()) + return managedblockchain_query( + "POST", + "/batch-get-token-balance"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_token_balance( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/batch-get-token-balance", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_asset_contract(contract_identifier) + get_asset_contract(contract_identifier, params::Dict{String,<:Any}) + +Gets the information about a specific contract deployed on the blockchain. The Bitcoin +blockchain networks do not support this operation. Metadata is currently only available +for some ERC-20 contracts. Metadata will be available for additional contracts in the +future. + +# Arguments +- `contract_identifier`: Contains the blockchain address and network information about the + contract. 
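+
+# Example
+An illustrative sketch; the field names inside the identifier and the contract address are
+assumptions used for demonstration only:
+
+    get_asset_contract(
+        Dict(
+            "network" => "ETHEREUM_MAINNET",
+            "contractAddress" => "0x<contract-address>",
+        ),
+    )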
+ +""" +function get_asset_contract( + contractIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/get-asset-contract", + Dict{String,Any}("contractIdentifier" => contractIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_asset_contract( + contractIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/get-asset-contract", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("contractIdentifier" => contractIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_token_balance(owner_identifier, token_identifier) + get_token_balance(owner_identifier, token_identifier, params::Dict{String,<:Any}) + +Gets the balance of a specific token, including native tokens, for a given address (wallet +or contract) on the blockchain. Only the native tokens BTC and ETH, and the ERC-20, +ERC-721, and ERC 1155 token standards are supported. + +# Arguments +- `owner_identifier`: The container for the identifier for the owner. +- `token_identifier`: The container for the identifier for the token, including the unique + token ID and its blockchain network. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"atBlockchainInstant"`: The time for when the TokenBalance is requested or the current + time if a time is not provided in the request. This time will only be recorded up to the + second. +""" +function get_token_balance( + ownerIdentifier, tokenIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/get-token-balance", + Dict{String,Any}( + "ownerIdentifier" => ownerIdentifier, "tokenIdentifier" => tokenIdentifier + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_token_balance( + ownerIdentifier, + tokenIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/get-token-balance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ownerIdentifier" => ownerIdentifier, + "tokenIdentifier" => tokenIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_transaction(network) + get_transaction(network, params::Dict{String,<:Any}) + +Gets the details of a transaction. This action will return transaction details for all +transactions that are confirmed on the blockchain, even if they have not reached finality. + +# Arguments +- `network`: The blockchain network where the transaction occurred. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"transactionHash"`: The hash of a transaction. It is generated when a transaction is + created. +- `"transactionId"`: The identifier of a Bitcoin transaction. It is generated when a + transaction is created. transactionId is only supported on the Bitcoin networks. 
+""" +function get_transaction(network; aws_config::AbstractAWSConfig=global_aws_config()) + return managedblockchain_query( + "POST", + "/get-transaction", + Dict{String,Any}("network" => network); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_transaction( + network, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/get-transaction", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("network" => network), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_asset_contracts(contract_filter) + list_asset_contracts(contract_filter, params::Dict{String,<:Any}) + +Lists all the contracts for a given contract type deployed by an address (either a contract +address or a wallet address). The Bitcoin blockchain networks do not support this operation. + +# Arguments +- `contract_filter`: Contains the filter parameter for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of contracts to list. Default: 100 Even if + additional results can be retrieved, the request can return less results than maxResults or + an empty array of results. To retrieve the next set of results, make another request with + the returned nextToken value. The value of nextToken is null when there are no more results + to return +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +""" +function list_asset_contracts( + contractFilter; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/list-asset-contracts", + Dict{String,Any}("contractFilter" => contractFilter); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_asset_contracts( + contractFilter, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/list-asset-contracts", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("contractFilter" => contractFilter), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_filtered_transaction_events(address_identifier_filter, network) + list_filtered_transaction_events(address_identifier_filter, network, params::Dict{String,<:Any}) + +Lists all the transaction events for an address on the blockchain. This operation is only +supported on the Bitcoin networks. + +# Arguments +- `address_identifier_filter`: This is the unique public address on the blockchain for + which the transaction events are being requested. +- `network`: The blockchain network where the transaction occurred. Valid Values: + BITCOIN_MAINNET | BITCOIN_TESTNET + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"confirmationStatusFilter"`: +- `"maxResults"`: The maximum number of transaction events to list. Default: 100 Even if + additional results can be retrieved, the request can return less results than maxResults or + an empty array of results. To retrieve the next set of results, make another request with + the returned nextToken value. The value of nextToken is null when there are no more results + to return +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +- `"sort"`: The order by which the results will be sorted. 
+- `"timeFilter"`: This container specifies the time frame for the transaction events + returned in the response. +- `"voutFilter"`: This container specifies filtering attributes related to BITCOIN_VOUT + event types +""" +function list_filtered_transaction_events( + addressIdentifierFilter, network; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/list-filtered-transaction-events", + Dict{String,Any}( + "addressIdentifierFilter" => addressIdentifierFilter, "network" => network + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_filtered_transaction_events( + addressIdentifierFilter, + network, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/list-filtered-transaction-events", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "addressIdentifierFilter" => addressIdentifierFilter, + "network" => network, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_token_balances(token_filter) + list_token_balances(token_filter, params::Dict{String,<:Any}) + +This action returns the following for a given blockchain network: Lists all token +balances owned by an address (either a contract address or a wallet address). Lists all +token balances for all tokens created by a contract. Lists all token balances for a given +token. You must always specify the network property of the tokenFilter when using this +operation. + +# Arguments +- `token_filter`: The contract address or a token identifier on the blockchain network by + which to filter the request. You must specify the contractAddress property of this + container when listing tokens minted by a contract. You must always specify the network + property of this container when using this operation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of token balances to return. Default: 100 Even if + additional results can be retrieved, the request can return less results than maxResults or + an empty array of results. To retrieve the next set of results, make another request with + the returned nextToken value. The value of nextToken is null when there are no more results + to return +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +- `"ownerFilter"`: The contract or wallet address on the blockchain network by which to + filter the request. You must specify the address property of the ownerFilter when listing + balances of tokens owned by the address. 
+""" +function list_token_balances(tokenFilter; aws_config::AbstractAWSConfig=global_aws_config()) + return managedblockchain_query( + "POST", + "/list-token-balances", + Dict{String,Any}("tokenFilter" => tokenFilter); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_token_balances( + tokenFilter, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/list-token-balances", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("tokenFilter" => tokenFilter), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_transaction_events(network) + list_transaction_events(network, params::Dict{String,<:Any}) + +Lists all the transaction events for a transaction This action will return transaction +details for all transactions that are confirmed on the blockchain, even if they have not +reached finality. + +# Arguments +- `network`: The blockchain network where the transaction events occurred. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of transaction events to list. Default: 100 Even if + additional results can be retrieved, the request can return less results than maxResults or + an empty array of results. To retrieve the next set of results, make another request with + the returned nextToken value. The value of nextToken is null when there are no more results + to return +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +- `"transactionHash"`: The hash of a transaction. It is generated when a transaction is + created. +- `"transactionId"`: The identifier of a Bitcoin transaction. It is generated when a + transaction is created. transactionId is only supported on the Bitcoin networks. +""" +function list_transaction_events(network; aws_config::AbstractAWSConfig=global_aws_config()) + return managedblockchain_query( + "POST", + "/list-transaction-events", + Dict{String,Any}("network" => network); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_transaction_events( + network, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/list-transaction-events", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("network" => network), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_transactions(address, network) + list_transactions(address, network, params::Dict{String,<:Any}) + +Lists all the transaction events for a transaction. + +# Arguments +- `address`: The address (either a contract or wallet), whose transactions are being + requested. +- `network`: The blockchain network where the transactions occurred. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"confirmationStatusFilter"`: This filter is used to include transactions in the response + that haven't reached finality . Transactions that have reached finality are always part of + the response. +- `"fromBlockchainInstant"`: +- `"maxResults"`: The maximum number of transactions to list. Default: 100 Even if + additional results can be retrieved, the request can return less results than maxResults or + an empty array of results. To retrieve the next set of results, make another request with + the returned nextToken value. 
The value of nextToken is null when there are no more results + to return +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +- `"sort"`: The order by which the results will be sorted. +- `"toBlockchainInstant"`: +""" +function list_transactions( + address, network; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/list-transactions", + Dict{String,Any}("address" => address, "network" => network); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_transactions( + address, + network, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/list-transactions", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("address" => address, "network" => network), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/marketplace_agreement.jl b/src/services/marketplace_agreement.jl new file mode 100644 index 0000000000..6a80506474 --- /dev/null +++ b/src/services/marketplace_agreement.jl @@ -0,0 +1,136 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: marketplace_agreement +using AWS.Compat +using AWS.UUIDs + +""" + describe_agreement(agreement_id) + describe_agreement(agreement_id, params::Dict{String,<:Any}) + +Provides details about an agreement, such as the proposer, acceptor, start date, and end +date. + +# Arguments +- `agreement_id`: The unique identifier of the agreement. + +""" +function describe_agreement(agreementId; aws_config::AbstractAWSConfig=global_aws_config()) + return marketplace_agreement( + "DescribeAgreement", + Dict{String,Any}("agreementId" => agreementId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_agreement( + agreementId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_agreement( + "DescribeAgreement", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("agreementId" => agreementId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agreement_terms(agreement_id) + get_agreement_terms(agreement_id, params::Dict{String,<:Any}) + +Obtains details about the terms in an agreement that you participated in as proposer or +acceptor. The details include: TermType – The type of term, such as LegalTerm, +RenewalTerm, or ConfigurableUpfrontPricingTerm. TermID – The ID of the particular +term, which is common between offer and agreement. TermPayload – The key information +contained in the term, such as the EULA for LegalTerm or pricing and dimensions for various +pricing terms, such as ConfigurableUpfrontPricingTerm or UsageBasedPricingTerm. +Configuration – The buyer/acceptor's selection at the time of agreement creation, such as +the number of units purchased for a dimension or setting the EnableAutoRenew flag. + +# Arguments +- `agreement_id`: The unique identifier of the agreement. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of agreements to return in the response. 
+- `"nextToken"`: A token to specify where to start pagination +""" +function get_agreement_terms(agreementId; aws_config::AbstractAWSConfig=global_aws_config()) + return marketplace_agreement( + "GetAgreementTerms", + Dict{String,Any}("agreementId" => agreementId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agreement_terms( + agreementId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_agreement( + "GetAgreementTerms", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("agreementId" => agreementId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_agreements() + search_agreements(params::Dict{String,<:Any}) + +Searches across all agreements that a proposer or an acceptor has in AWS Marketplace. The +search returns a list of agreements with basic agreement information. The following filter +combinations are supported: PartyType as Proposer + AgreementType + ResourceIdentifier + PartyType as Proposer + AgreementType + OfferId PartyType as Proposer + AgreementType ++ AcceptorAccountId PartyType as Proposer + AgreementType + Status PartyType as +Proposer + AgreementType + ResourceIdentifier + Status PartyType as Proposer + +AgreementType + OfferId + Status PartyType as Proposer + AgreementType + +AcceptorAccountId + Status PartyType as Proposer + AgreementType + ResourceType + +Status PartyType as Proposer + AgreementType + AcceptorAccountId + ResourceType + +Status PartyType as Proposer + AgreementType + AcceptorAccountId + OfferId +PartyType as Proposer + AgreementType + AcceptorAccountId + OfferId + Status PartyType +as Proposer + AgreementType + AcceptorAccountId + ResourceIdentifier PartyType as +Proposer + AgreementType + AcceptorAccountId + ResourceIdentifier + Status PartyType as +Proposer + AgreementType + AcceptorAccountId + ResourceType + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"catalog"`: The catalog in which the agreement was created. +- `"filters"`: The filter name and value pair used to return a specific list of results. + The following filters are supported: ResourceIdentifier – The unique identifier of the + resource. ResourceType – Type of the resource, which is the product (AmiProduct, + ContainerProduct, or SaaSProduct). PartyType – The party type (either Acceptor or + Proposer) of the caller. For agreements where the caller is the proposer, use the Proposer + filter. For agreements where the caller is the acceptor, use the Acceptor filter. + AcceptorAccountId – The AWS account ID of the party accepting the agreement terms. + OfferId – The unique identifier of the offer in which the terms are registered in the + agreement token. Status – The current status of the agreement. Values include ACTIVE, + ARCHIVED, CANCELLED, EXPIRED, RENEWED, REPLACED, and TERMINATED. BeforeEndTime – A + date used to filter agreements with a date before the endTime of an agreement. + AfterEndTime – A date used to filter agreements with a date after the endTime of an + agreement. AgreementType – The type of agreement. Values include PurchaseAgreement or + VendorInsightsAgreement. +- `"maxResults"`: The maximum number of agreements to return in the response. +- `"nextToken"`: A token to specify where to start pagination. +- `"sort"`: An object that contains the SortBy and SortOrder attributes. 
+""" +function search_agreements(; aws_config::AbstractAWSConfig=global_aws_config()) + return marketplace_agreement( + "SearchAgreements"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function search_agreements( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return marketplace_agreement( + "SearchAgreements", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end diff --git a/src/services/marketplace_catalog.jl b/src/services/marketplace_catalog.jl index 0743e84e59..8dfe921e37 100644 --- a/src/services/marketplace_catalog.jl +++ b/src/services/marketplace_catalog.jl @@ -4,6 +4,46 @@ using AWS.AWSServices: marketplace_catalog using AWS.Compat using AWS.UUIDs +""" + batch_describe_entities(entity_request_list) + batch_describe_entities(entity_request_list, params::Dict{String,<:Any}) + +Returns metadata and content for multiple entities. This is the Batch version of the +DescribeEntity API and uses the same IAM permission action as DescribeEntity API. + +# Arguments +- `entity_request_list`: List of entity IDs and the catalogs the entities are present in. + +""" +function batch_describe_entities( + EntityRequestList; aws_config::AbstractAWSConfig=global_aws_config() +) + return marketplace_catalog( + "POST", + "/BatchDescribeEntities", + Dict{String,Any}("EntityRequestList" => EntityRequestList); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_describe_entities( + EntityRequestList, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_catalog( + "POST", + "/BatchDescribeEntities", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("EntityRequestList" => EntityRequestList), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_change_set(catalog, change_set_id) cancel_change_set(catalog, change_set_id, params::Dict{String,<:Any}) @@ -54,10 +94,10 @@ end delete_resource_policy(resource_arn) delete_resource_policy(resource_arn, params::Dict{String,<:Any}) -Deletes a resource-based policy on an Entity that is identified by its resource ARN. +Deletes a resource-based policy on an entity that is identified by its resource ARN. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the Entity resource that is associated +- `resource_arn`: The Amazon Resource Name (ARN) of the entity resource that is associated with the resource policy. """ @@ -179,10 +219,10 @@ end get_resource_policy(resource_arn) get_resource_policy(resource_arn, params::Dict{String,<:Any}) -Gets a resource-based policy of an Entity that is identified by its resource ARN. +Gets a resource-based policy of an entity that is identified by its resource ARN. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the Entity resource that is associated +- `resource_arn`: The Amazon Resource Name (ARN) of the entity resource that is associated with the resource policy. """ @@ -263,17 +303,28 @@ Provides the list of entities of a given type. # Arguments - `catalog`: The catalog related to the request. Fixed value: AWSMarketplace -- `entity_type`: The type of entities to retrieve. +- `entity_type`: The type of entities to retrieve. Valid values are: AmiProduct, + ContainerProduct, DataProduct, SaaSProduct, ProcurementPolicy, Experience, Audience, + BrandingSettings, Offer, Seller, ResaleAuthorization. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"EntityTypeFilters"`: A Union object containing filter shapes for all EntityTypes. Each + EntityTypeFilter shape will have filters applicable for that EntityType that can be used to + search or filter entities. +- `"EntityTypeSort"`: A Union object containing Sort shapes for all EntityTypes. Each + EntityTypeSort shape will have SortBy and SortOrder applicable for fields on that + EntityType. This can be used to sort the results of the filter query. - `"FilterList"`: An array of filter objects. Each filter object contains two attributes, filterName and filterValues. - `"MaxResults"`: Specifies the upper limit of the elements on a single page. If a value isn't provided, the default value is 20. - `"NextToken"`: The value of the next token, if it exists. Null if there are no more results. -- `"OwnershipType"`: +- `"OwnershipType"`: Filters the returned set of entities based on their owner. The default + is SELF. To list entities shared with you through AWS Resource Access Manager (AWS RAM), + set to SHARED. Entities shared through the AWS Marketplace Catalog API PutResourcePolicy + operation can't be discovered through the SHARED parameter. - `"Sort"`: An object that contains two attributes, SortBy and SortOrder. """ function list_entities( @@ -350,12 +401,12 @@ end put_resource_policy(policy, resource_arn) put_resource_policy(policy, resource_arn, params::Dict{String,<:Any}) -Attaches a resource-based policy to an Entity. Examples of an entity include: AmiProduct +Attaches a resource-based policy to an entity. Examples of an entity include: AmiProduct and ContainerProduct. # Arguments - `policy`: The policy document to set; formatted in JSON. -- `resource_arn`: The Amazon Resource Name (ARN) of the Entity resource you want to +- `resource_arn`: The Amazon Resource Name (ARN) of the entity resource you want to associate with a resource policy. """ @@ -403,8 +454,8 @@ set containing a change against an entity that is already locked, you will recei ResourceInUseException error. For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1). For more information about working -with change sets, see Working with change sets. For information on change types for -single-AMI products, see Working with single-AMI products. Als, for more information on +with change sets, see Working with change sets. For information about change types for +single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products. # Arguments @@ -418,6 +469,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ChangeSetTags"`: A list of objects specifying each key name and value for the ChangeSetTags property. - `"ClientRequestToken"`: A unique token to identify the request to ensure idempotency. +- `"Intent"`: The intent related to the request. The default is APPLY. To test your request + before applying changes to your entities, use VALIDATE. This feature is currently available + for adding versions to single-AMI products. For more information, see Add a new version. 
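+
+# Example
+A hedged sketch of validating a change set before applying it via the new Intent
+parameter; the entity type, identifier, and change details are hypothetical placeholders:
+
+    start_change_set(
+        "AWSMarketplace",
+        [
+            Dict(
+                "ChangeType" => "AddRevisions",
+                "Entity" => Dict("Type" => "AmiProduct@1.0", "Identifier" => "entity-id@1"),
+                "Details" => "{}",
+            ),
+        ],
+        Dict("Intent" => "VALIDATE"),
+    )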
""" function start_change_set( Catalog, ChangeSet; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/marketplace_commerce_analytics.jl b/src/services/marketplace_commerce_analytics.jl index ed066ea125..c4bc47d3b2 100644 --- a/src/services/marketplace_commerce_analytics.jl +++ b/src/services/marketplace_commerce_analytics.jl @@ -136,46 +136,51 @@ end start_support_data_export(data_set_type, destination_s3_bucket_name, from_date, role_name_arn, sns_topic_arn) start_support_data_export(data_set_type, destination_s3_bucket_name, from_date, role_name_arn, sns_topic_arn, params::Dict{String,<:Any}) -Given a data set type and a from date, asynchronously publishes the requested customer -support data to the specified S3 bucket and notifies the specified SNS topic once the data -is available. Returns a unique request identifier that can be used to correlate requests -with notifications from the SNS topic. Data sets will be published in comma-separated -values (CSV) format with the file name {data_set_type}_YYYY-MM-DD'T'HH-mm-ss'Z'.csv. If a -file with the same name already exists (e.g. if the same data set is requested twice), the -original file will be overwritten by the new file. Requires a Role with an attached -permissions policy providing Allow permissions for the following actions: s3:PutObject, -s3:GetBucketLocation, sns:GetTopicAttributes, sns:Publish, iam:GetRolePolicy. +This target has been deprecated. Given a data set type and a from date, asynchronously +publishes the requested customer support data to the specified S3 bucket and notifies the +specified SNS topic once the data is available. Returns a unique request identifier that +can be used to correlate requests with notifications from the SNS topic. Data sets will be +published in comma-separated values (CSV) format with the file name +{data_set_type}_YYYY-MM-DD'T'HH-mm-ss'Z'.csv. If a file with the same name already exists +(e.g. if the same data set is requested twice), the original file will be overwritten by +the new file. Requires a Role with an attached permissions policy providing Allow +permissions for the following actions: s3:PutObject, s3:GetBucketLocation, +sns:GetTopicAttributes, sns:Publish, iam:GetRolePolicy. # Arguments -- `data_set_type`: Specifies the data set type to be written to the output csv file. The - data set types customer_support_contacts_data and test_customer_support_contacts_data both - result in a csv file containing the following fields: Product Id, Product Code, Customer - Guid, Subscription Guid, Subscription Start Date, Organization, AWS Account Id, Given Name, - Surname, Telephone Number, Email, Title, Country Code, ZIP Code, Operation Type, and - Operation Time. customer_support_contacts_data Customer support contact data. The data - set will contain all changes (Creates, Updates, and Deletes) to customer support contact - data from the date specified in the from_date parameter. - test_customer_support_contacts_data An example data set containing static test data in the - same format as customer_support_contacts_data -- `destination_s3_bucket_name`: The name (friendly name, not ARN) of the destination S3 - bucket. -- `from_date`: The start date from which to retrieve the data set in UTC. This parameter - only affects the customer_support_contacts_data data set type. -- `role_name_arn`: The Amazon Resource Name (ARN) of the Role with an attached permissions - policy to interact with the provided AWS services. 
-- `sns_topic_arn`: Amazon Resource Name (ARN) for the SNS Topic that will be notified when - the data set has been published or if an error has occurred. +- `data_set_type`: This target has been deprecated. Specifies the data set type to be + written to the output csv file. The data set types customer_support_contacts_data and + test_customer_support_contacts_data both result in a csv file containing the following + fields: Product Id, Product Code, Customer Guid, Subscription Guid, Subscription Start + Date, Organization, AWS Account Id, Given Name, Surname, Telephone Number, Email, Title, + Country Code, ZIP Code, Operation Type, and Operation Time. + customer_support_contacts_data Customer support contact data. The data set will contain all + changes (Creates, Updates, and Deletes) to customer support contact data from the date + specified in the from_date parameter. test_customer_support_contacts_data An example data + set containing static test data in the same format as customer_support_contacts_data +- `destination_s3_bucket_name`: This target has been deprecated. The name (friendly name, + not ARN) of the destination S3 bucket. +- `from_date`: This target has been deprecated. The start date from which to retrieve the + data set in UTC. This parameter only affects the customer_support_contacts_data data set + type. +- `role_name_arn`: This target has been deprecated. The Amazon Resource Name (ARN) of the + Role with an attached permissions policy to interact with the provided AWS services. +- `sns_topic_arn`: This target has been deprecated. Amazon Resource Name (ARN) for the SNS + Topic that will be notified when the data set has been published or if an error has + occurred. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"customerDefinedValues"`: (Optional) Key-value pairs which will be returned, unmodified, - in the Amazon SNS notification message and the data set metadata file. -- `"destinationS3Prefix"`: (Optional) The desired S3 prefix for the published data set, - similar to a directory path in standard file systems. For example, if given the bucket name - \"mybucket\" and the prefix \"myprefix/mydatasets\", the output file \"outputfile\" would - be published to \"s3://mybucket/myprefix/mydatasets/outputfile\". If the prefix directory - structure does not exist, it will be created. If no prefix is provided, the data set will - be published to the S3 bucket root. +- `"customerDefinedValues"`: This target has been deprecated. (Optional) Key-value pairs + which will be returned, unmodified, in the Amazon SNS notification message and the data set + metadata file. +- `"destinationS3Prefix"`: This target has been deprecated. (Optional) The desired S3 + prefix for the published data set, similar to a directory path in standard file systems. + For example, if given the bucket name \"mybucket\" and the prefix \"myprefix/mydatasets\", + the output file \"outputfile\" would be published to + \"s3://mybucket/myprefix/mydatasets/outputfile\". If the prefix directory structure does + not exist, it will be created. If no prefix is provided, the data set will be published to + the S3 bucket root. 
""" function start_support_data_export( dataSetType, diff --git a/src/services/marketplace_deployment.jl b/src/services/marketplace_deployment.jl new file mode 100644 index 0000000000..ab97f638eb --- /dev/null +++ b/src/services/marketplace_deployment.jl @@ -0,0 +1,185 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: marketplace_deployment +using AWS.Compat +using AWS.UUIDs + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists all tags that have been added to a deployment parameter resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) associated with the deployment parameter + resource you want to list tags on. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return marketplace_deployment( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_deployment( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_deployment_parameter(agreement_id, catalog, deployment_parameter, product_id) + put_deployment_parameter(agreement_id, catalog, deployment_parameter, product_id, params::Dict{String,<:Any}) + +Creates or updates a deployment parameter and is targeted by catalog and agreementId. + +# Arguments +- `agreement_id`: The unique identifier of the agreement. +- `catalog`: The catalog related to the request. Fixed value: AWS Marketplace +- `deployment_parameter`: The deployment parameter targeted to the acceptor of an agreement + for which to create the AWS Secret Manager resource. +- `product_id`: The product for which AWS Marketplace will save secrets for the buyer’s + account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: The idempotency token for deployment parameters. A unique identifier for + the new version. +- `"expirationDate"`: The date when deployment parameters expire and are scheduled for + deletion. +- `"tags"`: A map of key-value pairs, where each pair represents a tag saved to the + resource. Tags will only be applied for create operations, and they'll be ignored if the + resource already exists. 
+""" +function put_deployment_parameter( + agreementId, + catalog, + deploymentParameter, + productId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_deployment( + "POST", + "/catalogs/$(catalog)/products/$(productId)/deployment-parameters", + Dict{String,Any}( + "agreementId" => agreementId, + "deploymentParameter" => deploymentParameter, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_deployment_parameter( + agreementId, + catalog, + deploymentParameter, + productId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_deployment( + "POST", + "/catalogs/$(catalog)/products/$(productId)/deployment-parameters", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "agreementId" => agreementId, + "deploymentParameter" => deploymentParameter, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn) + tag_resource(resource_arn, params::Dict{String,<:Any}) + +Tags a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) associated with the resource you want to + tag. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: A map of key-value pairs, where each pair represents a tag present on the + resource. +""" +function tag_resource(resourceArn; aws_config::AbstractAWSConfig=global_aws_config()) + return marketplace_deployment( + "POST", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_deployment( + "POST", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes a tag or list of tags from a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) associated with the resource you want to + remove the tag from. +- `tag_keys`: A list of key names of tags to be removed. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return marketplace_deployment( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return marketplace_deployment( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/mediaconnect.jl b/src/services/mediaconnect.jl index 2a133e00c8..f286958480 100644 --- a/src/services/mediaconnect.jl +++ b/src/services/mediaconnect.jl @@ -589,6 +589,39 @@ function describe_flow( ) end +""" + describe_flow_source_metadata(flow_arn) + describe_flow_source_metadata(flow_arn, params::Dict{String,<:Any}) + +Displays details of the flow's source stream. The response contains information about the +contents of the stream and its programs. 
+ +# Arguments +- `flow_arn`: The Amazon Resource Name (ARN) of the flow. + +""" +function describe_flow_source_metadata( + flowArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return mediaconnect( + "GET", + "/v1/flows/$(flowArn)/source-metadata"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_flow_source_metadata( + flowArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mediaconnect( + "GET", + "/v1/flows/$(flowArn)/source-metadata", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_gateway(gateway_arn) describe_gateway(gateway_arn, params::Dict{String,<:Any}) diff --git a/src/services/mediaconvert.jl b/src/services/mediaconvert.jl index 79b698397d..4e2d914123 100644 --- a/src/services/mediaconvert.jl +++ b/src/services/mediaconvert.jl @@ -441,8 +441,9 @@ end describe_endpoints() describe_endpoints(params::Dict{String,<:Any}) -Send an request with an empty body to the regional API endpoint to get your account API -endpoint. +Send a request with an empty body to the regional API endpoint to get your account API +endpoint. Note that DescribeEndpoints is no longer required. We recommend that you send +your requests directly to the regional endpoint instead. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -511,7 +512,7 @@ end get_job(id) get_job(id, params::Dict{String,<:Any}) -Retrieve the JSON for a specific completed transcoding job. +Retrieve the JSON for a specific transcoding job. # Arguments - `id`: the job ID of the job. @@ -871,6 +872,45 @@ function put_policy( ) end +""" + search_jobs() + search_jobs(params::Dict{String,<:Any}) + +Retrieve a JSON array that includes job details for up to twenty of your most recent jobs. +Optionally filter results further according to input file, queue, or status. To retrieve +the twenty next most recent jobs, use the nextToken string returned with the array. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"inputFile"`: Optional. Provide your input file URL or your partial input file name. The + maximum length for an input file is 300 characters. +- `"maxResults"`: Optional. Number of jobs, up to twenty, that will be returned at one time. +- `"nextToken"`: Optional. Use this string, provided with the response to a previous + request, to request the next batch of jobs. +- `"order"`: Optional. When you request lists of resources, you can specify whether they + are sorted in ASCENDING or DESCENDING order. Default varies by resource. +- `"queue"`: Optional. Provide a queue name, or a queue ARN, to return only jobs from that + queue. +- `"status"`: Optional. A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, + or ERROR. 
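+
+# Example
+An illustrative sketch that filters recent jobs by queue and status; the queue name is a
+placeholder:
+
+    search_jobs(Dict("queue" => "Default", "status" => "COMPLETE", "maxResults" => 10))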
+""" +function search_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return mediaconvert( + "GET", "/2017-08-29/search"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function search_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mediaconvert( + "GET", + "/2017-08-29/search", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(arn, tags) tag_resource(arn, tags, params::Dict{String,<:Any}) diff --git a/src/services/medialive.jl b/src/services/medialive.jl index 13a7f6224b..6a03c9a9a0 100644 --- a/src/services/medialive.jl +++ b/src/services/medialive.jl @@ -275,6 +275,245 @@ function create_channel( ) end +""" + create_cloud_watch_alarm_template(comparison_operator, evaluation_periods, group_identifier, metric_name, name, period, statistic, target_resource_type, threshold, treat_missing_data) + create_cloud_watch_alarm_template(comparison_operator, evaluation_periods, group_identifier, metric_name, name, period, statistic, target_resource_type, threshold, treat_missing_data, params::Dict{String,<:Any}) + +Creates a cloudwatch alarm template to dynamically generate cloudwatch metric alarms on +targeted resource types. + +# Arguments +- `comparison_operator`: +- `evaluation_periods`: The number of periods over which data is compared to the specified + threshold. +- `group_identifier`: A cloudwatch alarm template group's identifier. Can be either be its + id or current name. +- `metric_name`: The name of the metric associated with the alarm. Must be compatible with + targetResourceType. +- `name`: A resource's name. Names must be unique within the scope of a resource type in a + specific region. +- `period`: The period, in seconds, over which the specified statistic is applied. +- `statistic`: +- `target_resource_type`: +- `threshold`: The threshold value to compare with the specified statistic. +- `treat_missing_data`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"datapointsToAlarm"`: The number of datapoints within the evaluation period that must be + breaching to trigger the alarm. +- `"description"`: A resource's optional description. 
+- `"tags"`: +""" +function create_cloud_watch_alarm_template( + comparisonOperator, + evaluationPeriods, + groupIdentifier, + metricName, + name, + period, + statistic, + targetResourceType, + threshold, + treatMissingData; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/cloudwatch-alarm-templates", + Dict{String,Any}( + "comparisonOperator" => comparisonOperator, + "evaluationPeriods" => evaluationPeriods, + "groupIdentifier" => groupIdentifier, + "metricName" => metricName, + "name" => name, + "period" => period, + "statistic" => statistic, + "targetResourceType" => targetResourceType, + "threshold" => threshold, + "treatMissingData" => treatMissingData, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_cloud_watch_alarm_template( + comparisonOperator, + evaluationPeriods, + groupIdentifier, + metricName, + name, + period, + statistic, + targetResourceType, + threshold, + treatMissingData, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/cloudwatch-alarm-templates", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "comparisonOperator" => comparisonOperator, + "evaluationPeriods" => evaluationPeriods, + "groupIdentifier" => groupIdentifier, + "metricName" => metricName, + "name" => name, + "period" => period, + "statistic" => statistic, + "targetResourceType" => targetResourceType, + "threshold" => threshold, + "treatMissingData" => treatMissingData, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_cloud_watch_alarm_template_group(name) + create_cloud_watch_alarm_template_group(name, params::Dict{String,<:Any}) + +Creates a cloudwatch alarm template group to group your cloudwatch alarm templates and to +attach to signal maps for dynamically creating alarms. + +# Arguments +- `name`: A resource's name. Names must be unique within the scope of a resource type in a + specific region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A resource's optional description. +- `"tags"`: +""" +function create_cloud_watch_alarm_template_group( + name; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/cloudwatch-alarm-template-groups", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_cloud_watch_alarm_template_group( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/cloudwatch-alarm-template-groups", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_event_bridge_rule_template(event_type, group_identifier, name) + create_event_bridge_rule_template(event_type, group_identifier, name, params::Dict{String,<:Any}) + +Creates an eventbridge rule template to monitor events and send notifications to your +targeted resources. + +# Arguments +- `event_type`: +- `group_identifier`: An eventbridge rule template group's identifier. Can be either be its + id or current name. +- `name`: A resource's name. Names must be unique within the scope of a resource type in a + specific region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"description"`: A resource's optional description. +- `"eventTargets"`: +- `"tags"`: +""" +function create_event_bridge_rule_template( + eventType, groupIdentifier, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/eventbridge-rule-templates", + Dict{String,Any}( + "eventType" => eventType, "groupIdentifier" => groupIdentifier, "name" => name + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_event_bridge_rule_template( + eventType, + groupIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/eventbridge-rule-templates", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "eventType" => eventType, + "groupIdentifier" => groupIdentifier, + "name" => name, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_event_bridge_rule_template_group(name) + create_event_bridge_rule_template_group(name, params::Dict{String,<:Any}) + +Creates an eventbridge rule template group to group your eventbridge rule templates and to +attach to signal maps for dynamically creating notification rules. + +# Arguments +- `name`: A resource's name. Names must be unique within the scope of a resource type in a + specific region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A resource's optional description. +- `"tags"`: +""" +function create_event_bridge_rule_template_group( + name; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/eventbridge-rule-template-groups", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_event_bridge_rule_template_group( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/eventbridge-rule-template-groups", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_input() create_input(params::Dict{String,<:Any}) @@ -527,6 +766,62 @@ function create_partner_input( ) end +""" + create_signal_map(discovery_entry_point_arn, name) + create_signal_map(discovery_entry_point_arn, name, params::Dict{String,<:Any}) + +Initiates the creation of a new signal map. Will discover a new mediaResourceMap based on +the provided discoveryEntryPointArn. + +# Arguments +- `discovery_entry_point_arn`: A top-level supported AWS resource ARN to discovery a signal + map from. +- `name`: A resource's name. Names must be unique within the scope of a resource type in a + specific region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"cloudWatchAlarmTemplateGroupIdentifiers"`: +- `"description"`: A resource's optional description. 
+- `"eventBridgeRuleTemplateGroupIdentifiers"`: +- `"tags"`: +""" +function create_signal_map( + discoveryEntryPointArn, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/signal-maps", + Dict{String,Any}( + "discoveryEntryPointArn" => discoveryEntryPointArn, "name" => name + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_signal_map( + discoveryEntryPointArn, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/signal-maps", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "discoveryEntryPointArn" => discoveryEntryPointArn, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_tags(resource-arn) create_tags(resource-arn, params::Dict{String,<:Any}) @@ -595,29 +890,34 @@ function delete_channel( end """ - delete_input(input_id) - delete_input(input_id, params::Dict{String,<:Any}) + delete_cloud_watch_alarm_template(identifier) + delete_cloud_watch_alarm_template(identifier, params::Dict{String,<:Any}) -Deletes the input end point +Deletes a cloudwatch alarm template. # Arguments -- `input_id`: Unique ID of the input +- `identifier`: A cloudwatch alarm template's identifier. Can be either be its id or + current name. """ -function delete_input(inputId; aws_config::AbstractAWSConfig=global_aws_config()) +function delete_cloud_watch_alarm_template( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) return medialive( "DELETE", - "/prod/inputs/$(inputId)"; + "/prod/cloudwatch-alarm-templates/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_input( - inputId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function delete_cloud_watch_alarm_template( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "DELETE", - "/prod/inputs/$(inputId)", + "/prod/cloudwatch-alarm-templates/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -625,33 +925,35 @@ function delete_input( end """ - delete_input_security_group(input_security_group_id) - delete_input_security_group(input_security_group_id, params::Dict{String,<:Any}) + delete_cloud_watch_alarm_template_group(identifier) + delete_cloud_watch_alarm_template_group(identifier, params::Dict{String,<:Any}) -Deletes an Input Security Group +Deletes a cloudwatch alarm template group. You must detach this group from all signal maps +and ensure its existing templates are moved to another group or deleted. # Arguments -- `input_security_group_id`: The Input Security Group to delete +- `identifier`: A cloudwatch alarm template group's identifier. Can be either be its id or + current name. 
""" -function delete_input_security_group( - inputSecurityGroupId; aws_config::AbstractAWSConfig=global_aws_config() +function delete_cloud_watch_alarm_template_group( + identifier; aws_config::AbstractAWSConfig=global_aws_config() ) return medialive( "DELETE", - "/prod/inputSecurityGroups/$(inputSecurityGroupId)"; + "/prod/cloudwatch-alarm-template-groups/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_input_security_group( - inputSecurityGroupId, +function delete_cloud_watch_alarm_template_group( + identifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "DELETE", - "/prod/inputSecurityGroups/$(inputSecurityGroupId)", + "/prod/cloudwatch-alarm-template-groups/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -659,31 +961,34 @@ function delete_input_security_group( end """ - delete_multiplex(multiplex_id) - delete_multiplex(multiplex_id, params::Dict{String,<:Any}) + delete_event_bridge_rule_template(identifier) + delete_event_bridge_rule_template(identifier, params::Dict{String,<:Any}) -Delete a multiplex. The multiplex must be idle. +Deletes an eventbridge rule template. # Arguments -- `multiplex_id`: The ID of the multiplex. +- `identifier`: An eventbridge rule template's identifier. Can be either be its id or + current name. """ -function delete_multiplex(multiplexId; aws_config::AbstractAWSConfig=global_aws_config()) +function delete_event_bridge_rule_template( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) return medialive( "DELETE", - "/prod/multiplexes/$(multiplexId)"; + "/prod/eventbridge-rule-templates/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_multiplex( - multiplexId, +function delete_event_bridge_rule_template( + identifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "DELETE", - "/prod/multiplexes/$(multiplexId)", + "/prod/eventbridge-rule-templates/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -691,35 +996,35 @@ function delete_multiplex( end """ - delete_multiplex_program(multiplex_id, program_name) - delete_multiplex_program(multiplex_id, program_name, params::Dict{String,<:Any}) + delete_event_bridge_rule_template_group(identifier) + delete_event_bridge_rule_template_group(identifier, params::Dict{String,<:Any}) -Delete a program from a multiplex. +Deletes an eventbridge rule template group. You must detach this group from all signal maps +and ensure its existing templates are moved to another group or deleted. # Arguments -- `multiplex_id`: The ID of the multiplex that the program belongs to. -- `program_name`: The multiplex program name. +- `identifier`: An eventbridge rule template group's identifier. Can be either be its id or + current name. 
""" -function delete_multiplex_program( - multiplexId, programName; aws_config::AbstractAWSConfig=global_aws_config() +function delete_event_bridge_rule_template_group( + identifier; aws_config::AbstractAWSConfig=global_aws_config() ) return medialive( "DELETE", - "/prod/multiplexes/$(multiplexId)/programs/$(programName)"; + "/prod/eventbridge-rule-template-groups/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_multiplex_program( - multiplexId, - programName, +function delete_event_bridge_rule_template_group( + identifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "DELETE", - "/prod/multiplexes/$(multiplexId)/programs/$(programName)", + "/prod/eventbridge-rule-template-groups/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -727,33 +1032,29 @@ function delete_multiplex_program( end """ - delete_reservation(reservation_id) - delete_reservation(reservation_id, params::Dict{String,<:Any}) + delete_input(input_id) + delete_input(input_id, params::Dict{String,<:Any}) -Delete an expired reservation. +Deletes the input end point # Arguments -- `reservation_id`: Unique reservation ID, e.g. '1234567' +- `input_id`: Unique ID of the input """ -function delete_reservation( - reservationId; aws_config::AbstractAWSConfig=global_aws_config() -) +function delete_input(inputId; aws_config::AbstractAWSConfig=global_aws_config()) return medialive( "DELETE", - "/prod/reservations/$(reservationId)"; + "/prod/inputs/$(inputId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_reservation( - reservationId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), +function delete_input( + inputId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return medialive( "DELETE", - "/prod/reservations/$(reservationId)", + "/prod/inputs/$(inputId)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -761,31 +1062,33 @@ function delete_reservation( end """ - delete_schedule(channel_id) - delete_schedule(channel_id, params::Dict{String,<:Any}) + delete_input_security_group(input_security_group_id) + delete_input_security_group(input_security_group_id, params::Dict{String,<:Any}) -Delete all schedule actions on a channel. +Deletes an Input Security Group # Arguments -- `channel_id`: Id of the channel whose schedule is being deleted. 
+- `input_security_group_id`: The Input Security Group to delete """ -function delete_schedule(channelId; aws_config::AbstractAWSConfig=global_aws_config()) +function delete_input_security_group( + inputSecurityGroupId; aws_config::AbstractAWSConfig=global_aws_config() +) return medialive( "DELETE", - "/prod/channels/$(channelId)/schedule"; + "/prod/inputSecurityGroups/$(inputSecurityGroupId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_schedule( - channelId, +function delete_input_security_group( + inputSecurityGroupId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "DELETE", - "/prod/channels/$(channelId)/schedule", + "/prod/inputSecurityGroups/$(inputSecurityGroupId)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -793,10 +1096,176 @@ function delete_schedule( end """ - delete_tags(resource-arn, tag_keys) - delete_tags(resource-arn, tag_keys, params::Dict{String,<:Any}) + delete_multiplex(multiplex_id) + delete_multiplex(multiplex_id, params::Dict{String,<:Any}) -Removes tags for a resource +Delete a multiplex. The multiplex must be idle. + +# Arguments +- `multiplex_id`: The ID of the multiplex. + +""" +function delete_multiplex(multiplexId; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "DELETE", + "/prod/multiplexes/$(multiplexId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_multiplex( + multiplexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "DELETE", + "/prod/multiplexes/$(multiplexId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_multiplex_program(multiplex_id, program_name) + delete_multiplex_program(multiplex_id, program_name, params::Dict{String,<:Any}) + +Delete a program from a multiplex. + +# Arguments +- `multiplex_id`: The ID of the multiplex that the program belongs to. +- `program_name`: The multiplex program name. + +""" +function delete_multiplex_program( + multiplexId, programName; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "DELETE", + "/prod/multiplexes/$(multiplexId)/programs/$(programName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_multiplex_program( + multiplexId, + programName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "DELETE", + "/prod/multiplexes/$(multiplexId)/programs/$(programName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_reservation(reservation_id) + delete_reservation(reservation_id, params::Dict{String,<:Any}) + +Delete an expired reservation. + +# Arguments +- `reservation_id`: Unique reservation ID, e.g. 
'1234567' + +""" +function delete_reservation( + reservationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "DELETE", + "/prod/reservations/$(reservationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_reservation( + reservationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "DELETE", + "/prod/reservations/$(reservationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_schedule(channel_id) + delete_schedule(channel_id, params::Dict{String,<:Any}) + +Delete all schedule actions on a channel. + +# Arguments +- `channel_id`: Id of the channel whose schedule is being deleted. + +""" +function delete_schedule(channelId; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "DELETE", + "/prod/channels/$(channelId)/schedule"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_schedule( + channelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "DELETE", + "/prod/channels/$(channelId)/schedule", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_signal_map(identifier) + delete_signal_map(identifier, params::Dict{String,<:Any}) + +Deletes the specified signal map. + +# Arguments +- `identifier`: A signal map's identifier. Can be either be its id or current name. + +""" +function delete_signal_map(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "DELETE", + "/prod/signal-maps/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_signal_map( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "DELETE", + "/prod/signal-maps/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_tags(resource-arn, tag_keys) + delete_tags(resource-arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags for a resource # Arguments - `resource-arn`: @@ -829,6 +1298,33 @@ function delete_tags( ) end +""" + describe_account_configuration() + describe_account_configuration(params::Dict{String,<:Any}) + +Describe account configuration + +""" +function describe_account_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "GET", + "/prod/accountConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_account_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/accountConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_channel(channel_id) describe_channel(channel_id, params::Dict{String,<:Any}) @@ -1173,98 +1669,116 @@ function describe_schedule( end """ - list_channels() - list_channels(params::Dict{String,<:Any}) + describe_thumbnails(channel_id, pipeline_id, thumbnail_type) + describe_thumbnails(channel_id, pipeline_id, thumbnail_type, params::Dict{String,<:Any}) -Produces list of channels that have been created +Describe the latest thumbnails data. 
+ +# Arguments +- `channel_id`: Unique ID of the channel +- `pipeline_id`: Pipeline ID (\"0\" or \"1\") +- `thumbnail_type`: thumbnail type -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: -- `"nextToken"`: """ -function list_channels(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_thumbnails( + channelId, pipelineId, thumbnailType; aws_config::AbstractAWSConfig=global_aws_config() +) return medialive( - "GET", "/prod/channels"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "GET", + "/prod/channels/$(channelId)/thumbnails", + Dict{String,Any}("pipelineId" => pipelineId, "thumbnailType" => thumbnailType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function list_channels( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function describe_thumbnails( + channelId, + pipelineId, + thumbnailType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "GET", - "/prod/channels", - params; + "/prod/channels/$(channelId)/thumbnails", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "pipelineId" => pipelineId, "thumbnailType" => thumbnailType + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - list_input_device_transfers(transfer_type) - list_input_device_transfers(transfer_type, params::Dict{String,<:Any}) + get_cloud_watch_alarm_template(identifier) + get_cloud_watch_alarm_template(identifier, params::Dict{String,<:Any}) -List input devices that are currently being transferred. List input devices that you are -transferring from your AWS account or input devices that another AWS account is -transferring to you. +Retrieves the specified cloudwatch alarm template. # Arguments -- `transfer_type`: +- `identifier`: A cloudwatch alarm template's identifier. Can be either be its id or + current name. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: -- `"nextToken"`: """ -function list_input_device_transfers( - transferType; aws_config::AbstractAWSConfig=global_aws_config() +function get_cloud_watch_alarm_template( + identifier; aws_config::AbstractAWSConfig=global_aws_config() ) return medialive( "GET", - "/prod/inputDeviceTransfers", - Dict{String,Any}("transferType" => transferType); + "/prod/cloudwatch-alarm-templates/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_input_device_transfers( - transferType, +function get_cloud_watch_alarm_template( + identifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "GET", - "/prod/inputDeviceTransfers", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("transferType" => transferType), params) - ); + "/prod/cloudwatch-alarm-templates/$(identifier)", + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - list_input_devices() - list_input_devices(params::Dict{String,<:Any}) + get_cloud_watch_alarm_template_group(identifier) + get_cloud_watch_alarm_template_group(identifier, params::Dict{String,<:Any}) -List input devices +Retrieves the specified cloudwatch alarm template group. + +# Arguments +- `identifier`: A cloudwatch alarm template group's identifier. Can be either be its id or + current name. 
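+
+# Example
+A sketch of a typical lookup; the identifier is illustrative:
+
+    group = get_cloud_watch_alarm_template_group("my-alarm-template-group")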
-# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: -- `"nextToken"`: """ -function list_input_devices(; aws_config::AbstractAWSConfig=global_aws_config()) +function get_cloud_watch_alarm_template_group( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) return medialive( - "GET", "/prod/inputDevices"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "GET", + "/prod/cloudwatch-alarm-template-groups/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function list_input_devices( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function get_cloud_watch_alarm_template_group( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "GET", - "/prod/inputDevices", + "/prod/cloudwatch-alarm-template-groups/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1272,16 +1786,369 @@ function list_input_devices( end """ - list_input_security_groups() - list_input_security_groups(params::Dict{String,<:Any}) + get_event_bridge_rule_template(identifier) + get_event_bridge_rule_template(identifier, params::Dict{String,<:Any}) -Produces a list of Input Security Groups for an account +Retrieves the specified eventbridge rule template. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: -- `"nextToken"`: -""" +# Arguments +- `identifier`: An eventbridge rule template's identifier. Can be either be its id or + current name. + +""" +function get_event_bridge_rule_template( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/eventbridge-rule-templates/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_event_bridge_rule_template( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "GET", + "/prod/eventbridge-rule-templates/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_event_bridge_rule_template_group(identifier) + get_event_bridge_rule_template_group(identifier, params::Dict{String,<:Any}) + +Retrieves the specified eventbridge rule template group. + +# Arguments +- `identifier`: An eventbridge rule template group's identifier. Can be either be its id or + current name. + +""" +function get_event_bridge_rule_template_group( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/eventbridge-rule-template-groups/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_event_bridge_rule_template_group( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "GET", + "/prod/eventbridge-rule-template-groups/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_signal_map(identifier) + get_signal_map(identifier, params::Dict{String,<:Any}) + +Retrieves the specified signal map. + +# Arguments +- `identifier`: A signal map's identifier. Can be either be its id or current name. 
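+
+# Example
+An illustrative retrieval by name (an existing id would work equally well):
+
+    signal_map = get_signal_map("my-signal-map")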
+ +""" +function get_signal_map(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "GET", + "/prod/signal-maps/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_signal_map( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "GET", + "/prod/signal-maps/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_channels() + list_channels(params::Dict{String,<:Any}) + +Produces list of channels that have been created + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: +- `"nextToken"`: +""" +function list_channels(; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "GET", "/prod/channels"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_channels( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/channels", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_cloud_watch_alarm_template_groups() + list_cloud_watch_alarm_template_groups(params::Dict{String,<:Any}) + +Lists cloudwatch alarm template groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: +- `"nextToken"`: A token used to retrieve the next set of results in paginated list + responses. +- `"scope"`: Represents the scope of a resource, with options for all scopes, AWS provided + resources, or local resources. +- `"signalMapIdentifier"`: A signal map's identifier. Can be either be its id or current + name. +""" +function list_cloud_watch_alarm_template_groups(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/cloudwatch-alarm-template-groups"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cloud_watch_alarm_template_groups( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/cloudwatch-alarm-template-groups", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_cloud_watch_alarm_templates() + list_cloud_watch_alarm_templates(params::Dict{String,<:Any}) + +Lists cloudwatch alarm templates. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"groupIdentifier"`: A cloudwatch alarm template group's identifier. Can be either be its + id or current name. +- `"maxResults"`: +- `"nextToken"`: A token used to retrieve the next set of results in paginated list + responses. +- `"scope"`: Represents the scope of a resource, with options for all scopes, AWS provided + resources, or local resources. +- `"signalMapIdentifier"`: A signal map's identifier. Can be either be its id or current + name. 
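+
+# Example
+A sketch that filters by an illustrative group name and caps the page size via the
+optional `params` dictionary:
+
+    list_cloud_watch_alarm_templates(
+        Dict("groupIdentifier" => "my-alarm-template-group", "maxResults" => 10)
+    )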
+""" +function list_cloud_watch_alarm_templates(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/cloudwatch-alarm-templates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cloud_watch_alarm_templates( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/cloudwatch-alarm-templates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_event_bridge_rule_template_groups() + list_event_bridge_rule_template_groups(params::Dict{String,<:Any}) + +Lists eventbridge rule template groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: +- `"nextToken"`: A token used to retrieve the next set of results in paginated list + responses. +- `"signalMapIdentifier"`: A signal map's identifier. Can be either be its id or current + name. +""" +function list_event_bridge_rule_template_groups(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/eventbridge-rule-template-groups"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_event_bridge_rule_template_groups( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/eventbridge-rule-template-groups", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_event_bridge_rule_templates() + list_event_bridge_rule_templates(params::Dict{String,<:Any}) + +Lists eventbridge rule templates. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"groupIdentifier"`: An eventbridge rule template group's identifier. Can be either be + its id or current name. +- `"maxResults"`: +- `"nextToken"`: A token used to retrieve the next set of results in paginated list + responses. +- `"signalMapIdentifier"`: A signal map's identifier. Can be either be its id or current + name. +""" +function list_event_bridge_rule_templates(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/eventbridge-rule-templates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_event_bridge_rule_templates( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/eventbridge-rule-templates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_input_device_transfers(transfer_type) + list_input_device_transfers(transfer_type, params::Dict{String,<:Any}) + +List input devices that are currently being transferred. List input devices that you are +transferring from your AWS account or input devices that another AWS account is +transferring to you. + +# Arguments +- `transfer_type`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: +- `"nextToken"`: +""" +function list_input_device_transfers( + transferType; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/inputDeviceTransfers", + Dict{String,Any}("transferType" => transferType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_input_device_transfers( + transferType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "GET", + "/prod/inputDeviceTransfers", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("transferType" => transferType), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_input_devices() + list_input_devices(params::Dict{String,<:Any}) + +List input devices + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: +- `"nextToken"`: +""" +function list_input_devices(; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "GET", "/prod/inputDevices"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_input_devices( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/inputDevices", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_input_security_groups() + list_input_security_groups(params::Dict{String,<:Any}) + +Produces a list of Input Security Groups for an account + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: +- `"nextToken"`: +""" function list_input_security_groups(; aws_config::AbstractAWSConfig=global_aws_config()) return medialive( "GET", @@ -1471,6 +2338,39 @@ function list_reservations( ) end +""" + list_signal_maps() + list_signal_maps(params::Dict{String,<:Any}) + +Lists signal maps. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"cloudWatchAlarmTemplateGroupIdentifier"`: A cloudwatch alarm template group's + identifier. Can be either be its id or current name. +- `"eventBridgeRuleTemplateGroupIdentifier"`: An eventbridge rule template group's + identifier. Can be either be its id or current name. +- `"maxResults"`: +- `"nextToken"`: A token used to retrieve the next set of results in paginated list + responses. +""" +function list_signal_maps(; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "GET", "/prod/signal-maps"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_signal_maps( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/signal-maps", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource-arn) list_tags_for_resource(resource-arn, params::Dict{String,<:Any}) @@ -1559,42 +2459,147 @@ function purchase_offering( end """ - reboot_input_device(input_device_id) - reboot_input_device(input_device_id, params::Dict{String,<:Any}) + reboot_input_device(input_device_id) + reboot_input_device(input_device_id, params::Dict{String,<:Any}) + +Send a reboot command to the specified input device. The device will begin rebooting within +a few seconds of sending the command. When the reboot is complete, the device’s +connection status will change to connected. 
+ +# Arguments +- `input_device_id`: The unique ID of the input device to reboot. For example, + hd-123456789abcdef. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"force"`: Force a reboot of an input device. If the device is streaming, it will stop + streaming and begin rebooting within a few seconds of sending the command. If the device + was streaming prior to the reboot, the device will resume streaming when the reboot + completes. +""" +function reboot_input_device( + inputDeviceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/inputDevices/$(inputDeviceId)/reboot"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reboot_input_device( + inputDeviceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/inputDevices/$(inputDeviceId)/reboot", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + reject_input_device_transfer(input_device_id) + reject_input_device_transfer(input_device_id, params::Dict{String,<:Any}) + +Reject the transfer of the specified input device to your AWS account. + +# Arguments +- `input_device_id`: The unique ID of the input device to reject. For example, + hd-123456789abcdef. + +""" +function reject_input_device_transfer( + inputDeviceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/inputDevices/$(inputDeviceId)/reject"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reject_input_device_transfer( + inputDeviceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/inputDevices/$(inputDeviceId)/reject", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + restart_channel_pipelines(channel_id) + restart_channel_pipelines(channel_id, params::Dict{String,<:Any}) + +Restart pipelines in one channel that is currently running. + +# Arguments +- `channel_id`: ID of channel + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"pipelineIds"`: An array of pipelines to restart in this channel. Format PIPELINE_0 or + PIPELINE_1. +""" +function restart_channel_pipelines( + channelId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/channels/$(channelId)/restartChannelPipelines"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function restart_channel_pipelines( + channelId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/channels/$(channelId)/restartChannelPipelines", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_channel(channel_id) + start_channel(channel_id, params::Dict{String,<:Any}) -Send a reboot command to the specified input device. The device will begin rebooting within -a few seconds of sending the command. When the reboot is complete, the device’s -connection status will change to connected. +Starts an existing channel # Arguments -- `input_device_id`: The unique ID of the input device to reboot. For example, - hd-123456789abcdef. +- `channel_id`: A request to start a channel -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"force"`: Force a reboot of an input device. If the device is streaming, it will stop - streaming and begin rebooting within a few seconds of sending the command. If the device - was streaming prior to the reboot, the device will resume streaming when the reboot - completes. """ -function reboot_input_device( - inputDeviceId; aws_config::AbstractAWSConfig=global_aws_config() -) +function start_channel(channelId; aws_config::AbstractAWSConfig=global_aws_config()) return medialive( "POST", - "/prod/inputDevices/$(inputDeviceId)/reboot"; + "/prod/channels/$(channelId)/start"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function reboot_input_device( - inputDeviceId, +function start_channel( + channelId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "POST", - "/prod/inputDevices/$(inputDeviceId)/reboot", + "/prod/channels/$(channelId)/start", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1602,34 +2607,33 @@ function reboot_input_device( end """ - reject_input_device_transfer(input_device_id) - reject_input_device_transfer(input_device_id, params::Dict{String,<:Any}) + start_delete_monitor_deployment(identifier) + start_delete_monitor_deployment(identifier, params::Dict{String,<:Any}) -Reject the transfer of the specified input device to your AWS account. +Initiates a deployment to delete the monitor of the specified signal map. # Arguments -- `input_device_id`: The unique ID of the input device to reject. For example, - hd-123456789abcdef. +- `identifier`: A signal map's identifier. Can be either be its id or current name. """ -function reject_input_device_transfer( - inputDeviceId; aws_config::AbstractAWSConfig=global_aws_config() +function start_delete_monitor_deployment( + identifier; aws_config::AbstractAWSConfig=global_aws_config() ) return medialive( - "POST", - "/prod/inputDevices/$(inputDeviceId)/reject"; + "DELETE", + "/prod/signal-maps/$(identifier)/monitor-deployment"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function reject_input_device_transfer( - inputDeviceId, +function start_delete_monitor_deployment( + identifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( - "POST", - "/prod/inputDevices/$(inputDeviceId)/reject", + "DELETE", + "/prod/signal-maps/$(identifier)/monitor-deployment", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1637,31 +2641,36 @@ function reject_input_device_transfer( end """ - start_channel(channel_id) - start_channel(channel_id, params::Dict{String,<:Any}) + start_input_device(input_device_id) + start_input_device(input_device_id, params::Dict{String,<:Any}) -Starts an existing channel +Start an input device that is attached to a MediaConnect flow. (There is no need to start a +device that is attached to a MediaLive input; MediaLive starts the device when the channel +starts.) # Arguments -- `channel_id`: A request to start a channel +- `input_device_id`: The unique ID of the input device to start. For example, + hd-123456789abcdef. 
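+
+# Example
+A minimal sketch using the example device ID from above:
+
+    start_input_device("hd-123456789abcdef")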
""" -function start_channel(channelId; aws_config::AbstractAWSConfig=global_aws_config()) +function start_input_device( + inputDeviceId; aws_config::AbstractAWSConfig=global_aws_config() +) return medialive( "POST", - "/prod/channels/$(channelId)/start"; + "/prod/inputDevices/$(inputDeviceId)/start"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function start_channel( - channelId, +function start_input_device( + inputDeviceId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return medialive( "POST", - "/prod/channels/$(channelId)/start", + "/prod/inputDevices/$(inputDeviceId)/start", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1709,6 +2718,43 @@ function start_input_device_maintenance_window( ) end +""" + start_monitor_deployment(identifier) + start_monitor_deployment(identifier, params::Dict{String,<:Any}) + +Initiates a deployment to deploy the latest monitor of the specified signal map. + +# Arguments +- `identifier`: A signal map's identifier. Can be either be its id or current name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dryRun"`: +""" +function start_monitor_deployment( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "POST", + "/prod/signal-maps/$(identifier)/monitor-deployment"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_monitor_deployment( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/signal-maps/$(identifier)/monitor-deployment", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_multiplex(multiplex_id) start_multiplex(multiplex_id, params::Dict{String,<:Any}) @@ -1742,6 +2788,52 @@ function start_multiplex( ) end +""" + start_update_signal_map(identifier) + start_update_signal_map(identifier, params::Dict{String,<:Any}) + +Initiates an update for the specified signal map. Will discover a new signal map if a +changed discoveryEntryPointArn is provided. + +# Arguments +- `identifier`: A signal map's identifier. Can be either be its id or current name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"cloudWatchAlarmTemplateGroupIdentifiers"`: +- `"description"`: A resource's optional description. +- `"discoveryEntryPointArn"`: A top-level supported AWS resource ARN to discovery a signal + map from. +- `"eventBridgeRuleTemplateGroupIdentifiers"`: +- `"forceRediscovery"`: If true, will force a rediscovery of a signal map if an unchanged + discoveryEntryPointArn is provided. +- `"name"`: A resource's name. Names must be unique within the scope of a resource type in + a specific region. 
+""" +function start_update_signal_map( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "PATCH", + "/prod/signal-maps/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_update_signal_map( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "PATCH", + "/prod/signal-maps/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_channel(channel_id) stop_channel(channel_id, params::Dict{String,<:Any}) @@ -1774,6 +2866,41 @@ function stop_channel( ) end +""" + stop_input_device(input_device_id) + stop_input_device(input_device_id, params::Dict{String,<:Any}) + +Stop an input device that is attached to a MediaConnect flow. (There is no need to stop a +device that is attached to a MediaLive input; MediaLive automatically stops the device when +the channel stops.) + +# Arguments +- `input_device_id`: The unique ID of the input device to stop. For example, + hd-123456789abcdef. + +""" +function stop_input_device(inputDeviceId; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "POST", + "/prod/inputDevices/$(inputDeviceId)/stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_input_device( + inputDeviceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "POST", + "/prod/inputDevices/$(inputDeviceId)/stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_multiplex(multiplex_id) stop_multiplex(multiplex_id, params::Dict{String,<:Any}) @@ -1847,6 +2974,36 @@ function transfer_input_device( ) end +""" + update_account_configuration() + update_account_configuration(params::Dict{String,<:Any}) + +Update account configuration + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountConfiguration"`: +""" +function update_account_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "PUT", + "/prod/accountConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_account_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "PUT", + "/prod/accountConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_channel(channel_id) update_channel(channel_id, params::Dict{String,<:Any}) @@ -1934,6 +3091,180 @@ function update_channel_class( ) end +""" + update_cloud_watch_alarm_template(identifier) + update_cloud_watch_alarm_template(identifier, params::Dict{String,<:Any}) + +Updates the specified cloudwatch alarm template. + +# Arguments +- `identifier`: A cloudwatch alarm template's identifier. Can be either be its id or + current name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"comparisonOperator"`: +- `"datapointsToAlarm"`: The number of datapoints within the evaluation period that must be + breaching to trigger the alarm. +- `"description"`: A resource's optional description. +- `"evaluationPeriods"`: The number of periods over which data is compared to the specified + threshold. +- `"groupIdentifier"`: A cloudwatch alarm template group's identifier. Can be either be its + id or current name. 
+- `"metricName"`: The name of the metric associated with the alarm. Must be compatible with + targetResourceType. +- `"name"`: A resource's name. Names must be unique within the scope of a resource type in + a specific region. +- `"period"`: The period, in seconds, over which the specified statistic is applied. +- `"statistic"`: +- `"targetResourceType"`: +- `"threshold"`: The threshold value to compare with the specified statistic. +- `"treatMissingData"`: +""" +function update_cloud_watch_alarm_template( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "PATCH", + "/prod/cloudwatch-alarm-templates/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_cloud_watch_alarm_template( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "PATCH", + "/prod/cloudwatch-alarm-templates/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_cloud_watch_alarm_template_group(identifier) + update_cloud_watch_alarm_template_group(identifier, params::Dict{String,<:Any}) + +Updates the specified cloudwatch alarm template group. + +# Arguments +- `identifier`: A cloudwatch alarm template group's identifier. Can be either be its id or + current name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A resource's optional description. +""" +function update_cloud_watch_alarm_template_group( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "PATCH", + "/prod/cloudwatch-alarm-template-groups/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_cloud_watch_alarm_template_group( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "PATCH", + "/prod/cloudwatch-alarm-template-groups/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_event_bridge_rule_template(identifier) + update_event_bridge_rule_template(identifier, params::Dict{String,<:Any}) + +Updates the specified eventbridge rule template. + +# Arguments +- `identifier`: An eventbridge rule template's identifier. Can be either be its id or + current name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A resource's optional description. +- `"eventTargets"`: +- `"eventType"`: +- `"groupIdentifier"`: An eventbridge rule template group's identifier. Can be either be + its id or current name. +- `"name"`: A resource's name. Names must be unique within the scope of a resource type in + a specific region. 
+""" +function update_event_bridge_rule_template( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "PATCH", + "/prod/eventbridge-rule-templates/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_event_bridge_rule_template( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "PATCH", + "/prod/eventbridge-rule-templates/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_event_bridge_rule_template_group(identifier) + update_event_bridge_rule_template_group(identifier, params::Dict{String,<:Any}) + +Updates the specified eventbridge rule template group. + +# Arguments +- `identifier`: An eventbridge rule template group's identifier. Can be either be its id or + current name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A resource's optional description. +""" +function update_event_bridge_rule_template_group( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "PATCH", + "/prod/eventbridge-rule-template-groups/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_event_bridge_rule_template_group( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "PATCH", + "/prod/eventbridge-rule-template-groups/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_input(input_id) update_input(input_id, params::Dict{String,<:Any}) @@ -1995,6 +3326,7 @@ Updates the parameters for the input device. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"availabilityZone"`: The Availability Zone you want associated with this input device. - `"hdDeviceSettings"`: The settings that you want to apply to the HD input device. - `"name"`: The name that you assigned to this input device (not the unique ID). - `"uhdDeviceSettings"`: The settings that you want to apply to the UHD input device. diff --git a/src/services/mediapackagev2.jl b/src/services/mediapackagev2.jl index 408a61e09b..03b2321ae5 100644 --- a/src/services/mediapackagev2.jl +++ b/src/services/mediapackagev2.jl @@ -26,6 +26,11 @@ channel groups. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: Enter any descriptive text that helps you to identify the channel. +- `"InputType"`: The input type will be an immutable field which will be used to define + whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to + HLS to preserve current behavior. The allowed values are: HLS - The HLS streaming + specification (which defines M3U8 manifests and TS segments). CMAF - The DASH-IF CMAF + Ingest specification (which defines CMAF segments with optional DASH manifests). - `"tags"`: A comma-separated list of tag key:value pairs that you define. For example: \"Key1\": \"Value1\", \"Key2\": \"Value2\" - `"x-amzn-client-token"`: A unique, case-sensitive token that you provide to ensure the @@ -152,8 +157,10 @@ with each request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DashManifests"`: A DASH manifest configuration. 
- `"Description"`: Enter any descriptive text that helps you to identify the origin endpoint. +- `"ForceEndpointErrorConfiguration"`: The failover settings for the endpoint. - `"HlsManifests"`: An HTTP live streaming (HLS) manifest configuration. - `"LowLatencyHlsManifests"`: A low-latency HLS manifest configuration. - `"Segment"`: The segment configuration, including the segment name, duration, and other @@ -965,6 +972,9 @@ reflected for a few minutes. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: Any descriptive information that you want to add to the channel for future identification purposes. +- `"x-amzn-update-if-match"`: The expected current Entity Tag (ETag) for the resource. If + the specified ETag does not match the resource's current entity tag, the update request + will be rejected. """ function update_channel( ChannelGroupName, ChannelName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1008,6 +1018,9 @@ reflected for a few minutes. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: Any descriptive information that you want to add to the channel group for future identification purposes. +- `"x-amzn-update-if-match"`: The expected current Entity Tag (ETag) for the resource. If + the specified ETag does not match the resource's current entity tag, the update request + will be rejected. """ function update_channel_group( ChannelGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1055,8 +1068,10 @@ make that impact the video output may not be reflected for a few minutes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DashManifests"`: A DASH manifest configuration. - `"Description"`: Any descriptive information that you want to add to the origin endpoint for future identification purposes. +- `"ForceEndpointErrorConfiguration"`: The failover settings for the endpoint. - `"HlsManifests"`: An HTTP live streaming (HLS) manifest configuration. - `"LowLatencyHlsManifests"`: A low-latency HLS manifest configuration. - `"Segment"`: The segment configuration, including the segment name, duration, and other @@ -1065,6 +1080,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window. The maximum startover window is 1,209,600 seconds (14 days). +- `"x-amzn-update-if-match"`: The expected current Entity Tag (ETag) for the resource. If + the specified ETag does not match the resource's current entity tag, the update request + will be rejected. """ function update_origin_endpoint( ChannelGroupName, diff --git a/src/services/mediatailor.jl b/src/services/mediatailor.jl index deccd935c4..9f8e4b04ef 100644 --- a/src/services/mediatailor.jl +++ b/src/services/mediatailor.jl @@ -120,10 +120,13 @@ the MediaTailor User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Audiences"`: The list of audiences defined in channel. - `"FillerSlate"`: The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode. - `"Tier"`: The tier of the channel. 
+- `"TimeShiftConfiguration"`: The time-shifted viewing configuration you want to associate + to the channel. - `"tags"`: The tags to assign to the channel. Tags are key-value pairs that you can associate with Amazon resources to help with organization, access control, and cost tracking. For more information, see Tagging AWS Elemental MediaTailor Resources. @@ -296,6 +299,7 @@ programs in the MediaTailor User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AdBreaks"`: The ad break configuration settings. +- `"AudienceMedia"`: The list of AudienceMedia defined in program. - `"LiveSourceName"`: The name of the LiveSource for this Program. - `"VodSourceName"`: The name that's used to refer to a VOD source. """ @@ -959,6 +963,7 @@ Retrieves information about your channel's schedule. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"audience"`: The single audience for GetChannelScheduleRequest. - `"durationMinutes"`: The duration in minutes of the channel schedule. - `"maxResults"`: The maximum number of channel schedules that you want MediaTailor to return in response to the current request. If there are more than MaxResults channel @@ -1437,6 +1442,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ConfigurationAliases"`: The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables. - `"DashConfiguration"`: The configuration for DASH content. +- `"InsertionMode"`: The setting that controls whether players can use stitched or guided + ad insertion. The default, STITCHED_ONLY, forces all player sessions to use stitched + (server-side) ad insertion. Choosing PLAYER_SELECT allows players to select either stitched + or guided ad insertion at session-initialization time. The default for players that do not + specify an insertion mode is stitched. - `"LivePreRollConfiguration"`: The configuration for pre-roll ad insertion. - `"ManifestProcessingRules"`: The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor. @@ -1639,9 +1649,12 @@ the MediaTailor User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Audiences"`: The list of audiences defined in channel. - `"FillerSlate"`: The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode. +- `"TimeShiftConfiguration"`: The time-shifted viewing configuration you want to associate + to the channel. """ function update_channel( ChannelName, Outputs; aws_config::AbstractAWSConfig=global_aws_config() @@ -1732,6 +1745,7 @@ Updates a program within a channel. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AdBreaks"`: The ad break configuration settings. +- `"AudienceMedia"`: The list of AudienceMedia defined in program. 
""" function update_program( ChannelName, diff --git a/src/services/medical_imaging.jl b/src/services/medical_imaging.jl new file mode 100644 index 0000000000..7467de1fc8 --- /dev/null +++ b/src/services/medical_imaging.jl @@ -0,0 +1,751 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: medical_imaging +using AWS.Compat +using AWS.UUIDs + +""" + copy_image_set(copy_image_set_information, datastore_id, source_image_set_id) + copy_image_set(copy_image_set_information, datastore_id, source_image_set_id, params::Dict{String,<:Any}) + +Copy an image set. + +# Arguments +- `copy_image_set_information`: Copy image set information. +- `datastore_id`: The data store identifier. +- `source_image_set_id`: The source image set identifier. + +""" +function copy_image_set( + copyImageSetInformation, + datastoreId, + sourceImageSetId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(sourceImageSetId)/copyImageSet", + Dict{String,Any}("copyImageSetInformation" => copyImageSetInformation); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function copy_image_set( + copyImageSetInformation, + datastoreId, + sourceImageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(sourceImageSetId)/copyImageSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("copyImageSetInformation" => copyImageSetInformation), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_datastore(client_token) + create_datastore(client_token, params::Dict{String,<:Any}) + +Create a data store. + +# Arguments +- `client_token`: A unique identifier for API idempotency. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"datastoreName"`: The data store name. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) assigned to the Key Management Service + (KMS) key for accessing encrypted data. +- `"tags"`: The tags provided when creating a data store. +""" +function create_datastore(clientToken; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "POST", + "/datastore", + Dict{String,Any}("clientToken" => clientToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_datastore( + clientToken, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => clientToken), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_datastore(datastore_id) + delete_datastore(datastore_id, params::Dict{String,<:Any}) + +Delete a data store. Before a data store can be deleted, you must first delete all image +sets within it. + +# Arguments +- `datastore_id`: The data store identifier. 
+ +""" +function delete_datastore(datastoreId; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "DELETE", + "/datastore/$(datastoreId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_datastore( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "DELETE", + "/datastore/$(datastoreId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_image_set(datastore_id, image_set_id) + delete_image_set(datastore_id, image_set_id, params::Dict{String,<:Any}) + +Delete an image set. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +""" +function delete_image_set( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/deleteImageSet"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_image_set( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/deleteImageSet", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_datastore(datastore_id) + get_datastore(datastore_id, params::Dict{String,<:Any}) + +Get data store properties. + +# Arguments +- `datastore_id`: The data store identifier. + +""" +function get_datastore(datastoreId; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "GET", + "/datastore/$(datastoreId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_datastore( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/datastore/$(datastoreId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_dicomimport_job(datastore_id, job_id) + get_dicomimport_job(datastore_id, job_id, params::Dict{String,<:Any}) + +Get the import job properties to learn more about the job or job progress. The jobStatus +refers to the execution of the import job. Therefore, an import job can return a jobStatus +as COMPLETED even if validation issues are discovered during the import process. If a +jobStatus returns as COMPLETED, we still recommend you review the output manifests written +to S3, as they provide details on the success or failure of individual P10 object imports. + +# Arguments +- `datastore_id`: The data store identifier. +- `job_id`: The import job identifier. 
+ +""" +function get_dicomimport_job( + datastoreId, jobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", + "/getDICOMImportJob/datastore/$(datastoreId)/job/$(jobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_dicomimport_job( + datastoreId, + jobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/getDICOMImportJob/datastore/$(datastoreId)/job/$(jobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_image_frame(datastore_id, image_frame_information, image_set_id) + get_image_frame(datastore_id, image_frame_information, image_set_id, params::Dict{String,<:Any}) + +Get an image frame (pixel data) for an image set. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_frame_information`: Information about the image frame (pixel data) identifier. +- `image_set_id`: The image set identifier. + +""" +function get_image_frame( + datastoreId, + imageFrameInformation, + imageSetId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageFrame", + Dict{String,Any}("imageFrameInformation" => imageFrameInformation); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_image_frame( + datastoreId, + imageFrameInformation, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageFrame", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("imageFrameInformation" => imageFrameInformation), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_image_set(datastore_id, image_set_id) + get_image_set(datastore_id, image_set_id, params::Dict{String,<:Any}) + +Get image set properties. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"version"`: The image set version identifier. +""" +function get_image_set( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSet"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_image_set( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSet", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_image_set_metadata(datastore_id, image_set_id) + get_image_set_metadata(datastore_id, image_set_id, params::Dict{String,<:Any}) + +Get metadata attributes for an image set. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"version"`: The image set version identifier. 
+""" +function get_image_set_metadata( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSetMetadata"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_image_set_metadata( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSetMetadata", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_datastores() + list_datastores(params::Dict{String,<:Any}) + +List data stores. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"datastoreStatus"`: The data store status. +- `"maxResults"`: Valid Range: Minimum value of 1. Maximum value of 50. +- `"nextToken"`: The pagination token used to request the list of data stores on the next + page. +""" +function list_datastores(; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "GET", "/datastore"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_datastores( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", "/datastore", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_dicomimport_jobs(datastore_id) + list_dicomimport_jobs(datastore_id, params::Dict{String,<:Any}) + +List import jobs created for a specific data store. + +# Arguments +- `datastore_id`: The data store identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"jobStatus"`: The filters for listing import jobs based on status. +- `"maxResults"`: The max results count. The upper bound is determined by load testing. +- `"nextToken"`: The pagination token used to request the list of import jobs on the next + page. +""" +function list_dicomimport_jobs( + datastoreId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", + "/listDICOMImportJobs/datastore/$(datastoreId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_dicomimport_jobs( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/listDICOMImportJobs/datastore/$(datastoreId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_image_set_versions(datastore_id, image_set_id) + list_image_set_versions(datastore_id, image_set_id, params::Dict{String,<:Any}) + +List image set versions. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The max results count. +- `"nextToken"`: The pagination token used to request the list of image set versions on the + next page. 
+""" +function list_image_set_versions( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/listImageSetVersions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_image_set_versions( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/listImageSetVersions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists all tags associated with a medical imaging resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the medical imaging resource to list + tags for. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_image_sets(datastore_id) + search_image_sets(datastore_id, params::Dict{String,<:Any}) + +Search image sets based on defined input attributes. SearchImageSets accepts a single +search query parameter and returns a paginated response of all image sets that have the +matching criteria. All date range queries must be input as (lowerBound, upperBound). By +default, SearchImageSets uses the updatedAt field for sorting in descending order from +newest to oldest. + +# Arguments +- `datastore_id`: The identifier of the data store where the image sets reside. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that can be returned in a search. +- `"nextToken"`: The token used for pagination of results returned in the response. Use the + token returned from the previous request to continue results where the previous request + ended. +- `"searchCriteria"`: The search criteria that filters by applying a maximum of 1 item to + SearchByAttribute. +""" +function search_image_sets(datastoreId; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/searchImageSets"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_image_sets( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/searchImageSets", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_dicomimport_job(client_token, data_access_role_arn, datastore_id, input_s3_uri, output_s3_uri) + start_dicomimport_job(client_token, data_access_role_arn, datastore_id, input_s3_uri, output_s3_uri, params::Dict{String,<:Any}) + +Start importing bulk data into an ACTIVE data store. The import job imports DICOM P10 files +found in the S3 prefix specified by the inputS3Uri parameter. 
The import job stores +processing results in the file specified by the outputS3Uri parameter. + +# Arguments +- `client_token`: A unique identifier for API idempotency. +- `data_access_role_arn`: The Amazon Resource Name (ARN) of the IAM role that grants + permission to access medical imaging resources. +- `datastore_id`: The data store identifier. +- `input_s3_uri`: The input prefix path for the S3 bucket that contains the DICOM files to + be imported. +- `output_s3_uri`: The output prefix of the S3 bucket to upload the results of the DICOM + import job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"inputOwnerAccountId"`: The account ID of the source S3 bucket owner. +- `"jobName"`: The import job name. +""" +function start_dicomimport_job( + clientToken, + dataAccessRoleArn, + datastoreId, + inputS3Uri, + outputS3Uri; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/startDICOMImportJob/datastore/$(datastoreId)", + Dict{String,Any}( + "clientToken" => clientToken, + "dataAccessRoleArn" => dataAccessRoleArn, + "inputS3Uri" => inputS3Uri, + "outputS3Uri" => outputS3Uri, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_dicomimport_job( + clientToken, + dataAccessRoleArn, + datastoreId, + inputS3Uri, + outputS3Uri, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/startDICOMImportJob/datastore/$(datastoreId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "dataAccessRoleArn" => dataAccessRoleArn, + "inputS3Uri" => inputS3Uri, + "outputS3Uri" => outputS3Uri, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds a user-specified key and value tag to a medical imaging resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the medical imaging resource that tags + are being added to. +- `tags`: The user-specified key and value tag pairs added to a medical imaging resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags from a medical imaging resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the medical imaging resource that tags + are being removed from. +- `tag_keys`: The keys for the tags to be removed from the medical imaging resource.
+ +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_image_set_metadata(datastore_id, image_set_id, latest_version, update_image_set_metadata_updates) + update_image_set_metadata(datastore_id, image_set_id, latest_version, update_image_set_metadata_updates, params::Dict{String,<:Any}) + +Update image set metadata attributes. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. +- `latest_version`: The latest image set version identifier. +- `update_image_set_metadata_updates`: Update image set metadata updates. + +""" +function update_image_set_metadata( + datastoreId, + imageSetId, + latestVersion, + updateImageSetMetadataUpdates; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/updateImageSetMetadata", + Dict{String,Any}( + "latestVersion" => latestVersion, + "updateImageSetMetadataUpdates" => updateImageSetMetadataUpdates, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_image_set_metadata( + datastoreId, + imageSetId, + latestVersion, + updateImageSetMetadataUpdates, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/updateImageSetMetadata", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "latestVersion" => latestVersion, + "updateImageSetMetadataUpdates" => updateImageSetMetadataUpdates, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/mgn.jl b/src/services/mgn.jl index e8c9a0c63b..94253284c6 100644 --- a/src/services/mgn.jl +++ b/src/services/mgn.jl @@ -13,6 +13,9 @@ Archive application. # Arguments - `application_id`: Application ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function archive_application( applicationID; aws_config::AbstractAWSConfig=global_aws_config() @@ -50,6 +53,9 @@ Archive wave. # Arguments - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function archive_wave(waveID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -82,6 +88,9 @@ Associate applications to wave. - `application_ids`: Application IDs list. - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function associate_applications( applicationIDs, waveID; aws_config::AbstractAWSConfig=global_aws_config() @@ -125,6 +134,9 @@ Associate source servers to application. - `application_id`: Application ID. - `source_server_ids`: Source server IDs list. 
+# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function associate_source_servers( applicationID, sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config() @@ -175,6 +187,9 @@ if the Source Server is already launchable (dataReplicationInfo.lagDuration is n - `source_server_id`: The request to change the source server migration lifecycle state by source server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: The request to change the source server migration account ID. """ function change_server_life_cycle_state( lifeCycle, sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -221,6 +236,7 @@ Create application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Application description. - `"tags"`: Application tags. """ @@ -245,6 +261,53 @@ function create_application( ) end +""" + create_connector(name, ssm_instance_id) + create_connector(name, ssm_instance_id, params::Dict{String,<:Any}) + +Create Connector. + +# Arguments +- `name`: Create Connector request name. +- `ssm_instance_id`: Create Connector request SSM instance ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ssmCommandConfig"`: Create Connector request SSM command config. +- `"tags"`: Create Connector request tags. +""" +function create_connector( + name, ssmInstanceID; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/CreateConnector", + Dict{String,Any}("name" => name, "ssmInstanceID" => ssmInstanceID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_connector( + name, + ssmInstanceID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/CreateConnector", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "ssmInstanceID" => ssmInstanceID), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_launch_configuration_template() create_launch_configuration_template(params::Dict{String,<:Any}) @@ -325,6 +388,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ebsEncryptionKeyArn"`: Request to configure an EBS encryption key during Replication Settings template creation. - `"tags"`: Request to configure tags during Replication Settings template creation. +- `"useFipsEndpoint"`: Request to use Fips Endpoint during Replication Settings template + creation. """ function create_replication_configuration_template( associateDefaultSecurityGroup, @@ -414,6 +479,7 @@ Create wave. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Wave description. - `"tags"`: Wave tags. """ @@ -447,6 +513,9 @@ Delete application. # Arguments - `application_id`: Application ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. 
""" function delete_application( applicationID; aws_config::AbstractAWSConfig=global_aws_config() @@ -475,6 +544,41 @@ function delete_application( ) end +""" + delete_connector(connector_id) + delete_connector(connector_id, params::Dict{String,<:Any}) + +Delete Connector. + +# Arguments +- `connector_id`: Delete Connector request connector ID. + +""" +function delete_connector(connectorID; aws_config::AbstractAWSConfig=global_aws_config()) + return mgn( + "POST", + "/DeleteConnector", + Dict{String,Any}("connectorID" => connectorID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_connector( + connectorID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/DeleteConnector", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("connectorID" => connectorID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_job(job_id) delete_job(job_id, params::Dict{String,<:Any}) @@ -484,6 +588,9 @@ Deletes a single Job by ID. # Arguments - `job_id`: Request to delete Job from service by Job ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to delete Job from service by Account ID. """ function delete_job(jobID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -605,6 +712,9 @@ Deletes a single source server by ID. # Arguments - `source_server_id`: Request to delete Source Server from service by Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to delete Source Server from service by Account ID. """ function delete_source_server( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -681,6 +791,9 @@ Delete wave. # Arguments - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function delete_wave(waveID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -714,6 +827,7 @@ Retrieves detailed job log items with paging. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to describe Job log Account ID. - `"maxResults"`: Request to describe Job log item maximum results. - `"nextToken"`: Request to describe Job log next token. """ @@ -750,6 +864,7 @@ available only to *Support* and only used in response to relevant support ticket # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to describe job log items by Account ID. - `"filters"`: Request to describe Job log filters. - `"maxResults"`: Request to describe job log items by max results. - `"nextToken"`: Request to describe job log items by next token. @@ -849,6 +964,7 @@ Retrieves all SourceServers or multiple SourceServers by ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to filter Source Servers list by Accoun ID. - `"filters"`: Request to filter Source Servers list. - `"maxResults"`: Request to filter Source Servers list by maximum results. - `"nextToken"`: Request to filter Source Servers list by next token. @@ -914,6 +1030,9 @@ Disassociate applications from wave. - `application_ids`: Application IDs list. 
- `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function disassociate_applications( applicationIDs, waveID; aws_config::AbstractAWSConfig=global_aws_config() @@ -957,6 +1076,9 @@ Disassociate source servers from application. - `application_id`: Application ID. - `source_server_ids`: Source server IDs list. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function disassociate_source_servers( applicationID, sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config() @@ -1012,6 +1134,9 @@ dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be null # Arguments - `source_server_id`: Request to disconnect Source Server from service by Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to disconnect Source Server from service by Account ID. """ function disconnect_from_service( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1057,6 +1182,9 @@ dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be null # Arguments - `source_server_id`: Request to finalize Cutover by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to finalize Cutover by Source Account ID. """ function finalize_cutover(sourceServerID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -1092,6 +1220,9 @@ Lists all LaunchConfigurations available, filtered by Source Server IDs. # Arguments - `source_server_id`: Request to get Launch Configuration information by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to get Launch Configuration information by Account ID. """ function get_launch_configuration( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1129,6 +1260,9 @@ Lists all ReplicationConfigurations, filtered by Source Server ID. # Arguments - `source_server_id`: Request to get Replication Configuration by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to get Replication Configuration by Account ID. """ function get_replication_configuration( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1189,6 +1323,7 @@ Retrieves all applications or multiple applications by ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Applications list Account ID. - `"filters"`: Applications list filters. - `"maxResults"`: Maximum results to return when listing applications. - `"nextToken"`: Request next token. @@ -1210,6 +1345,35 @@ function list_applications( ) end +""" + list_connectors() + list_connectors(params::Dict{String,<:Any}) + +List Connectors. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: List Connectors Request filters. +- `"maxResults"`: List Connectors Request max results. +- `"nextToken"`: List Connectors Request next token. 
+""" +function list_connectors(; aws_config::AbstractAWSConfig=global_aws_config()) + return mgn( + "POST", "/ListConnectors"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_connectors( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/ListConnectors", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_export_errors(export_id) list_export_errors(export_id, params::Dict{String,<:Any}) @@ -1346,6 +1510,37 @@ function list_imports( ) end +""" + list_managed_accounts() + list_managed_accounts(params::Dict{String,<:Any}) + +List Managed Accounts. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: List managed accounts request max results. +- `"nextToken"`: List managed accounts request next token. +""" +function list_managed_accounts(; aws_config::AbstractAWSConfig=global_aws_config()) + return mgn( + "POST", + "/ListManagedAccounts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_managed_accounts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/ListManagedAccounts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_source_server_actions(source_server_id) list_source_server_actions(source_server_id, params::Dict{String,<:Any}) @@ -1357,6 +1552,8 @@ List source server post migration custom actions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID to return when listing source server post migration custom + actions. - `"filters"`: Filters to apply when listing source server post migration custom actions. - `"maxResults"`: Maximum amount of items to return when listing source server post migration custom actions. @@ -1480,6 +1677,7 @@ Retrieves all waves or multiple waves by ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request account ID. - `"filters"`: Waves list filters. - `"maxResults"`: Maximum results to return when listing waves. - `"nextToken"`: Request next token. @@ -1506,6 +1704,9 @@ lifecycle. state which equals DISCONNECTED or CUTOVER. # Arguments - `source_server_id`: Mark as archived by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Mark as archived by Account ID. """ function mark_as_archived(sourceServerID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -1532,6 +1733,46 @@ function mark_as_archived( ) end +""" + pause_replication(source_server_id) + pause_replication(source_server_id, params::Dict{String,<:Any}) + +Pause Replication. + +# Arguments +- `source_server_id`: Pause Replication Request source server ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Pause Replication Request account ID. 
+""" +function pause_replication( + sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/PauseReplication", + Dict{String,Any}("sourceServerID" => sourceServerID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function pause_replication( + sourceServerID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/PauseReplication", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceServerID" => sourceServerID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_source_server_action(action_id, action_name, document_identifier, order, source_server_id) put_source_server_action(action_id, action_name, document_identifier, order, source_server_id, params::Dict{String,<:Any}) @@ -1547,6 +1788,7 @@ Put source server post migration custom action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Source server post migration custom account ID. - `"active"`: Source server post migration custom action active status. - `"category"`: Source server post migration custom action category. - `"description"`: Source server post migration custom action description. @@ -1697,6 +1939,9 @@ Remove source server post migration custom action. - `action_id`: Source server post migration custom action ID to remove. - `source_server_id`: Source server ID of the post migration custom action to remove. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Source server post migration account ID. """ function remove_source_server_action( actionID, sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1784,6 +2029,46 @@ function remove_template_action( ) end +""" + resume_replication(source_server_id) + resume_replication(source_server_id, params::Dict{String,<:Any}) + +Resume Replication. + +# Arguments +- `source_server_id`: Resume Replication Request source server ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Resume Replication Request account ID. +""" +function resume_replication( + sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/ResumeReplication", + Dict{String,Any}("sourceServerID" => sourceServerID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function resume_replication( + sourceServerID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/ResumeReplication", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceServerID" => sourceServerID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ retry_data_replication(source_server_id) retry_data_replication(source_server_id, params::Dict{String,<:Any}) @@ -1796,6 +2081,9 @@ state. # Arguments - `source_server_id`: Retry data replication for Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Retry data replication for Account ID. """ function retry_data_replication( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1837,6 +2125,7 @@ property to CUTTING_OVER. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"accountID"`: Start Cutover by Account IDs - `"tags"`: Start Cutover by Tags. """ function start_cutover(sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1963,6 +2252,9 @@ Starts replication for SNAPSHOT_SHIPPING agents. # Arguments - `source_server_id`: ID of source server on which to start replication. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID on which to start replication. """ function start_replication( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2004,6 +2296,7 @@ property to TESTING. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Start Test for Account ID. - `"tags"`: Start Test by Tags. """ function start_test(sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2033,6 +2326,44 @@ function start_test( ) end +""" + stop_replication(source_server_id) + stop_replication(source_server_id, params::Dict{String,<:Any}) + +Stop Replication. + +# Arguments +- `source_server_id`: Stop Replication Request source server ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Stop Replication Request account ID. +""" +function stop_replication(sourceServerID; aws_config::AbstractAWSConfig=global_aws_config()) + return mgn( + "POST", + "/StopReplication", + Dict{String,Any}("sourceServerID" => sourceServerID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_replication( + sourceServerID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/StopReplication", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceServerID" => sourceServerID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -2084,6 +2415,7 @@ CUTOVER. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Terminate Target instance by Account ID - `"tags"`: Terminate Target instance by Tags. """ function terminate_target_instances( @@ -2124,6 +2456,9 @@ Unarchive application. # Arguments - `application_id`: Application ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function unarchive_application( applicationID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2161,6 +2496,9 @@ Unarchive wave. # Arguments - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function unarchive_wave(waveID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -2232,6 +2570,7 @@ Update application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Application description. - `"name"`: Application name. """ @@ -2262,17 +2601,58 @@ function update_application( ) end +""" + update_connector(connector_id) + update_connector(connector_id, params::Dict{String,<:Any}) + +Update Connector. + +# Arguments +- `connector_id`: Update Connector request connector ID. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"name"`: Update Connector request name. +- `"ssmCommandConfig"`: Update Connector request SSM command config. +""" +function update_connector(connectorID; aws_config::AbstractAWSConfig=global_aws_config()) + return mgn( + "POST", + "/UpdateConnector", + Dict{String,Any}("connectorID" => connectorID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_connector( + connectorID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/UpdateConnector", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("connectorID" => connectorID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_launch_configuration(source_server_id) update_launch_configuration(source_server_id, params::Dict{String,<:Any}) -Updates multiple LaunchConfigurations by Source Server ID. +Updates multiple LaunchConfigurations by Source Server ID. bootMode valid values are +LEGACY_BIOS | UEFI # Arguments - `source_server_id`: Update Launch configuration by Source Server ID request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Update Launch configuration Account ID. - `"bootMode"`: Update Launch configuration boot mode request. - `"copyPrivateIp"`: Update Launch configuration copy Private IP request. - `"copyTags"`: Update Launch configuration copy Tags request. @@ -2381,6 +2761,7 @@ Allows you to update multiple ReplicationConfigurations by Source Server ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Update replication configuration Account ID request. - `"associateDefaultSecurityGroup"`: Update replication configuration associate default Application Migration Service Security group request. - `"bandwidthThrottling"`: Update replication configuration bandwidth throttling request. @@ -2400,6 +2781,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"stagingAreaTags"`: Update replication configuration Staging Area Tags request. - `"useDedicatedReplicationServer"`: Update replication configuration use dedicated Replication Server request. +- `"useFipsEndpoint"`: Update replication configuration use Fips Endpoint. """ function update_replication_configuration( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2462,6 +2844,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"stagingAreaTags"`: Update replication configuration template Staging Area Tags request. - `"useDedicatedReplicationServer"`: Update replication configuration template use dedicated Replication Server request. +- `"useFipsEndpoint"`: Update replication configuration template use Fips Endpoint request. """ function update_replication_configuration_template( replicationConfigurationTemplateID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2499,6 +2882,47 @@ function update_replication_configuration_template( ) end +""" + update_source_server(source_server_id) + update_source_server(source_server_id, params::Dict{String,<:Any}) + +Update Source Server. + +# Arguments +- `source_server_id`: Update Source Server request source server ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"accountID"`: Update Source Server request account ID. +- `"connectorAction"`: Update Source Server request connector action. +""" +function update_source_server( + sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/UpdateSourceServer", + Dict{String,Any}("sourceServerID" => sourceServerID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_source_server( + sourceServerID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/UpdateSourceServer", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceServerID" => sourceServerID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_source_server_replication_type(replication_type, source_server_id) update_source_server_replication_type(replication_type, source_server_id, params::Dict{String,<:Any}) @@ -2510,6 +2934,9 @@ replication type. - `replication_type`: Replication type to which to update source server. - `source_server_id`: ID of source server on which to update replication type. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID on which to update replication type. """ function update_source_server_replication_type( replicationType, sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2558,6 +2985,7 @@ Update wave. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Wave description. - `"name"`: Wave name. """ diff --git a/src/services/migrationhub_config.jl b/src/services/migrationhub_config.jl index 3b9679bf81..6dd1dfda89 100644 --- a/src/services/migrationhub_config.jl +++ b/src/services/migrationhub_config.jl @@ -50,6 +50,43 @@ function create_home_region_control( ) end +""" + delete_home_region_control(control_id) + delete_home_region_control(control_id, params::Dict{String,<:Any}) + +This operation deletes the home region configuration for the calling account. The operation +does not delete discovery or migration tracking data in the home region. + +# Arguments +- `control_id`: A unique identifier that's generated for each home region control. It's + always a string that begins with \"hrc-\" followed by 12 lowercase letters and numbers. 
+ +""" +function delete_home_region_control( + ControlId; aws_config::AbstractAWSConfig=global_aws_config() +) + return migrationhub_config( + "DeleteHomeRegionControl", + Dict{String,Any}("ControlId" => ControlId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_home_region_control( + ControlId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return migrationhub_config( + "DeleteHomeRegionControl", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ControlId" => ControlId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_home_region_controls() describe_home_region_controls(params::Dict{String,<:Any}) diff --git a/src/services/migrationhuborchestrator.jl b/src/services/migrationhuborchestrator.jl index a0a77fe97a..ce63e6c83d 100644 --- a/src/services/migrationhuborchestrator.jl +++ b/src/services/migrationhuborchestrator.jl @@ -5,46 +5,96 @@ using AWS.Compat using AWS.UUIDs """ - create_workflow(application_configuration_id, input_parameters, name, template_id) - create_workflow(application_configuration_id, input_parameters, name, template_id, params::Dict{String,<:Any}) + create_template(template_name, template_source) + create_template(template_name, template_source, params::Dict{String,<:Any}) + +Creates a migration workflow template. + +# Arguments +- `template_name`: The name of the migration workflow template. +- `template_source`: The source of the migration workflow template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Idempotency in the Smithy + documentation. +- `"tags"`: The tags to add to the migration workflow template. +- `"templateDescription"`: A description of the migration workflow template. +""" +function create_template( + templateName, templateSource; aws_config::AbstractAWSConfig=global_aws_config() +) + return migrationhuborchestrator( + "POST", + "/template", + Dict{String,Any}( + "templateName" => templateName, + "templateSource" => templateSource, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_template( + templateName, + templateSource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return migrationhuborchestrator( + "POST", + "/template", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "templateName" => templateName, + "templateSource" => templateSource, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_workflow(input_parameters, name, template_id) + create_workflow(input_parameters, name, template_id, params::Dict{String,<:Any}) Create a workflow to orchestrate your migrations. # Arguments -- `application_configuration_id`: The configuration ID of the application configured in - Application Discovery Service. - `input_parameters`: The input parameters required to create a migration workflow. - `name`: The name of the migration workflow. - `template_id`: The ID of the template. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"applicationConfigurationId"`: The configuration ID of the application configured in + Application Discovery Service. - `"description"`: The description of the migration workflow. - `"stepTargets"`: The servers on which a step will be run. - `"tags"`: The tags to add on a migration workflow. """ function create_workflow( - applicationConfigurationId, - inputParameters, - name, - templateId; - aws_config::AbstractAWSConfig=global_aws_config(), + inputParameters, name, templateId; aws_config::AbstractAWSConfig=global_aws_config() ) return migrationhuborchestrator( "POST", "/migrationworkflow/", Dict{String,Any}( - "applicationConfigurationId" => applicationConfigurationId, - "inputParameters" => inputParameters, - "name" => name, - "templateId" => templateId, + "inputParameters" => inputParameters, "name" => name, "templateId" => templateId ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_workflow( - applicationConfigurationId, inputParameters, name, templateId, @@ -58,7 +108,6 @@ function create_workflow( mergewith( _merge, Dict{String,Any}( - "applicationConfigurationId" => applicationConfigurationId, "inputParameters" => inputParameters, "name" => name, "templateId" => templateId, @@ -188,6 +237,33 @@ function create_workflow_step_group( ) end +""" + delete_template(id) + delete_template(id, params::Dict{String,<:Any}) + +Deletes a migration workflow template. + +# Arguments +- `id`: The ID of the request to delete a migration workflow template. + +""" +function delete_template(id; aws_config::AbstractAWSConfig=global_aws_config()) + return migrationhuborchestrator( + "DELETE", "/template/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function delete_template( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return migrationhuborchestrator( + "DELETE", + "/template/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_workflow(id) delete_workflow(id, params::Dict{String,<:Any}) @@ -452,7 +528,7 @@ Get a step in the migration workflow. # Arguments - `id`: The ID of the step. -- `step_group_id`: desThe ID of the step group. +- `step_group_id`: The ID of the step group. - `workflow_id`: The ID of the migration workflow. """ @@ -997,6 +1073,45 @@ function untag_resource( ) end +""" + update_template(id) + update_template(id, params::Dict{String,<:Any}) + +Updates a migration workflow template. + +# Arguments +- `id`: The ID of the request to update a migration workflow template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. +- `"templateDescription"`: The description of the migration workflow template to update. +- `"templateName"`: The name of the migration workflow template to update. 
+""" +function update_template(id; aws_config::AbstractAWSConfig=global_aws_config()) + return migrationhuborchestrator( + "POST", + "/template/$(id)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_template( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return migrationhuborchestrator( + "POST", + "/template/$(id)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_workflow(id) update_workflow(id, params::Dict{String,<:Any}) diff --git a/src/services/migrationhubstrategy.jl b/src/services/migrationhubstrategy.jl index be4ed88c31..8b21abe9af 100644 --- a/src/services/migrationhubstrategy.jl +++ b/src/services/migrationhubstrategy.jl @@ -323,6 +323,43 @@ function get_server_strategies( ) end +""" + list_analyzable_servers() + list_analyzable_servers(params::Dict{String,<:Any}) + +Retrieves a list of all the servers fetched from customer vCenter using Strategy +Recommendation Collector. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of items to include in the response. The maximum value + is 100. +- `"nextToken"`: The token from a previous call that you use to retrieve the next set of + results. For example, if a previous call to this action returned 100 items, but you set + maxResults to 10. You'll receive a set of 10 results along with a token. You then use the + returned token to retrieve the next set of 10. +- `"sort"`: Specifies whether to sort by ascending (ASC) or descending (DESC) order. +""" +function list_analyzable_servers(; aws_config::AbstractAWSConfig=global_aws_config()) + return migrationhubstrategy( + "POST", + "/list-analyzable-servers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_analyzable_servers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return migrationhubstrategy( + "POST", + "/list-analyzable-servers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_application_components() list_application_components(params::Dict{String,<:Any}) @@ -513,6 +550,7 @@ end # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assessmentDataSourceType"`: The data source type of an assessment to be started. - `"assessmentTargets"`: List of criteria for assessment. - `"s3bucketForAnalysisData"`: The S3 bucket used by the collectors to send analysis data to the service. The bucket name must begin with migrationhub-strategy-. diff --git a/src/services/mq.jl b/src/services/mq.jl index 810818bae8..69f6b4e24a 100644 --- a/src/services/mq.jl +++ b/src/services/mq.jl @@ -17,17 +17,21 @@ broker instance. ec2:DeleteNetworkInterface ec2:DeleteNetworkInterfacePermission ec2:DetachNetworkInterface ec2:DescribeInternetGateways ec2:DescribeNetworkInterfaces ec2:DescribeNetworkInterfacePermissions ec2:DescribeRouteTables ec2:DescribeSecurityGroups ec2:DescribeSubnets ec2:DescribeVpcs For more information, see Create an IAM User and Get -Your AWS Credentials and Never Modify or Delete the Amazon MQ Elastic Network Interface in -the Amazon MQ Developer Guide. 
+Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ Elastic +Network Interface in the Amazon MQ Developer Guide. # Arguments - `auto_minor_version_upgrade`: Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot. Set to true by default, if no value is specified. -- `broker_name`: Required. The broker's name. This value must be unique in your AWS - account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, - and must not contain white spaces, brackets, wildcard characters, or special characters. +- `broker_name`: Required. The broker's name. This value must be unique in your Amazon Web + Services account, 1-50 characters long, must contain only letters, numbers, dashes, and + underscores, and must not contain white spaces, brackets, wildcard characters, or special + characters. Do not add personally identifiable information (PII) or other confidential or + sensitive information in broker names. Broker names are accessible to other Amazon Web + Services services, including CloudWatch Logs. Broker names are not intended to be used for + private or sensitive data. - `deployment_mode`: Required. The broker's deployment mode. - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. @@ -36,13 +40,10 @@ the Amazon MQ Developer Guide. - `host_instance_type`: Required. The broker's instance type. - `publicly_accessible`: Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided. -- `users`: Required. The list of broker users (persons or applications) who can access - queues and topics. This value can contain only alphanumeric characters, dashes, periods, - underscores, and tildes (- . _ ~). This value must be 2-100 characters long. Amazon MQ for - RabbitMQ When you create an Amazon MQ for RabbitMQ broker, one and only one administrative - user is accepted and created when a broker is first provisioned. All subsequent broker - users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web - console. +- `users`: The list of broker users (persons or applications) who can access queues and + topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is + accepted and created when a broker is first provisioned. All subsequent broker users are + created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -50,11 +51,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys broker. The default is SIMPLE. - `"configuration"`: A list of information about the configuration. - `"creatorRequestId"`: The unique ID that the requester receives for the created broker. - Amazon MQ passes your ID with the API action. Note: We recommend using a Universally Unique + Amazon MQ passes your ID with the API action. We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency. -- `"encryptionOptions"`: Encryption options for the broker. Does not apply to RabbitMQ - brokers. 
+- `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. +- `"dataReplicationPrimaryBrokerArn"`: The Amazon Resource Name (ARN) of the primary broker + that is used to replicate data from in a data replication pair, and is applied to the + replica broker. Must be set when dataReplicationMode is set to CRDR. +- `"encryptionOptions"`: Encryption options for the broker. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers. - `"logs"`: Enables Amazon CloudWatch logging for brokers. @@ -71,8 +75,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet. If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to - which the specified subnets belong must be owned by your AWS account. Amazon MQ will not be - able to create VPC endpoints in VPCs that are not owned by your AWS account. + which the specified subnets belong must be owned by your Amazon Web Services account. + Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your + Amazon Web Services account. - `"tags"`: Create tags when creating the broker. """ function create_broker( @@ -241,7 +246,10 @@ end create_user(broker-id, password, username) create_user(broker-id, password, username, params::Dict{String,<:Any}) -Creates an ActiveMQ user. +Creates an ActiveMQ user. Do not add personally identifiable information (PII) or other +confidential or sensitive information in broker usernames. Broker usernames are accessible +to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not +intended to be used for private or sensitive data. # Arguments - `broker-id`: The unique ID that Amazon MQ generates for the broker. @@ -258,6 +266,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"groups"`: The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long. +- `"replicationUser"`: Defines if this user is intended for CRDR replication purposes. """ function create_user( broker_id, password, username; aws_config::AbstractAWSConfig=global_aws_config() @@ -771,6 +780,42 @@ function list_users( ) end +""" + promote(broker-id, mode) + promote(broker-id, mode, params::Dict{String,<:Any}) + +Promotes a data replication replica broker to the primary broker role. + +# Arguments +- `broker-id`: The unique ID that Amazon MQ generates for the broker. +- `mode`: The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, + FAILOVER. 
+ +""" +function promote(broker_id, mode; aws_config::AbstractAWSConfig=global_aws_config()) + return mq( + "POST", + "/v1/brokers/$(broker-id)/promote", + Dict{String,Any}("mode" => mode); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function promote( + broker_id, + mode, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mq( + "POST", + "/v1/brokers/$(broker-id)/promote", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("mode" => mode), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ reboot_broker(broker-id) reboot_broker(broker-id, params::Dict{String,<:Any}) @@ -820,6 +865,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot. - `"configuration"`: A list of information about the configuration. +- `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. - `"engineVersion"`: The broker engine version. For a list of supported engine versions, see Supported engines. - `"hostInstanceType"`: The broker's host instance type to upgrade to. For a list of @@ -861,7 +907,8 @@ Updates the specified configuration. # Arguments - `configuration-id`: The unique ID that Amazon MQ generates for the configuration. -- `data`: Required. The base64-encoded XML configuration. +- `data`: Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for + RabbitMQ: the base64-encoded Cuttlefish configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -914,6 +961,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"password"`: The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=). +- `"replicationUser"`: Defines whether the user is intended for data replication. """ function update_user(broker_id, username; aws_config::AbstractAWSConfig=global_aws_config()) return mq( diff --git a/src/services/mwaa.jl b/src/services/mwaa.jl index bc891df460..8370b5be2d 100644 --- a/src/services/mwaa.jl +++ b/src/services/mwaa.jl @@ -61,20 +61,45 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. - `"AirflowVersion"`: The Apache Airflow version for your environment. If no value is - specified, it defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, - and 2.5.1. For more information, see Apache Airflow versions on Amazon Managed Workflows - for Apache Airflow (MWAA). + specified, it defaults to the latest version. For more information, see Apache Airflow + versions on Amazon Managed Workflows for Apache Airflow (MWAA). Valid values: 1.10.12, + 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2 2.8.1 +- `"EndpointManagement"`: Defines whether the VPC endpoints configured for the environment + are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA + will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must + create, and manage, the VPC endpoints for your VPC. 
If you choose to create an environment + in a shared VPC, you must set this value to CUSTOMER. In a shared VPC deployment, the + environment will remain in PENDING status until you create the VPC endpoints. If you do not + take action to create the endpoints within 72 hours, the status will change to + CREATE_FAILED. You can delete the failed environment and create a new one. - `"EnvironmentClass"`: The environment class type. Valid values: mw1.small, mw1.medium, - mw1.large. For more information, see Amazon MWAA environment class. + mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment + class. - `"KmsKey"`: The Amazon Web Services Key Management Service (KMS) key to encrypt the data in your environment. You can use an Amazon Web Services owned CMK, or a Customer managed CMK (advanced). For more information, see Create an Amazon MWAA environment. - `"LoggingConfiguration"`: Defines the Apache Airflow logs to send to CloudWatch Logs. +- `"MaxWebservers"`: The maximum number of web servers that you want to run in your + environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number + you specify for MaxWebservers when you interact with your Apache Airflow environment using + Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your + workload requires network calls to the Apache Airflow REST API with a high + transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up + to the number set in MaxWebserers. As TPS rates decrease Amazon MWAA disposes of the + additional web servers, and scales down to the number set in MinxWebserers. Valid values: + Accepts between 2 and 5. Defaults to 2. - `"MaxWorkers"`: The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers. +- `"MinWebservers"`: The minimum number of web servers that you want to run in your + environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number + you specify for MaxWebservers when you interact with your Apache Airflow environment using + Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and + the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales + down to the number set in MinxWebserers. Valid values: Accepts between 2 and 5. Defaults + to 2. - `"MinWorkers"`: The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA @@ -108,8 +133,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Tags"`: The key-value tag pairs you want to associate to your environment. For example, \"Environment\": \"Staging\". For more information, see Tagging Amazon Web Services resources. -- `"WebserverAccessMode"`: The Apache Airflow Web server access mode. For more information, - see Apache Airflow access modes. +- `"WebserverAccessMode"`: Defines the access mode for the Apache Airflow web server. For + more information, see Apache Airflow access modes. 
- `"WeeklyMaintenanceWindowStart"`: The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 @@ -457,21 +482,38 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid - values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, and 2.5.1. + values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1. - `"DagS3Path"`: The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. - `"EnvironmentClass"`: The environment class type. Valid values: mw1.small, mw1.medium, - mw1.large. For more information, see Amazon MWAA environment class. + mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment + class. - `"ExecutionRoleArn"`: The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role. - `"LoggingConfiguration"`: The Apache Airflow log types to send to CloudWatch Logs. +- `"MaxWebservers"`: The maximum number of web servers that you want to run in your + environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number + you specify for MaxWebservers when you interact with your Apache Airflow environment using + Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your + workload requires network calls to the Apache Airflow REST API with a high + transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up + to the number set in MaxWebserers. As TPS rates decrease Amazon MWAA disposes of the + additional web servers, and scales down to the number set in MinxWebserers. Valid values: + Accepts between 2 and 5. Defaults to 2. - `"MaxWorkers"`: The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers. +- `"MinWebservers"`: The minimum number of web servers that you want to run in your + environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number + you specify for MaxWebservers when you interact with your Apache Airflow environment using + Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and + the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales + down to the number set in MinxWebserers. Valid values: Accepts between 2 and 5. Defaults + to 2. - `"MinWorkers"`: The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. 
When there are no more tasks running, and no more in the queue, MWAA diff --git a/src/services/neptune.jl b/src/services/neptune.jl index 2220e44e5d..b6ff54808e 100644 --- a/src/services/neptune.jl +++ b/src/services/neptune.jl @@ -471,8 +471,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is enabled. -- `"EnableCloudwatchLogsExports"`: The list of log types that need to be enabled for - exporting to CloudWatch Logs. +- `"EnableCloudwatchLogsExports"`: A list of the log types that this DB cluster should + export to CloudWatch Logs. Valid log types are: audit (to publish audit logs) and slowquery + (to publish slow-query logs). See Publishing Neptune logs to Amazon CloudWatch logs. - `"EnableIAMDatabaseAuthentication"`: If set to true, enables Amazon Identity and Access Management (IAM) authentication for the entire DB cluster (this cannot be set at an instance level). Default: false. @@ -503,20 +504,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"PreferredBackupWindow"`: The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each - Amazon Region. To see the time blocks available, see Adjusting the Preferred Maintenance - Window in the Amazon Neptune User Guide. Constraints: Must be in the format - hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with - the preferred maintenance window. Must be at least 30 minutes. + Amazon Region. To see the time blocks available, see Neptune Maintenance Window in the + Amazon Neptune User Guide. Constraints: Must be in the format hh24:mi-hh24:mi. Must be + in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance + window. Must be at least 30 minutes. - `"PreferredMaintenanceWindow"`: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Region, - occurring on a random day of the week. To see the time blocks available, see Adjusting the - Preferred Maintenance Window in the Amazon Neptune User Guide. Valid Days: Mon, Tue, Wed, - Thu, Fri, Sat, Sun. Constraints: Minimum 30-minute window. + occurring on a random day of the week. To see the time blocks available, see Neptune + Maintenance Window in the Amazon Neptune User Guide. Valid Days: Mon, Tue, Wed, Thu, Fri, + Sat, Sun. Constraints: Minimum 30-minute window. - `"ReplicationSourceIdentifier"`: The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica. -- `"ServerlessV2ScalingConfiguration"`: +- `"ServerlessV2ScalingConfiguration"`: Contains the scaling configuration of a Neptune + Serverless DB cluster. For more information, see Using Amazon Neptune Serverless in the + Amazon Neptune User Guide. - `"StorageEncrypted"`: Specifies whether the DB cluster is encrypted. +- `"StorageType"`: The storage type to associate with the DB cluster. 
Valid Values: + standard | iopt1 Default: standard When you create a Neptune cluster with the + storage type set to iopt1, the storage type is returned in the response. The storage type + isn't returned when you set it to standard. - `"Tags"`: The tags to assign to the new DB cluster. - `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this DB cluster. @@ -2657,7 +2664,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys You must specify a minimum value of 1. Default: 1 Constraints: Must be a value from 1 to 35 - `"CloudwatchLogsExportConfiguration"`: The configuration setting for the log types to be - enabled for export to CloudWatch Logs for a specific DB cluster. + enabled for export to CloudWatch Logs for a specific DB cluster. See Using the CLI to + publish Neptune audit logs to CloudWatch Logs. - `"CopyTagsToSnapshot"`: If set to true, tags are copied to any snapshot of the DB cluster that is created. - `"DBClusterParameterGroupName"`: The name of the DB cluster parameter group to use for @@ -2698,7 +2706,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a 30-minute window selected at random from an 8-hour block of time for each Amazon Region, occurring on a random day of the week. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. Constraints: Minimum 30-minute window. -- `"ServerlessV2ScalingConfiguration"`: +- `"ServerlessV2ScalingConfiguration"`: Contains the scaling configuration of a Neptune + Serverless DB cluster. For more information, see Using Amazon Neptune Serverless in the + Amazon Neptune User Guide. +- `"StorageType"`: The storage type to associate with the DB cluster. Valid Values: + standard | iopt1 Default: standard - `"VpcSecurityGroupIds"`: A list of VPC security groups that the DB cluster will belong to. """ function modify_dbcluster( @@ -3771,7 +3783,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OptionGroupName"`: (Not supported by Neptune) - `"Port"`: The port number on which the new DB cluster accepts connections. Constraints: Value must be 1150-65535 Default: The same port as the original DB cluster. -- `"ServerlessV2ScalingConfiguration"`: +- `"ServerlessV2ScalingConfiguration"`: Contains the scaling configuration of a Neptune + Serverless DB cluster. For more information, see Using Amazon Neptune Serverless in the + Amazon Neptune User Guide. +- `"StorageType"`: Specifies the storage type to be associated with the DB cluster. Valid + values: standard, iopt1 Default: standard - `"Tags"`: The tags to be assigned to the restored DB cluster. - `"VpcSecurityGroupIds"`: A list of VPC security groups that the new DB cluster will belong to. @@ -3881,7 +3897,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys source DB cluster. copy-on-write - The new DB cluster is restored as a clone of the source DB cluster. If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster. -- `"ServerlessV2ScalingConfiguration"`: +- `"ServerlessV2ScalingConfiguration"`: Contains the scaling configuration of a Neptune + Serverless DB cluster. For more information, see Using Amazon Neptune Serverless in the + Amazon Neptune User Guide. +- `"StorageType"`: Specifies the storage type to be associated with the DB cluster. Valid + values: standard, iopt1 Default: standard - `"Tags"`: The tags to be applied to the restored DB cluster. 
- `"UseLatestRestorableTime"`: A value that is set to true to restore the DB cluster to the latest restorable backup time, and false otherwise. Default: false Constraints: Cannot be diff --git a/src/services/neptunedata.jl b/src/services/neptunedata.jl new file mode 100644 index 0000000000..46c728983f --- /dev/null +++ b/src/services/neptunedata.jl @@ -0,0 +1,1931 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: neptunedata +using AWS.Compat +using AWS.UUIDs + +""" + cancel_gremlin_query(query_id) + cancel_gremlin_query(query_id, params::Dict{String,<:Any}) + +Cancels a Gremlin query. See Gremlin query cancellation for more information. When invoking +this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or +role making the request must have a policy attached that allows the neptune-db:CancelQuery +IAM action in that cluster. + +# Arguments +- `query_id`: The unique identifier that identifies the query to be canceled. + +""" +function cancel_gremlin_query(queryId; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "DELETE", + "/gremlin/status/$(queryId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_gremlin_query( + queryId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/gremlin/status/$(queryId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + cancel_loader_job(load_id) + cancel_loader_job(load_id, params::Dict{String,<:Any}) + +Cancels a specified load job. This is an HTTP DELETE request. See Neptune Loader Get-Status +API for more information. When invoking this operation in a Neptune cluster that has IAM +authentication enabled, the IAM user or role making the request must have a policy attached +that allows the neptune-db:CancelLoaderJob IAM action in that cluster.. + +# Arguments +- `load_id`: The ID of the load job to be deleted. + +""" +function cancel_loader_job(loadId; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "DELETE", + "/loader/$(loadId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_loader_job( + loadId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/loader/$(loadId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + cancel_mldata_processing_job(id) + cancel_mldata_processing_job(id, params::Dict{String,<:Any}) + +Cancels a Neptune ML data processing job. See The dataprocessing command. When invoking +this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or +role making the request must have a policy attached that allows the +neptune-db:CancelMLDataProcessingJob IAM action in that cluster. + +# Arguments +- `id`: The unique identifier of the data-processing job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clean"`: If set to TRUE, this flag specifies that all Neptune ML S3 artifacts should be + deleted when the job is stopped. The default is FALSE. +- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker + and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will occur. 
+""" +function cancel_mldata_processing_job(id; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "DELETE", + "/ml/dataprocessing/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_mldata_processing_job( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/ml/dataprocessing/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + cancel_mlmodel_training_job(id) + cancel_mlmodel_training_job(id, params::Dict{String,<:Any}) + +Cancels a Neptune ML model training job. See Model training using the modeltraining +command. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:CancelMLModelTrainingJob IAM action in that cluster. + +# Arguments +- `id`: The unique identifier of the model-training job to be canceled. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clean"`: If set to TRUE, this flag specifies that all Amazon S3 artifacts should be + deleted when the job is stopped. The default is FALSE. +- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker + and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will occur. +""" +function cancel_mlmodel_training_job(id; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "DELETE", + "/ml/modeltraining/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_mlmodel_training_job( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/ml/modeltraining/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + cancel_mlmodel_transform_job(id) + cancel_mlmodel_transform_job(id, params::Dict{String,<:Any}) + +Cancels a specified model transform job. See Use a trained model to generate new model +artifacts. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:CancelMLModelTransformJob IAM action in that cluster. + +# Arguments +- `id`: The unique ID of the model transform job to be canceled. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clean"`: If this flag is set to TRUE, all Neptune ML S3 artifacts should be deleted + when the job is stopped. The default is FALSE. +- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker + and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will occur. 
+""" +function cancel_mlmodel_transform_job(id; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "DELETE", + "/ml/modeltransform/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_mlmodel_transform_job( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/ml/modeltransform/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + cancel_open_cypher_query(query_id) + cancel_open_cypher_query(query_id, params::Dict{String,<:Any}) + +Cancels a specified openCypher query. See Neptune openCypher status endpoint for more +information. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:CancelQuery IAM action in that cluster. + +# Arguments +- `query_id`: The unique ID of the openCypher query to cancel. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"silent"`: If set to TRUE, causes the cancelation of the openCypher query to happen + silently. +""" +function cancel_open_cypher_query( + queryId; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/opencypher/status/$(queryId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_open_cypher_query( + queryId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/opencypher/status/$(queryId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_mlendpoint() + create_mlendpoint(params::Dict{String,<:Any}) + +Creates a new Neptune ML inference endpoint that lets you query one specific model that the +model-training process constructed. See Managing inference endpoints using the endpoints +command. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:CreateMLEndpoint IAM action in that cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"id"`: A unique identifier for the new inference endpoint. The default is an + autogenerated timestamped name. +- `"instanceCount"`: The minimum number of Amazon EC2 instances to deploy to an endpoint + for prediction. The default is 1 +- `"instanceType"`: The type of Neptune ML instance to use for online servicing. The + default is ml.m5.xlarge. Choosing the ML instance for an inference endpoint depends on the + task type, the graph size, and your budget. +- `"mlModelTrainingJobId"`: The job Id of the completed model-training job that has created + the model that the inference endpoint will point to. You must supply either the + mlModelTrainingJobId or the mlModelTransformJobId. +- `"mlModelTransformJobId"`: The job Id of the completed model-transform job. You must + supply either the mlModelTrainingJobId or the mlModelTransformJobId. +- `"modelName"`: Model type for training. By default the Neptune ML model is automatically + based on the modelType used in data processing, but you can specify a different model type + here. The default is rgcn for heterogeneous graphs and kge for knowledge graphs. The only + valid value for heterogeneous graphs is rgcn. 
Valid values for knowledge graphs are: kge, + transe, distmult, and rotate. +- `"neptuneIamRoleArn"`: The ARN of an IAM role providing Neptune access to SageMaker and + Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will be thrown. +- `"update"`: If set to true, update indicates that this is an update request. The default + is false. You must supply either the mlModelTrainingJobId or the mlModelTransformJobId. +- `"volumeEncryptionKMSKey"`: The Amazon Key Management Service (Amazon KMS) key that + SageMaker uses to encrypt data on the storage volume attached to the ML compute instances + that run the training job. The default is None. +""" +function create_mlendpoint(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "POST", "/ml/endpoints"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function create_mlendpoint( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/ml/endpoints", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_mlendpoint(id) + delete_mlendpoint(id, params::Dict{String,<:Any}) + +Cancels the creation of a Neptune ML inference endpoint. See Managing inference endpoints +using the endpoints command. When invoking this operation in a Neptune cluster that has IAM +authentication enabled, the IAM user or role making the request must have a policy attached +that allows the neptune-db:DeleteMLEndpoint IAM action in that cluster. + +# Arguments +- `id`: The unique identifier of the inference endpoint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clean"`: If this flag is set to TRUE, all Neptune ML S3 artifacts should be deleted + when the job is stopped. The default is FALSE. +- `"neptuneIamRoleArn"`: The ARN of an IAM role providing Neptune access to SageMaker and + Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will be thrown. +""" +function delete_mlendpoint(id; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "DELETE", + "/ml/endpoints/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_mlendpoint( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/ml/endpoints/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_propertygraph_statistics() + delete_propertygraph_statistics(params::Dict{String,<:Any}) + +Deletes statistics for Gremlin and openCypher (property graph) data. When invoking this +operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role +making the request must have a policy attached that allows the neptune-db:DeleteStatistics +IAM action in that cluster. 
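# Editor's note: illustrative sketch, not part of the generated patch. Creates an inference
# endpoint from a hypothetical completed model-training job using keys documented for the
# create_mlendpoint binding defined above; the job ID is made up, and ml.m5.xlarge is simply
# the documented default instance type.
create_mlendpoint(
    Dict{String,Any}(
        "mlModelTrainingJobId" => "model-training-example-id",
        "instanceType" => "ml.m5.xlarge",
    ),
)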
+ +""" +function delete_propertygraph_statistics(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/propertygraph/statistics"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_propertygraph_statistics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/propertygraph/statistics", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_sparql_statistics() + delete_sparql_statistics(params::Dict{String,<:Any}) + +Deletes SPARQL statistics When invoking this operation in a Neptune cluster that has IAM +authentication enabled, the IAM user or role making the request must have a policy attached +that allows the neptune-db:DeleteStatistics IAM action in that cluster. + +""" +function delete_sparql_statistics(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "DELETE", + "/sparql/statistics"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_sparql_statistics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "DELETE", + "/sparql/statistics", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + execute_fast_reset(action) + execute_fast_reset(action, params::Dict{String,<:Any}) + +The fast reset REST API lets you reset a Neptune graph quicky and easily, removing all of +its data. Neptune fast reset is a two-step process. First you call ExecuteFastReset with +action set to initiateDatabaseReset. This returns a UUID token which you then include when +calling ExecuteFastReset again with action set to performDatabaseReset. See Empty an Amazon +Neptune DB cluster using the fast reset API. When invoking this operation in a Neptune +cluster that has IAM authentication enabled, the IAM user or role making the request must +have a policy attached that allows the neptune-db:ResetDatabase IAM action in that cluster. + +# Arguments +- `action`: The fast reset action. One of the following values: initiateDatabaseReset +   –   This action generates a unique token needed to actually perform the fast reset. + performDatabaseReset   –   This action uses the token generated by the + initiateDatabaseReset action to actually perform the fast reset. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"token"`: The fast-reset token to initiate the reset. +""" +function execute_fast_reset(action; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "POST", + "/system", + Dict{String,Any}("action" => action); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_fast_reset( + action, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/system", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("action" => action), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + execute_gremlin_explain_query(gremlin) + execute_gremlin_explain_query(gremlin, params::Dict{String,<:Any}) + +Executes a Gremlin Explain query. Amazon Neptune has added a Gremlin feature named explain +that provides is a self-service tool for understanding the execution approach being taken +by the Neptune engine for the query. 
You invoke it by adding an explain parameter to an +HTTP call that submits a Gremlin query. The explain feature provides information about the +logical structure of query execution plans. You can use this information to identify +potential evaluation and execution bottlenecks and to tune your query, as explained in +Tuning Gremlin queries. You can also use query hints to improve query execution plans. When +invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM +user or role making the request must have a policy attached that allows one of the +following IAM actions in that cluster, depending on the query: +neptune-db:ReadDataViaQuery neptune-db:WriteDataViaQuery +neptune-db:DeleteDataViaQuery Note that the neptune-db:QueryLanguage:Gremlin IAM +condition key can be used in the policy document to restrict the use of Gremlin queries +(see Condition keys available in Neptune IAM data-access policy statements). + +# Arguments +- `gremlin`: The Gremlin explain query string. + +""" +function execute_gremlin_explain_query( + gremlin; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/gremlin/explain", + Dict{String,Any}("gremlin" => gremlin); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_gremlin_explain_query( + gremlin, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/gremlin/explain", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("gremlin" => gremlin), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + execute_gremlin_profile_query(gremlin) + execute_gremlin_profile_query(gremlin, params::Dict{String,<:Any}) + +Executes a Gremlin Profile query, which runs a specified traversal, collects various +metrics about the run, and produces a profile report as output. See Gremlin profile API in +Neptune for details. When invoking this operation in a Neptune cluster that has IAM +authentication enabled, the IAM user or role making the request must have a policy attached +that allows the neptune-db:ReadDataViaQuery IAM action in that cluster. Note that the +neptune-db:QueryLanguage:Gremlin IAM condition key can be used in the policy document to +restrict the use of Gremlin queries (see Condition keys available in Neptune IAM +data-access policy statements). + +# Arguments +- `gremlin`: The Gremlin query string to profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"profile.chop"`: If non-zero, causes the results string to be truncated at that number + of characters. If set to zero, the string contains all the results. +- `"profile.indexOps"`: If this flag is set to TRUE, the results include a detailed report + of all index operations that took place during query execution and serialization. +- `"profile.results"`: If this flag is set to TRUE, the query results are gathered and + displayed as part of the profile report. If FALSE, only the result count is displayed. +- `"profile.serializer"`: If non-null, the gathered results are returned in a serialized + response message in the format specified by this parameter. See Gremlin profile API in + Neptune for more information. 
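# Editor's note: illustrative sketch, not part of the generated patch. Profiles an arbitrary
# Gremlin traversal while suppressing the gathered results via the "profile.results" key
# documented above; the query string itself is only an example.
execute_gremlin_profile_query(
    "g.V().hasLabel('airport').count()", Dict{String,Any}("profile.results" => false)
)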
+""" +function execute_gremlin_profile_query( + gremlin; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/gremlin/profile", + Dict{String,Any}("gremlin" => gremlin); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_gremlin_profile_query( + gremlin, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/gremlin/profile", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("gremlin" => gremlin), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + execute_gremlin_query(gremlin) + execute_gremlin_query(gremlin, params::Dict{String,<:Any}) + +This commands executes a Gremlin query. Amazon Neptune is compatible with Apache TinkerPop3 +and Gremlin, so you can use the Gremlin traversal language to query the graph, as described +under The Graph in the Apache TinkerPop3 documentation. More details can also be found in +Accessing a Neptune graph with Gremlin. When invoking this operation in a Neptune cluster +that has IAM authentication enabled, the IAM user or role making the request must have a +policy attached that enables one of the following IAM actions in that cluster, depending on +the query: neptune-db:ReadDataViaQuery neptune-db:WriteDataViaQuery +neptune-db:DeleteDataViaQuery Note that the neptune-db:QueryLanguage:Gremlin IAM +condition key can be used in the policy document to restrict the use of Gremlin queries +(see Condition keys available in Neptune IAM data-access policy statements). + +# Arguments +- `gremlin`: Using this API, you can run Gremlin queries in string format much as you can + using the HTTP endpoint. The interface is compatible with whatever Gremlin version your DB + cluster is using (see the Tinkerpop client section to determine which Gremlin releases your + engine version supports). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accept"`: If non-null, the query results are returned in a serialized response message + in the format specified by this parameter. See the GraphSON section in the TinkerPop + documentation for a list of the formats that are currently supported. +""" +function execute_gremlin_query(gremlin; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "POST", + "/gremlin", + Dict{String,Any}("gremlin" => gremlin); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_gremlin_query( + gremlin, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/gremlin", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("gremlin" => gremlin), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + execute_open_cypher_explain_query(explain, query) + execute_open_cypher_explain_query(explain, query, params::Dict{String,<:Any}) + +Executes an openCypher explain request. See The openCypher explain feature for more +information. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:ReadDataViaQuery IAM action in that cluster. Note that the +neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to +restrict the use of openCypher queries (see Condition keys available in Neptune IAM +data-access policy statements). 
+ +# Arguments +- `explain`: The openCypher explain mode. Can be one of: static, dynamic, or details. +- `query`: The openCypher query string. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"parameters"`: The openCypher query parameters. +""" +function execute_open_cypher_explain_query( + explain, query; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/opencypher/explain", + Dict{String,Any}("explain" => explain, "query" => query); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_open_cypher_explain_query( + explain, + query, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return neptunedata( + "POST", + "/opencypher/explain", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("explain" => explain, "query" => query), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + execute_open_cypher_query(query) + execute_open_cypher_query(query, params::Dict{String,<:Any}) + +Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more +information. Neptune supports building graph applications using openCypher, which is +currently one of the most popular query languages among developers working with graph +databases. Developers, business analysts, and data scientists like openCypher's +declarative, SQL-inspired syntax because it provides a familiar structure in which to +querying property graphs. The openCypher language was originally developed by Neo4j, then +open-sourced in 2015 and contributed to the openCypher project under an Apache 2 +open-source license. Note that when invoking this operation in a Neptune cluster that has +IAM authentication enabled, the IAM user or role making the request must have a policy +attached that allows one of the following IAM actions in that cluster, depending on the +query: neptune-db:ReadDataViaQuery neptune-db:WriteDataViaQuery +neptune-db:DeleteDataViaQuery Note also that the neptune-db:QueryLanguage:OpenCypher IAM +condition key can be used in the policy document to restrict the use of openCypher queries +(see Condition keys available in Neptune IAM data-access policy statements). + +# Arguments +- `query`: The openCypher query string to be executed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"parameters"`: The openCypher query parameters for query execution. See Examples of + openCypher parameterized queries for more information. +""" +function execute_open_cypher_query(query; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "POST", + "/opencypher", + Dict{String,Any}("query" => query); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function execute_open_cypher_query( + query, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/opencypher", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("query" => query), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_engine_status() + get_engine_status(params::Dict{String,<:Any}) + +Retrieves the status of the graph database on the host. 
When invoking this operation in a +Neptune cluster that has IAM authentication enabled, the IAM user or role making the +request must have a policy attached that allows the neptune-db:GetEngineStatus IAM action +in that cluster. + +""" +function get_engine_status(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/status"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_engine_status( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", "/status", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + get_gremlin_query_status(query_id) + get_gremlin_query_status(query_id, params::Dict{String,<:Any}) + +Gets the status of a specified Gremlin query. When invoking this operation in a Neptune +cluster that has IAM authentication enabled, the IAM user or role making the request must +have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that +cluster. Note that the neptune-db:QueryLanguage:Gremlin IAM condition key can be used in +the policy document to restrict the use of Gremlin queries (see Condition keys available in +Neptune IAM data-access policy statements). + +# Arguments +- `query_id`: The unique identifier that identifies the Gremlin query. + +""" +function get_gremlin_query_status( + queryId; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/gremlin/status/$(queryId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_gremlin_query_status( + queryId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/gremlin/status/$(queryId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_loader_job_status(load_id) + get_loader_job_status(load_id, params::Dict{String,<:Any}) + +Gets status information about a specified load job. Neptune keeps track of the most recent +1,024 bulk load jobs, and stores the last 10,000 error details per job. See Neptune Loader +Get-Status API for more information. When invoking this operation in a Neptune cluster that +has IAM authentication enabled, the IAM user or role making the request must have a policy +attached that allows the neptune-db:GetLoaderJobStatus IAM action in that cluster.. + +# Arguments +- `load_id`: The load ID of the load job to get the status of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"details"`: Flag indicating whether or not to include details beyond the overall status + (TRUE or FALSE; the default is FALSE). +- `"errors"`: Flag indicating whether or not to include a list of errors encountered (TRUE + or FALSE; the default is FALSE). The list of errors is paged. The page and errorsPerPage + parameters allow you to page through all the errors. +- `"errorsPerPage"`: The number of errors returned in each page (a positive integer; the + default is 10). Only valid when the errors parameter set to TRUE. +- `"page"`: The error page number (a positive integer; the default is 1). Only valid when + the errors parameter is set to TRUE. 
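# Editor's note: illustrative sketch, not part of the generated patch. Requests detailed
# status plus the first page of errors for a hypothetical load ID, using the optional keys
# documented above; credentials are assumed via global_aws_config().
get_loader_job_status(
    "loader-job-example-id",
    Dict{String,Any}(
        "details" => true, "errors" => true, "page" => 1, "errorsPerPage" => 10
    ),
)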
+""" +function get_loader_job_status(loadId; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/loader/$(loadId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_loader_job_status( + loadId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/loader/$(loadId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_mldata_processing_job(id) + get_mldata_processing_job(id, params::Dict{String,<:Any}) + +Retrieves information about a specified data processing job. See The dataprocessing +command. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:neptune-db:GetMLDataProcessingJobStatus IAM action in that cluster. + +# Arguments +- `id`: The unique identifier of the data-processing job to be retrieved. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker + and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will occur. +""" +function get_mldata_processing_job(id; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", + "/ml/dataprocessing/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_mldata_processing_job( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/ml/dataprocessing/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_mlendpoint(id) + get_mlendpoint(id, params::Dict{String,<:Any}) + +Retrieves details about an inference endpoint. See Managing inference endpoints using the +endpoints command. When invoking this operation in a Neptune cluster that has IAM +authentication enabled, the IAM user or role making the request must have a policy attached +that allows the neptune-db:GetMLEndpointStatus IAM action in that cluster. + +# Arguments +- `id`: The unique identifier of the inference endpoint. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker + and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will occur. +""" +function get_mlendpoint(id; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/ml/endpoints/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_mlendpoint( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/ml/endpoints/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_mlmodel_training_job(id) + get_mlmodel_training_job(id, params::Dict{String,<:Any}) + +Retrieves information about a Neptune ML model training job. See Model training using the +modeltraining command. When invoking this operation in a Neptune cluster that has IAM +authentication enabled, the IAM user or role making the request must have a policy attached +that allows the neptune-db:GetMLModelTrainingJobStatus IAM action in that cluster. 
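A hedged sketch of polling a bulk-load job with the optional flags described above; the load ID is a placeholder and the parameter values are illustrative only:

using AWS
@service Neptunedata   # assumed module name

status = Neptunedata.get_loader_job_status(
    "ef478cf0-example-load-id",   # placeholder loadId returned by a previous load request
    Dict("details" => true, "errors" => true, "page" => 1, "errorsPerPage" => 3),
)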
+
+# Arguments
+- `id`: The unique identifier of the model-training job to retrieve.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker
+  and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error
+  will occur.
+"""
+function get_mlmodel_training_job(id; aws_config::AbstractAWSConfig=global_aws_config())
+    return neptunedata(
+        "GET",
+        "/ml/modeltraining/$(id)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_mlmodel_training_job(
+    id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return neptunedata(
+        "GET",
+        "/ml/modeltraining/$(id)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_mlmodel_transform_job(id)
+    get_mlmodel_transform_job(id, params::Dict{String,<:Any})
+
+Gets information about a specified model transform job. See Use a trained model to generate
+new model artifacts. When invoking this operation in a Neptune cluster that has IAM
+authentication enabled, the IAM user or role making the request must have a policy attached
+that allows the neptune-db:GetMLModelTransformJobStatus IAM action in that cluster.
+
+# Arguments
+- `id`: The unique identifier of the model-transform job to be retrieved.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker
+  and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error
+  will occur.
+"""
+function get_mlmodel_transform_job(id; aws_config::AbstractAWSConfig=global_aws_config())
+    return neptunedata(
+        "GET",
+        "/ml/modeltransform/$(id)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_mlmodel_transform_job(
+    id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return neptunedata(
+        "GET",
+        "/ml/modeltransform/$(id)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_open_cypher_query_status(query_id)
+    get_open_cypher_query_status(query_id, params::Dict{String,<:Any})
+
+Retrieves the status of a specified openCypher query. When invoking this operation in a
+Neptune cluster that has IAM authentication enabled, the IAM user or role making the
+request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in
+that cluster. Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be
+used in the policy document to restrict the use of openCypher queries (see Condition keys
+available in Neptune IAM data-access policy statements).
+
+# Arguments
+- `query_id`: The unique ID of the openCypher query for which to retrieve the query status.
+ +""" +function get_open_cypher_query_status( + queryId; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/opencypher/status/$(queryId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_open_cypher_query_status( + queryId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/opencypher/status/$(queryId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_propertygraph_statistics() + get_propertygraph_statistics(params::Dict{String,<:Any}) + +Gets property graph statistics (Gremlin and openCypher). When invoking this operation in a +Neptune cluster that has IAM authentication enabled, the IAM user or role making the +request must have a policy attached that allows the neptune-db:GetStatisticsStatus IAM +action in that cluster. + +""" +function get_propertygraph_statistics(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", + "/propertygraph/statistics"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_propertygraph_statistics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/propertygraph/statistics", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_propertygraph_stream() + get_propertygraph_stream(params::Dict{String,<:Any}) + +Gets a stream for a property graph. With the Neptune Streams feature, you can generate a +complete sequence of change-log entries that record every change made to your graph data as +it happens. GetPropertygraphStream lets you collect these change-log entries for a property +graph. The Neptune streams feature needs to be enabled on your Neptune DBcluster. To enable +streams, set the neptune_streams DB cluster parameter to 1. See Capturing graph changes in +real time using Neptune streams. When invoking this operation in a Neptune cluster that has +IAM authentication enabled, the IAM user or role making the request must have a policy +attached that allows the neptune-db:GetStreamRecords IAM action in that cluster. When +invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM +user or role making the request must have a policy attached that enables one of the +following IAM actions, depending on the query: Note that you can restrict property-graph +queries using the following IAM context keys: neptune-db:QueryLanguage:Gremlin +neptune-db:QueryLanguage:OpenCypher See Condition keys available in Neptune IAM +data-access policy statements). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Accept-Encoding"`: If set to TRUE, Neptune compresses the response using gzip encoding. +- `"commitNum"`: The commit number of the starting record to read from the change-log + stream. This parameter is required when iteratorType isAT_SEQUENCE_NUMBER or + AFTER_SEQUENCE_NUMBER, and ignored when iteratorType is TRIM_HORIZON or LATEST. +- `"iteratorType"`: Can be one of: AT_SEQUENCE_NUMBER   –   Indicates that reading + should start from the event sequence number specified jointly by the commitNum and opNum + parameters. AFTER_SEQUENCE_NUMBER   –   Indicates that reading should start right + after the event sequence number specified jointly by the commitNum and opNum parameters. 
+ TRIM_HORIZON   –   Indicates that reading should start at the last untrimmed record in + the system, which is the oldest unexpired (not yet deleted) record in the change-log + stream. LATEST   –   Indicates that reading should start at the most recent record + in the system, which is the latest unexpired (not yet deleted) record in the change-log + stream. +- `"limit"`: Specifies the maximum number of records to return. There is also a size limit + of 10 MB on the response that can't be modified and that takes precedence over the number + of records specified in the limit parameter. The response does include a + threshold-breaching record if the 10 MB limit was reached. The range for limit is 1 to + 100,000, with a default of 10. +- `"opNum"`: The operation sequence number within the specified commit to start reading + from in the change-log stream data. The default is 1. +""" +function get_propertygraph_stream(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", + "/propertygraph/stream"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_propertygraph_stream( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/propertygraph/stream", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_propertygraph_summary() + get_propertygraph_summary(params::Dict{String,<:Any}) + +Gets a graph summary for a property graph. When invoking this operation in a Neptune +cluster that has IAM authentication enabled, the IAM user or role making the request must +have a policy attached that allows the neptune-db:GetGraphSummary IAM action in that +cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"mode"`: Mode can take one of two values: BASIC (the default), and DETAILED. +""" +function get_propertygraph_summary(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", + "/propertygraph/statistics/summary"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_propertygraph_summary( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/propertygraph/statistics/summary", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_rdfgraph_summary() + get_rdfgraph_summary(params::Dict{String,<:Any}) + +Gets a graph summary for an RDF graph. When invoking this operation in a Neptune cluster +that has IAM authentication enabled, the IAM user or role making the request must have a +policy attached that allows the neptune-db:GetGraphSummary IAM action in that cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"mode"`: Mode can take one of two values: BASIC (the default), and DETAILED. 
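A hedged sketch of the property-graph summary and stream wrappers above, assuming Neptune streams are enabled on the cluster; the module name and parameter values are assumptions:

using AWS
@service Neptunedata   # assumed module name

summary = Neptunedata.get_propertygraph_summary(Dict("mode" => "DETAILED"))

# Read up to 50 change-log records starting from the oldest untrimmed record.
records = Neptunedata.get_propertygraph_stream(
    Dict("iteratorType" => "TRIM_HORIZON", "limit" => 50)
)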
+""" +function get_rdfgraph_summary(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", + "/rdf/statistics/summary"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_rdfgraph_summary( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/rdf/statistics/summary", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_sparql_statistics() + get_sparql_statistics(params::Dict{String,<:Any}) + +Gets RDF statistics (SPARQL). + +""" +function get_sparql_statistics(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/sparql/statistics"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_sparql_statistics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/sparql/statistics", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_sparql_stream() + get_sparql_stream(params::Dict{String,<:Any}) + +Gets a stream for an RDF graph. With the Neptune Streams feature, you can generate a +complete sequence of change-log entries that record every change made to your graph data as +it happens. GetSparqlStream lets you collect these change-log entries for an RDF graph. The +Neptune streams feature needs to be enabled on your Neptune DBcluster. To enable streams, +set the neptune_streams DB cluster parameter to 1. See Capturing graph changes in real time +using Neptune streams. When invoking this operation in a Neptune cluster that has IAM +authentication enabled, the IAM user or role making the request must have a policy attached +that allows the neptune-db:GetStreamRecords IAM action in that cluster. Note that the +neptune-db:QueryLanguage:Sparql IAM condition key can be used in the policy document to +restrict the use of SPARQL queries (see Condition keys available in Neptune IAM data-access +policy statements). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Accept-Encoding"`: If set to TRUE, Neptune compresses the response using gzip encoding. +- `"commitNum"`: The commit number of the starting record to read from the change-log + stream. This parameter is required when iteratorType isAT_SEQUENCE_NUMBER or + AFTER_SEQUENCE_NUMBER, and ignored when iteratorType is TRIM_HORIZON or LATEST. +- `"iteratorType"`: Can be one of: AT_SEQUENCE_NUMBER   –   Indicates that reading + should start from the event sequence number specified jointly by the commitNum and opNum + parameters. AFTER_SEQUENCE_NUMBER   –   Indicates that reading should start right + after the event sequence number specified jointly by the commitNum and opNum parameters. + TRIM_HORIZON   –   Indicates that reading should start at the last untrimmed record in + the system, which is the oldest unexpired (not yet deleted) record in the change-log + stream. LATEST   –   Indicates that reading should start at the most recent record + in the system, which is the latest unexpired (not yet deleted) record in the change-log + stream. +- `"limit"`: Specifies the maximum number of records to return. There is also a size limit + of 10 MB on the response that can't be modified and that takes precedence over the number + of records specified in the limit parameter. The response does include a + threshold-breaching record if the 10 MB limit was reached. 
The range for limit is 1 to + 100,000, with a default of 10. +- `"opNum"`: The operation sequence number within the specified commit to start reading + from in the change-log stream data. The default is 1. +""" +function get_sparql_stream(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/sparql/stream"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_sparql_stream( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/sparql/stream", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_gremlin_queries() + list_gremlin_queries(params::Dict{String,<:Any}) + +Lists active Gremlin queries. See Gremlin query status API for details about the output. +When invoking this operation in a Neptune cluster that has IAM authentication enabled, the +IAM user or role making the request must have a policy attached that allows the +neptune-db:GetQueryStatus IAM action in that cluster. Note that the +neptune-db:QueryLanguage:Gremlin IAM condition key can be used in the policy document to +restrict the use of Gremlin queries (see Condition keys available in Neptune IAM +data-access policy statements). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includeWaiting"`: If set to TRUE, the list returned includes waiting queries. The + default is FALSE; +""" +function list_gremlin_queries(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/gremlin/status"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_gremlin_queries( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/gremlin/status", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_loader_jobs() + list_loader_jobs(params::Dict{String,<:Any}) + +Retrieves a list of the loadIds for all active loader jobs. When invoking this operation in +a Neptune cluster that has IAM authentication enabled, the IAM user or role making the +request must have a policy attached that allows the neptune-db:ListLoaderJobs IAM action in +that cluster.. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includeQueuedLoads"`: An optional parameter that can be used to exclude the load IDs of + queued load requests when requesting a list of load IDs by setting the parameter to FALSE. + The default value is TRUE. +- `"limit"`: The number of load IDs to list. Must be a positive integer greater than zero + and not more than 100 (which is the default). +""" +function list_loader_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/loader"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_loader_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", "/loader", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_mldata_processing_jobs() + list_mldata_processing_jobs(params::Dict{String,<:Any}) + +Returns a list of Neptune ML data processing jobs. See Listing active data-processing jobs +using the Neptune ML dataprocessing command. 
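As an illustrative, hedged sketch of the listing wrappers above (module name assumed, parameter values arbitrary):

using AWS
@service Neptunedata   # assumed module name

queries = Neptunedata.list_gremlin_queries(Dict("includeWaiting" => true))
loads = Neptunedata.list_loader_jobs(Dict("limit" => 25, "includeQueuedLoads" => false))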
+When invoking this operation in a Neptune cluster that has IAM authentication enabled, the
+IAM user or role making the request must have a policy attached that allows the
+neptune-db:ListMLDataProcessingJobs IAM action in that cluster.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxItems"`: The maximum number of items to return (from 1 to 1024; the default is 10).
+- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker
+  and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error
+  will occur.
+"""
+function list_mldata_processing_jobs(; aws_config::AbstractAWSConfig=global_aws_config())
+    return neptunedata(
+        "GET", "/ml/dataprocessing"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_mldata_processing_jobs(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return neptunedata(
+        "GET",
+        "/ml/dataprocessing",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_mlendpoints()
+    list_mlendpoints(params::Dict{String,<:Any})
+
+Lists existing inference endpoints. See Managing inference endpoints using the endpoints
+command. When invoking this operation in a Neptune cluster that has IAM authentication
+enabled, the IAM user or role making the request must have a policy attached that allows
+the neptune-db:ListMLEndpoints IAM action in that cluster.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxItems"`: The maximum number of items to return (from 1 to 1024; the default is 10).
+- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker
+  and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error
+  will occur.
+"""
+function list_mlendpoints(; aws_config::AbstractAWSConfig=global_aws_config())
+    return neptunedata(
+        "GET", "/ml/endpoints"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_mlendpoints(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return neptunedata(
+        "GET",
+        "/ml/endpoints",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_mlmodel_training_jobs()
+    list_mlmodel_training_jobs(params::Dict{String,<:Any})
+
+Lists Neptune ML model-training jobs. See Model training using the modeltraining command.
+When invoking this operation in a Neptune cluster that has IAM authentication enabled, the
+IAM user or role making the request must have a policy attached that allows the
+neptune-db:ListMLModelTrainingJobs IAM action in that cluster.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxItems"`: The maximum number of items to return (from 1 to 1024; the default is 10).
+- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker
+  and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error
+  will occur.
+""" +function list_mlmodel_training_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/ml/modeltraining"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_mlmodel_training_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/ml/modeltraining", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_mlmodel_transform_jobs() + list_mlmodel_transform_jobs(params::Dict{String,<:Any}) + +Returns a list of model transform job IDs. See Use a trained model to generate new model +artifacts. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:ListMLModelTransformJobs IAM action in that cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxItems"`: The maximum number of items to return (from 1 to 1024; the default is 10). +- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker + and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will occur. +""" +function list_mlmodel_transform_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/ml/modeltransform"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_mlmodel_transform_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/ml/modeltransform", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_open_cypher_queries() + list_open_cypher_queries(params::Dict{String,<:Any}) + +Lists active openCypher queries. See Neptune openCypher status endpoint for more +information. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:GetQueryStatus IAM action in that cluster. Note that the +neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to +restrict the use of openCypher queries (see Condition keys available in Neptune IAM +data-access policy statements). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includeWaiting"`: When set to TRUE and other parameters are not present, causes status + information to be returned for waiting queries as well as for running queries. +""" +function list_open_cypher_queries(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "GET", "/opencypher/status"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_open_cypher_queries( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "GET", + "/opencypher/status", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + manage_propertygraph_statistics() + manage_propertygraph_statistics(params::Dict{String,<:Any}) + +Manages the generation and use of property graph statistics. 
When invoking this operation +in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the +request must have a policy attached that allows the neptune-db:ManageStatistics IAM action +in that cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"mode"`: The statistics generation mode. One of: DISABLE_AUTOCOMPUTE, + ENABLE_AUTOCOMPUTE, or REFRESH, the last of which manually triggers DFE statistics + generation. +""" +function manage_propertygraph_statistics(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/propertygraph/statistics"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function manage_propertygraph_statistics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/propertygraph/statistics", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + manage_sparql_statistics() + manage_sparql_statistics(params::Dict{String,<:Any}) + +Manages the generation and use of RDF graph statistics. When invoking this operation in a +Neptune cluster that has IAM authentication enabled, the IAM user or role making the +request must have a policy attached that allows the neptune-db:ManageStatistics IAM action +in that cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"mode"`: The statistics generation mode. One of: DISABLE_AUTOCOMPUTE, + ENABLE_AUTOCOMPUTE, or REFRESH, the last of which manually triggers DFE statistics + generation. +""" +function manage_sparql_statistics(; aws_config::AbstractAWSConfig=global_aws_config()) + return neptunedata( + "POST", "/sparql/statistics"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function manage_sparql_statistics( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/sparql/statistics", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_loader_job(format, iam_role_arn, region, source) + start_loader_job(format, iam_role_arn, region, source, params::Dict{String,<:Any}) + +Starts a Neptune bulk loader job to load data from an Amazon S3 bucket into a Neptune DB +instance. See Using the Amazon Neptune Bulk Loader to Ingest Data. When invoking this +operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role +making the request must have a policy attached that allows the neptune-db:StartLoaderJob +IAM action in that cluster. + +# Arguments +- `format`: The format of the data. For more information about data formats for the Neptune + Loader command, see Load Data Formats. Allowed values csv for the Gremlin CSV data + format. opencypher for the openCypher CSV data format. ntriples for the N-Triples + RDF data format. nquads for the N-Quads RDF data format. rdfxml for the RDFXML + RDF data format. turtle for the Turtle RDF data format. +- `iam_role_arn`: The Amazon Resource Name (ARN) for an IAM role to be assumed by the + Neptune DB instance for access to the S3 bucket. The IAM role ARN provided here should be + attached to the DB cluster (see Adding the IAM Role to an Amazon Neptune Cluster. +- `region`: The Amazon region of the S3 bucket. This must match the Amazon Region of the DB + cluster. 
+- `source`: The source parameter accepts an S3 URI that identifies a single file, multiple
+  files, a folder, or multiple folders. Neptune loads every data file in any folder that is
+  specified. The URI can be in any of the following formats.
+  s3://(bucket_name)/(object-key-name)
+  https://s3.amazonaws.com/(bucket_name)/(object-key-name)
+  https://s3.us-east-1.amazonaws.com/(bucket_name)/(object-key-name) The object-key-name
+  element of the URI is equivalent to the prefix parameter in an S3 ListObjects API call. It
+  identifies all the objects in the specified S3 bucket whose names begin with that prefix.
+  That can be a single file or folder, or multiple files and/or folders. The specified folder
+  or folders can contain multiple vertex files and multiple edge files.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"dependencies"`: This is an optional parameter that can make a queued load request
+  contingent on the successful completion of one or more previous jobs in the queue. Neptune
+  can queue up as many as 64 load requests at a time, if their queueRequest parameters are
+  set to \"TRUE\". The dependencies parameter lets you make execution of such a queued
+  request dependent on the successful completion of one or more specified previous requests
+  in the queue. For example, if load Job-A and Job-B are independent of each other, but load
+  Job-C needs Job-A and Job-B to be finished before it begins, proceed as follows: Submit
+  load-job-A and load-job-B one after another in any order, and save their load-ids. Submit
+  load-job-C with the load-ids of the two jobs in its dependencies field: Because of the
+  dependencies parameter, the bulk loader will not start Job-C until Job-A and Job-B have
+  completed successfully. If either one of them fails, Job-C will not be executed, and its
+  status will be set to LOAD_FAILED_BECAUSE_DEPENDENCY_NOT_SATISFIED. You can set up multiple
+  levels of dependency in this way, so that the failure of one job will cause all requests
+  that are directly or indirectly dependent on it to be cancelled.
+- `"failOnError"`: failOnError   –   A flag to toggle a complete stop on an error.
+  Allowed values: \"TRUE\", \"FALSE\". Default value: \"TRUE\". When this parameter is set
+  to \"FALSE\", the loader tries to load all the data in the location specified, skipping any
+  entries with errors. When this parameter is set to \"TRUE\", the loader stops as soon as it
+  encounters an error. Data loaded up to that point persists.
+- `"mode"`: The load job mode. Allowed values: RESUME, NEW, AUTO. Default value: AUTO.
+  RESUME   –   In RESUME mode, the loader looks for a previous load from this source,
+  and if it finds one, resumes that load job. If no previous load job is found, the loader
+  stops. The loader avoids reloading files that were successfully loaded in a previous job.
+  It only tries to process failed files. If you dropped previously loaded data from your
+  Neptune cluster, that data is not reloaded in this mode. If a previous load job loaded all
+  files from the same source successfully, nothing is reloaded, and the loader returns
+  success. NEW   –   In NEW mode, the loader creates a new load request regardless of any
+  previous loads. You can use this mode to reload all the data from a source after dropping
+  previously loaded data from your Neptune cluster, or to load new data available at the same
+  source.
AUTO   –   In AUTO mode, the loader looks for a previous load job from the + same source, and if it finds one, resumes that job, just as in RESUME mode. If the loader + doesn't find a previous load job from the same source, it loads all data from the source, + just as in NEW mode. +- `"parallelism"`: The optional parallelism parameter can be set to reduce the number of + threads used by the bulk load process. Allowed values: LOW –   The number of threads + used is the number of available vCPUs divided by 8. MEDIUM –   The number of threads + used is the number of available vCPUs divided by 2. HIGH –   The number of threads + used is the same as the number of available vCPUs. OVERSUBSCRIBE –   The number of + threads used is the number of available vCPUs multiplied by 2. If this value is used, the + bulk loader takes up all available resources. This does not mean, however, that the + OVERSUBSCRIBE setting results in 100% CPU utilization. Because the load operation is I/O + bound, the highest CPU utilization to expect is in the 60% to 70% range. Default value: + HIGH The parallelism setting can sometimes result in a deadlock between threads when + loading openCypher data. When this happens, Neptune returns the LOAD_DATA_DEADLOCK error. + You can generally fix the issue by setting parallelism to a lower setting and retrying the + load command. +- `"parserConfiguration"`: parserConfiguration   –   An optional object with + additional parser configuration values. Each of the child parameters is also optional: + namedGraphUri   –   The default graph for all RDF formats when no graph is specified + (for non-quads formats and NQUAD entries with no graph). The default is + https://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph. baseUri   –   The base + URI for RDF/XML and Turtle formats. The default is https://aws.amazon.com/neptune/default. + allowEmptyStrings   –   Gremlin users need to be able to pass empty string + values(\"\") as node and edge properties when loading CSV data. If allowEmptyStrings is set + to false (the default), such empty strings are treated as nulls and are not loaded. If + allowEmptyStrings is set to true, the loader treats empty strings as valid property values + and loads them accordingly. +- `"queueRequest"`: This is an optional flag parameter that indicates whether the load + request can be queued up or not. You don't have to wait for one load job to complete + before issuing the next one, because Neptune can queue up as many as 64 jobs at a time, + provided that their queueRequest parameters are all set to \"TRUE\". The queue order of the + jobs will be first-in-first-out (FIFO). If the queueRequest parameter is omitted or set to + \"FALSE\", the load request will fail if another load job is already running. Allowed + values: \"TRUE\", \"FALSE\". Default value: \"FALSE\". +- `"updateSingleCardinalityProperties"`: updateSingleCardinalityProperties is an optional + parameter that controls how the bulk loader treats a new value for single-cardinality + vertex or edge properties. This is not supported for loading openCypher data. Allowed + values: \"TRUE\", \"FALSE\". Default value: \"FALSE\". By default, or when + updateSingleCardinalityProperties is explicitly set to \"FALSE\", the loader treats a new + value as an error, because it violates single cardinality. When + updateSingleCardinalityProperties is set to \"TRUE\", on the other hand, the bulk loader + replaces the existing value with the new one. 
If multiple edge or single-cardinality vertex + property values are provided in the source file(s) being loaded, the final value at the end + of the bulk load could be any one of those new values. The loader only guarantees that the + existing value has been replaced by one of the new ones. +- `"userProvidedEdgeIds"`: This parameter is required only when loading openCypher data + that contains relationship IDs. It must be included and set to True when openCypher + relationship IDs are explicitly provided in the load data (recommended). When + userProvidedEdgeIds is absent or set to True, an :ID column must be present in every + relationship file in the load. When userProvidedEdgeIds is present and set to False, + relationship files in the load must not contain an :ID column. Instead, the Neptune loader + automatically generates an ID for each relationship. It's useful to provide relationship + IDs explicitly so that the loader can resume loading after error in the CSV data have been + fixed, without having to reload any relationships that have already been loaded. If + relationship IDs have not been explicitly assigned, the loader cannot resume a failed load + if any relationship file has had to be corrected, and must instead reload all the + relationships. +""" +function start_loader_job( + format, iamRoleArn, region, source; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/loader", + Dict{String,Any}( + "format" => format, + "iamRoleArn" => iamRoleArn, + "region" => region, + "source" => source, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_loader_job( + format, + iamRoleArn, + region, + source, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return neptunedata( + "POST", + "/loader", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "format" => format, + "iamRoleArn" => iamRoleArn, + "region" => region, + "source" => source, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_mldata_processing_job(input_data_s3_location, processed_data_s3_location) + start_mldata_processing_job(input_data_s3_location, processed_data_s3_location, params::Dict{String,<:Any}) + +Creates a new Neptune ML data processing job for processing the graph data exported from +Neptune for training. See The dataprocessing command. When invoking this operation in a +Neptune cluster that has IAM authentication enabled, the IAM user or role making the +request must have a policy attached that allows the +neptune-db:StartMLModelDataProcessingJob IAM action in that cluster. + +# Arguments +- `input_data_s3_location`: The URI of the Amazon S3 location where you want SageMaker to + download the data needed to run the data processing job. +- `processed_data_s3_location`: The URI of the Amazon S3 location where you want SageMaker + to save the results of a data processing job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"configFileName"`: A data specification file that describes how to load the exported + graph data for training. The file is automatically generated by the Neptune export toolkit. + The default is training-data-configuration.json. +- `"id"`: A unique identifier for the new job. The default is an autogenerated UUID. 
+- `"modelType"`: One of the two model types that Neptune ML currently supports: + heterogeneous graph models (heterogeneous), and knowledge graph (kge). The default is none. + If not specified, Neptune ML chooses the model type automatically based on the data. +- `"neptuneIamRoleArn"`: The Amazon Resource Name (ARN) of an IAM role that SageMaker can + assume to perform tasks on your behalf. This must be listed in your DB cluster parameter + group or an error will occur. +- `"previousDataProcessingJobId"`: The job ID of a completed data processing job run on an + earlier version of the data. +- `"processingInstanceType"`: The type of ML instance used during data processing. Its + memory should be large enough to hold the processed dataset. The default is the smallest + ml.r5 type whose memory is ten times larger than the size of the exported graph data on + disk. +- `"processingInstanceVolumeSizeInGB"`: The disk volume size of the processing instance. + Both input data and processed data are stored on disk, so the volume size must be large + enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML chooses + the volume size automatically based on the data size. +- `"processingTimeOutInSeconds"`: Timeout in seconds for the data processing job. The + default is 86,400 (1 day). +- `"s3OutputEncryptionKMSKey"`: The Amazon Key Management Service (Amazon KMS) key that + SageMaker uses to encrypt the output of the processing job. The default is none. +- `"sagemakerIamRoleArn"`: The ARN of an IAM role for SageMaker execution. This must be + listed in your DB cluster parameter group or an error will occur. +- `"securityGroupIds"`: The VPC security group IDs. The default is None. +- `"subnets"`: The IDs of the subnets in the Neptune VPC. The default is None. +- `"volumeEncryptionKMSKey"`: The Amazon Key Management Service (Amazon KMS) key that + SageMaker uses to encrypt data on the storage volume attached to the ML compute instances + that run the training job. The default is None. +""" +function start_mldata_processing_job( + inputDataS3Location, + processedDataS3Location; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return neptunedata( + "POST", + "/ml/dataprocessing", + Dict{String,Any}( + "inputDataS3Location" => inputDataS3Location, + "processedDataS3Location" => processedDataS3Location, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_mldata_processing_job( + inputDataS3Location, + processedDataS3Location, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return neptunedata( + "POST", + "/ml/dataprocessing", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputDataS3Location" => inputDataS3Location, + "processedDataS3Location" => processedDataS3Location, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_mlmodel_training_job(data_processing_job_id, train_model_s3_location) + start_mlmodel_training_job(data_processing_job_id, train_model_s3_location, params::Dict{String,<:Any}) + +Creates a new Neptune ML model training job. See Model training using the modeltraining +command. When invoking this operation in a Neptune cluster that has IAM authentication +enabled, the IAM user or role making the request must have a policy attached that allows +the neptune-db:StartMLModelTrainingJob IAM action in that cluster. 
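A hedged sketch of starting a bulk load with the start_loader_job wrapper above; the role ARN, bucket, and option values are placeholders, and the response shape should be checked against the actual return value:

using AWS
@service Neptunedata   # assumed module name

# The IAM role must already be attached to the Neptune cluster; all values are placeholders.
load = Neptunedata.start_loader_job(
    "csv",                                                # format
    "arn:aws:iam::123456789012:role/NeptuneLoadFromS3",   # iamRoleArn (placeholder)
    "us-east-1",                                          # region of the S3 bucket
    "s3://example-bucket/neptune/load/",                  # source prefix (placeholder)
    Dict("failOnError" => "FALSE", "queueRequest" => "TRUE", "parallelism" => "MEDIUM"),
)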
+
+# Arguments
+- `data_processing_job_id`: The job ID of the completed data-processing job that has
+  created the data that the training will work with.
+- `train_model_s3_location`: The location in Amazon S3 where the model artifacts are to be
+  stored.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"baseProcessingInstanceType"`: The type of ML instance used in preparing and managing
+  training of ML models. This is a CPU instance chosen based on memory requirements for
+  processing the training data and model.
+- `"customModelTrainingParameters"`: The configuration for custom model training. This is a
+  JSON object.
+- `"enableManagedSpotTraining"`: Optimizes the cost of training machine-learning models by
+  using Amazon Elastic Compute Cloud spot instances. The default is False.
+- `"id"`: A unique identifier for the new job. The default is an autogenerated UUID.
+- `"maxHPONumberOfTrainingJobs"`: Maximum total number of training jobs to start for the
+  hyperparameter tuning job. The default is 2. Neptune ML automatically tunes the
+  hyperparameters of the machine learning model. To obtain a model that performs well, use at
+  least 10 jobs (in other words, set maxHPONumberOfTrainingJobs to 10). In general, the more
+  tuning runs, the better the results.
+- `"maxHPOParallelTrainingJobs"`: Maximum number of parallel training jobs to start for the
+  hyperparameter tuning job. The default is 2. The number of parallel jobs you can run is
+  limited by the available resources on your training instance.
+- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker
+  and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error
+  will occur.
+- `"previousModelTrainingJobId"`: The job ID of a completed model-training job that you
+  want to update incrementally based on updated data.
+- `"s3OutputEncryptionKMSKey"`: The Amazon Key Management Service (KMS) key that SageMaker
+  uses to encrypt the output of the processing job. The default is none.
+- `"sagemakerIamRoleArn"`: The ARN of an IAM role for SageMaker execution. This must be
+  listed in your DB cluster parameter group or an error will occur.
+- `"securityGroupIds"`: The VPC security group IDs. The default is None.
+- `"subnets"`: The IDs of the subnets in the Neptune VPC. The default is None.
+- `"trainingInstanceType"`: The type of ML instance used for model training. All Neptune ML
+  models support CPU, GPU, and multiGPU training. The default is ml.p3.2xlarge. Choosing the
+  right instance type for training depends on the task type, graph size, and your budget.
+- `"trainingInstanceVolumeSizeInGB"`: The disk volume size of the training instance. Both
+  input data and the output model are stored on disk, so the volume size must be large enough
+  to hold both data sets. The default is 0. If not specified or 0, Neptune ML selects a disk
+  volume size based on the recommendation generated in the data processing step.
+- `"trainingTimeOutInSeconds"`: Timeout in seconds for the training job. The default is
+  86,400 (1 day).
+- `"volumeEncryptionKMSKey"`: The Amazon Key Management Service (KMS) key that SageMaker
+  uses to encrypt data on the storage volume attached to the ML compute instances that run
+  the training job. The default is None.
+""" +function start_mlmodel_training_job( + dataProcessingJobId, + trainModelS3Location; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return neptunedata( + "POST", + "/ml/modeltraining", + Dict{String,Any}( + "dataProcessingJobId" => dataProcessingJobId, + "trainModelS3Location" => trainModelS3Location, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_mlmodel_training_job( + dataProcessingJobId, + trainModelS3Location, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return neptunedata( + "POST", + "/ml/modeltraining", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "dataProcessingJobId" => dataProcessingJobId, + "trainModelS3Location" => trainModelS3Location, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_mlmodel_transform_job(model_transform_output_s3_location) + start_mlmodel_transform_job(model_transform_output_s3_location, params::Dict{String,<:Any}) + +Creates a new model transform job. See Use a trained model to generate new model artifacts. +When invoking this operation in a Neptune cluster that has IAM authentication enabled, the +IAM user or role making the request must have a policy attached that allows the +neptune-db:StartMLModelTransformJob IAM action in that cluster. + +# Arguments +- `model_transform_output_s3_location`: The location in Amazon S3 where the model artifacts + are to be stored. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"baseProcessingInstanceType"`: The type of ML instance used in preparing and managing + training of ML models. This is an ML compute instance chosen based on memory requirements + for processing the training data and model. +- `"baseProcessingInstanceVolumeSizeInGB"`: The disk volume size of the training instance + in gigabytes. The default is 0. Both input data and the output model are stored on disk, so + the volume size must be large enough to hold both data sets. If not specified or 0, Neptune + ML selects a disk volume size based on the recommendation generated in the data processing + step. +- `"customModelTransformParameters"`: Configuration information for a model transform using + a custom model. The customModelTransformParameters object contains the following fields, + which must have values compatible with the saved model parameters from the training job: +- `"dataProcessingJobId"`: The job ID of a completed data-processing job. You must include + either dataProcessingJobId and a mlModelTrainingJobId, or a trainingJobName. +- `"id"`: A unique identifier for the new job. The default is an autogenerated UUID. +- `"mlModelTrainingJobId"`: The job ID of a completed model-training job. You must include + either dataProcessingJobId and a mlModelTrainingJobId, or a trainingJobName. +- `"neptuneIamRoleArn"`: The ARN of an IAM role that provides Neptune access to SageMaker + and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error + will occur. +- `"s3OutputEncryptionKMSKey"`: The Amazon Key Management Service (KMS) key that SageMaker + uses to encrypt the output of the processing job. The default is none. +- `"sagemakerIamRoleArn"`: The ARN of an IAM role for SageMaker execution. This must be + listed in your DB cluster parameter group or an error will occur. +- `"securityGroupIds"`: The VPC security group IDs. The default is None. 
+- `"subnets"`: The IDs of the subnets in the Neptune VPC. The default is None. +- `"trainingJobName"`: The name of a completed SageMaker training job. You must include + either dataProcessingJobId and a mlModelTrainingJobId, or a trainingJobName. +- `"volumeEncryptionKMSKey"`: The Amazon Key Management Service (KMS) key that SageMaker + uses to encrypt data on the storage volume attached to the ML compute instances that run + the training job. The default is None. +""" +function start_mlmodel_transform_job( + modelTransformOutputS3Location; aws_config::AbstractAWSConfig=global_aws_config() +) + return neptunedata( + "POST", + "/ml/modeltransform", + Dict{String,Any}( + "modelTransformOutputS3Location" => modelTransformOutputS3Location + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_mlmodel_transform_job( + modelTransformOutputS3Location, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return neptunedata( + "POST", + "/ml/modeltransform", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "modelTransformOutputS3Location" => modelTransformOutputS3Location + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/network_firewall.jl b/src/services/network_firewall.jl index 3b2666c28b..332460ed1e 100644 --- a/src/services/network_firewall.jl +++ b/src/services/network_firewall.jl @@ -314,6 +314,10 @@ specification in your request using either RuleGroup or Rules. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AnalyzeRuleGroup"`: Indicates whether you want Network Firewall to analyze the + stateless rules in the rule group for rule behavior such as asymmetric routing. If set to + TRUE, Network Firewall runs the analysis and then creates the rule group for you. To run + the stateless rule group analyzer without creating the rule group, set DryRun to TRUE. - `"Description"`: A description of the rule group. - `"DryRun"`: Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request. If set to TRUE, Network Firewall checks whether the @@ -378,16 +382,16 @@ end create_tlsinspection_configuration(tlsinspection_configuration, tlsinspection_configuration_name, params::Dict{String,<:Any}) Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration -contains the Certificate Manager certificate references that Network Firewall uses to -decrypt and re-encrypt inbound traffic. After you create a TLS inspection configuration, -you associate it with a firewall policy. To update the settings for a TLS inspection -configuration, use UpdateTLSInspectionConfiguration. To manage a TLS inspection -configuration's tags, use the standard Amazon Web Services resource tagging operations, -ListTagsForResource, TagResource, and UntagResource. To retrieve information about TLS -inspection configurations, use ListTLSInspectionConfigurations and -DescribeTLSInspectionConfiguration. For more information about TLS inspection -configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the -Network Firewall Developer Guide. +contains Certificate Manager certificate associations between and the scope configurations +that Network Firewall uses to decrypt and re-encrypt traffic traveling through your +firewall. 
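A hedged sketch chaining the three Neptune ML wrappers above (data processing, model training, model transform); the S3 URIs and job IDs are placeholders, and in practice the job IDs come from the responses of the earlier calls:

using AWS
@service Neptunedata   # assumed module name

Neptunedata.start_mldata_processing_job(
    "s3://example-bucket/neptune-export/",           # inputDataS3Location (placeholder)
    "s3://example-bucket/neptune-ml/processed/",     # processedDataS3Location (placeholder)
)
Neptunedata.start_mlmodel_training_job(
    "dp-example-job-id",                             # dataProcessingJobId (placeholder)
    "s3://example-bucket/neptune-ml/model/",         # trainModelS3Location (placeholder)
)
Neptunedata.start_mlmodel_transform_job(
    "s3://example-bucket/neptune-ml/transform/",     # modelTransformOutputS3Location
    Dict(
        "dataProcessingJobId" => "dp-example-job-id",     # placeholder IDs
        "mlModelTrainingJobId" => "mt-example-job-id",
    ),
)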
After you create a TLS inspection configuration, you can associate it with a new +firewall policy. To update the settings for a TLS inspection configuration, use +UpdateTLSInspectionConfiguration. To manage a TLS inspection configuration's tags, use the +standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, +and UntagResource. To retrieve information about TLS inspection configurations, use +ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration. For more +information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS +inspection configurations in the Network Firewall Developer Guide. # Arguments - `tlsinspection_configuration`: The object that defines a TLS inspection configuration. @@ -395,13 +399,13 @@ Network Firewall Developer Guide. configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration. Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its - destination. To use a TLS inspection configuration, you add it to a Network Firewall + destination. To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as - a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection - configuration from more than one firewall policy, and you can use a firewall policy in more - than one firewall. For more information about using TLS inspection configurations, see - Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall - Developer Guide. + a proxy service to decrypt and inspect the traffic traveling through your firewalls. You + can reference a TLS inspection configuration from more than one firewall policy, and you + can use a firewall policy in more than one firewall. For more information about using TLS + inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection + configurations in the Network Firewall Developer Guide. - `tlsinspection_configuration_name`: The descriptive name of the TLS inspection configuration. You can't change the name of a TLS inspection configuration after you create it. @@ -748,6 +752,9 @@ Returns the data objects for the specified rule group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AnalyzeRuleGroup"`: Indicates whether you want Network Firewall to analyze the + stateless rules in the rule group for rule behavior such as asymmetric routing. If set to + TRUE, Network Firewall runs the analysis. - `"RuleGroupArn"`: The Amazon Resource Name (ARN) of the rule group. You must specify the ARN or the name, and you can specify both. - `"RuleGroupName"`: The descriptive name of the rule group. You can't change the name of a @@ -1393,7 +1400,9 @@ end Updates the properties of the specified firewall policy. # Arguments -- `firewall_policy`: The updated firewall policy to use for the firewall. +- `firewall_policy`: The updated firewall policy to use for the firewall. You can't add or + remove a TLSInspectionConfiguration after you create a firewall policy. However, you can + replace an existing TLS inspection configuration with another TLSInspectionConfiguration. - `update_token`: A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. 
The token marks the state of the policy resource at the time of the request. To make changes to the policy, you provide the token @@ -1585,6 +1594,10 @@ updated object to this call. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AnalyzeRuleGroup"`: Indicates whether you want Network Firewall to analyze the + stateless rules in the rule group for rule behavior such as asymmetric routing. If set to + TRUE, Network Firewall runs the analysis and then updates the rule group for you. To run + the stateless rule group analyzer without updating the rule group, set DryRun to TRUE. - `"Description"`: A description of the rule group. - `"DryRun"`: Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request. If set to TRUE, Network Firewall checks whether the @@ -1704,11 +1717,12 @@ end update_tlsinspection_configuration(tlsinspection_configuration, update_token, params::Dict{String,<:Any}) Updates the TLS inspection configuration settings for the specified TLS inspection -configuration. You use a TLS inspection configuration by reference in one or more firewall -policies. When you modify a TLS inspection configuration, you modify all firewall policies -that use the TLS inspection configuration. To update a TLS inspection configuration, first -call DescribeTLSInspectionConfiguration to retrieve the current TLSInspectionConfiguration -object, update the object as needed, and then provide the updated object to this call. +configuration. You use a TLS inspection configuration by referencing it in one or more +firewall policies. When you modify a TLS inspection configuration, you modify all firewall +policies that use the TLS inspection configuration. To update a TLS inspection +configuration, first call DescribeTLSInspectionConfiguration to retrieve the current +TLSInspectionConfiguration object, update the object as needed, and then provide the +updated object to this call. # Arguments - `tlsinspection_configuration`: The object that defines a TLS inspection configuration. @@ -1716,13 +1730,13 @@ object, update the object as needed, and then provide the updated object to this configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration. Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its - destination. To use a TLS inspection configuration, you add it to a Network Firewall + destination. To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as - a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection - configuration from more than one firewall policy, and you can use a firewall policy in more - than one firewall. For more information about using TLS inspection configurations, see - Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall - Developer Guide. + a proxy service to decrypt and inspect the traffic traveling through your firewalls. You + can reference a TLS inspection configuration from more than one firewall policy, and you + can use a firewall policy in more than one firewall. For more information about using TLS + inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection + configurations in the Network Firewall Developer Guide. 
- `update_token`: A token used for optimistic locking. Network Firewall returns a token to your requests that access the TLS inspection configuration. The token marks the state of the TLS inspection configuration resource at the time of the request. To make changes to diff --git a/src/services/networkmanager.jl b/src/services/networkmanager.jl index 94eebeff00..6a34032924 100644 --- a/src/services/networkmanager.jl +++ b/src/services/networkmanager.jl @@ -330,8 +330,8 @@ function create_connect_attachment( end """ - create_connect_peer(connect_attachment_id, inside_cidr_blocks, peer_address) - create_connect_peer(connect_attachment_id, inside_cidr_blocks, peer_address, params::Dict{String,<:Any}) + create_connect_peer(connect_attachment_id, peer_address) + create_connect_peer(connect_attachment_id, peer_address, params::Dict{String,<:Any}) Creates a core network Connect peer for a specified core network connect attachment between a core network and an appliance. The peer address and transit gateway address must be the @@ -339,28 +339,28 @@ same IP address family (IPv4 or IPv6). # Arguments - `connect_attachment_id`: The ID of the connection attachment. -- `inside_cidr_blocks`: The inside IP addresses used for BGP peering. - `peer_address`: The Connect peer address. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BgpOptions"`: The Connect peer BGP options. +- `"BgpOptions"`: The Connect peer BGP options. This only applies only when the protocol is + GRE. - `"ClientToken"`: The client token associated with the request. -- `"CoreNetworkAddress"`: A Connect peer core network address. +- `"CoreNetworkAddress"`: A Connect peer core network address. This only applies only when + the protocol is GRE. +- `"InsideCidrBlocks"`: The inside IP addresses used for BGP peering. +- `"SubnetArn"`: The subnet ARN for the Connect peer. This only applies only when the + protocol is NO_ENCAP. - `"Tags"`: The tags associated with the peer request. """ function create_connect_peer( - ConnectAttachmentId, - InsideCidrBlocks, - PeerAddress; - aws_config::AbstractAWSConfig=global_aws_config(), + ConnectAttachmentId, PeerAddress; aws_config::AbstractAWSConfig=global_aws_config() ) return networkmanager( "POST", "/connect-peers", Dict{String,Any}( "ConnectAttachmentId" => ConnectAttachmentId, - "InsideCidrBlocks" => InsideCidrBlocks, "PeerAddress" => PeerAddress, "ClientToken" => string(uuid4()), ); @@ -370,7 +370,6 @@ function create_connect_peer( end function create_connect_peer( ConnectAttachmentId, - InsideCidrBlocks, PeerAddress, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -383,7 +382,6 @@ function create_connect_peer( _merge, Dict{String,Any}( "ConnectAttachmentId" => ConnectAttachmentId, - "InsideCidrBlocks" => InsideCidrBlocks, "PeerAddress" => PeerAddress, "ClientToken" => string(uuid4()), ), @@ -2025,10 +2023,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: The token for the next page of results. - `"resourceType"`: The resource type. 
The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported - resource types for Network Manager: connection device link site The - following are the supported resource types for Amazon VPC: customer-gateway - transit-gateway transit-gateway-attachment transit-gateway-connect-peer - transit-gateway-route-table vpn-connection + resource types for Network Manager: attachment connect-peer connection + core-network device link peering site The following are the supported + resource types for Amazon VPC: customer-gateway transit-gateway + transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table + vpn-connection """ function get_network_resource_counts( globalNetworkId; aws_config::AbstractAWSConfig=global_aws_config() @@ -2074,10 +2073,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"resourceArn"`: The ARN of the gateway. - `"resourceType"`: The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported - resource types for Network Manager: connection device link site The - following are the supported resource types for Amazon VPC: customer-gateway - transit-gateway transit-gateway-attachment transit-gateway-connect-peer - transit-gateway-route-table vpn-connection + resource types for Network Manager: attachment connect-peer connection + core-network device link peering site The following are the supported + resource types for Amazon VPC: customer-gateway transit-gateway + transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table + vpn-connection """ function get_network_resource_relationships( globalNetworkId; aws_config::AbstractAWSConfig=global_aws_config() @@ -2124,17 +2124,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"registeredGatewayArn"`: The ARN of the gateway. - `"resourceArn"`: The ARN of the resource. - `"resourceType"`: The resource type. The following are the supported resource types for - Direct Connect: dxcon - The definition model is Connection. dx-gateway - The - definition model is DirectConnectGateway. dx-vif - The definition model is - VirtualInterface. The following are the supported resource types for Network Manager: - connection - The definition model is Connection. device - The definition model is - Device. link - The definition model is Link. site - The definition model is Site. - The following are the supported resource types for Amazon VPC: customer-gateway - The - definition model is CustomerGateway. transit-gateway - The definition model is - TransitGateway. transit-gateway-attachment - The definition model is - TransitGatewayAttachment. transit-gateway-connect-peer - The definition model is - TransitGatewayConnectPeer. transit-gateway-route-table - The definition model is - TransitGatewayRouteTable. vpn-connection - The definition model is VpnConnection. 
+ Direct Connect: dxcon dx-gateway dx-vif The following are the supported + resource types for Network Manager: attachment connect-peer connection + core-network device link peering site The following are the supported + resource types for Amazon VPC: customer-gateway transit-gateway + transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table + vpn-connection """ function get_network_resources( globalNetworkId; aws_config::AbstractAWSConfig=global_aws_config() @@ -2234,12 +2229,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: The token for the next page of results. - `"registeredGatewayArn"`: The ARN of the gateway. - `"resourceArn"`: The ARN of the resource. -- `"resourceType"`: The resource type. The following are the supported resource types for - Direct Connect: dxcon dx-gateway dx-vif The following are the supported - resource types for Network Manager: connection device link site The - following are the supported resource types for Amazon VPC: customer-gateway - transit-gateway transit-gateway-attachment transit-gateway-connect-peer - transit-gateway-route-table vpn-connection +- `"resourceType"`: The resource type. The following are the supported resource types: + connect-peer transit-gateway-connect-peer vpn-connection """ function get_network_telemetry( globalNetworkId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/networkmonitor.jl b/src/services/networkmonitor.jl new file mode 100644 index 0000000000..2d0c22924c --- /dev/null +++ b/src/services/networkmonitor.jl @@ -0,0 +1,489 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: networkmonitor +using AWS.Compat +using AWS.UUIDs + +""" + create_monitor(monitor_name) + create_monitor(monitor_name, params::Dict{String,<:Any}) + +Creates a monitor between a source subnet and destination IP address. Within a monitor +you'll create one or more probes that monitor network traffic between your source Amazon +Web Services VPC subnets and your destination IP addresses. Each probe then aggregates and +sends metrics to Amazon CloudWatch. You can also create a monitor with probes using this +command. For each probe, you define the following: source—The subnet IDs where the +probes will be created. destination— The target destination IP address for the probe. + destinationPort—Required only if the protocol is TCP. protocol—The communication +protocol between the source and destination. This will be either TCP or ICMP. +packetSize—The size of the packets. This must be a number between 56 and 8500. +(Optional) tags —Key-value pairs created and assigned to the probe. + +# Arguments +- `monitor_name`: The name identifying the monitor. It can contain only letters, + underscores (_), or dashes (-), and can be up to 200 characters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"aggregationPeriod"`: The time, in seconds, that metrics are aggregated and sent to + Amazon CloudWatch. Valid values are either 30 or 60. 60 is the default if no period is + chosen. +- `"clientToken"`: Unique, case-sensitive identifier to ensure the idempotency of the + request. Only returned if a client token was provided in the request. +- `"probes"`: Displays a list of all of the probes created for a monitor. +- `"tags"`: The list of key-value pairs created and assigned to the monitor. 
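+
+# Examples
+A minimal usage sketch; the monitor name and aggregation period value below are
+illustrative placeholders, not values taken from the service definition:
+
+    create_monitor(
+        "app-connectivity-monitor",
+        Dict{String,Any}("aggregationPeriod" => 60);
+        aws_config=global_aws_config(),
+    )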
+""" +function create_monitor(monitorName; aws_config::AbstractAWSConfig=global_aws_config()) + return networkmonitor( + "POST", + "/monitors", + Dict{String,Any}("monitorName" => monitorName, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_monitor( + monitorName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "POST", + "/monitors", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "monitorName" => monitorName, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_probe(monitor_name, probe) + create_probe(monitor_name, probe, params::Dict{String,<:Any}) + +Create a probe within a monitor. Once you create a probe, and it begins monitoring your +network traffic, you'll incur billing charges for that probe. This action requires the +monitorName parameter. Run ListMonitors to get a list of monitor names. Note the name of +the monitorName you want to create the probe for. + +# Arguments +- `monitor_name`: The name of the monitor to associated with the probe. +- `probe`: Describes the details of an individual probe for a monitor. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Unique, case-sensitive identifier to ensure the idempotency of the + request. Only returned if a client token was provided in the request. +- `"tags"`: The list of key-value pairs created and assigned to the probe. +""" +function create_probe(monitorName, probe; aws_config::AbstractAWSConfig=global_aws_config()) + return networkmonitor( + "POST", + "/monitors/$(monitorName)/probes", + Dict{String,Any}("probe" => probe, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_probe( + monitorName, + probe, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "POST", + "/monitors/$(monitorName)/probes", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("probe" => probe, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_monitor(monitor_name) + delete_monitor(monitor_name, params::Dict{String,<:Any}) + +Deletes a specified monitor. This action requires the monitorName parameter. Run +ListMonitors to get a list of monitor names. + +# Arguments +- `monitor_name`: The name of the monitor to delete. + +""" +function delete_monitor(monitorName; aws_config::AbstractAWSConfig=global_aws_config()) + return networkmonitor( + "DELETE", + "/monitors/$(monitorName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_monitor( + monitorName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "DELETE", + "/monitors/$(monitorName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_probe(monitor_name, probe_id) + delete_probe(monitor_name, probe_id, params::Dict{String,<:Any}) + +Deletes the specified probe. Once a probe is deleted you'll no longer incur any billing +fees for that probe. This action requires both the monitorName and probeId parameters. Run +ListMonitors to get a list of monitor names. Run GetMonitor to get a list of probes and +probe IDs. 
You can only delete a single probe at a time using this action. + +# Arguments +- `monitor_name`: The name of the monitor to delete. +- `probe_id`: The ID of the probe to delete. + +""" +function delete_probe( + monitorName, probeId; aws_config::AbstractAWSConfig=global_aws_config() +) + return networkmonitor( + "DELETE", + "/monitors/$(monitorName)/probes/$(probeId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_probe( + monitorName, + probeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "DELETE", + "/monitors/$(monitorName)/probes/$(probeId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_monitor(monitor_name) + get_monitor(monitor_name, params::Dict{String,<:Any}) + +Returns details about a specific monitor. This action requires the monitorName parameter. +Run ListMonitors to get a list of monitor names. + +# Arguments +- `monitor_name`: The name of the monitor that details are returned for. + +""" +function get_monitor(monitorName; aws_config::AbstractAWSConfig=global_aws_config()) + return networkmonitor( + "GET", + "/monitors/$(monitorName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_monitor( + monitorName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "GET", + "/monitors/$(monitorName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_probe(monitor_name, probe_id) + get_probe(monitor_name, probe_id, params::Dict{String,<:Any}) + +Returns the details about a probe. This action requires both the monitorName and probeId +parameters. Run ListMonitors to get a list of monitor names. Run GetMonitor to get a list +of probes and probe IDs. + +# Arguments +- `monitor_name`: The name of the monitor associated with the probe. Run ListMonitors to + get a list of monitor names. +- `probe_id`: The ID of the probe to get information about. Run GetMonitor action to get a + list of probes and probe IDs for the monitor. + +""" +function get_probe(monitorName, probeId; aws_config::AbstractAWSConfig=global_aws_config()) + return networkmonitor( + "GET", + "/monitors/$(monitorName)/probes/$(probeId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_probe( + monitorName, + probeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "GET", + "/monitors/$(monitorName)/probes/$(probeId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_monitors() + list_monitors(params::Dict{String,<:Any}) + +Returns a list of all of your monitors. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return with a single call. To retrieve + the remaining results, make another call with the returned nextToken value. If MaxResults + is given a value larger than 100, only 100 results are returned. +- `"nextToken"`: The token for the next page of results. +- `"state"`: The list of all monitors and their states. 
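+
+# Examples
+A minimal usage sketch; the state filter value shown is an illustrative placeholder, not a
+value taken from this definition:
+
+    list_monitors(Dict{String,Any}("state" => "ACTIVE"); aws_config=global_aws_config())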
+""" +function list_monitors(; aws_config::AbstractAWSConfig=global_aws_config()) + return networkmonitor( + "GET", "/monitors"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_monitors( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return networkmonitor( + "GET", "/monitors", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags assigned to this resource. + +# Arguments +- `resource_arn`: The + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return networkmonitor( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds key-value pairs to a monitor or probe. + +# Arguments +- `resource_arn`: The ARN of the monitor or probe to tag. +- `tags`: The list of key-value pairs assigned to the monitor or probe. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return networkmonitor( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes a key-value pair from a monitor or probe. + +# Arguments +- `resource_arn`: The ARN of the monitor or probe that the tag should be removed from. +- `tag_keys`: The key-value pa + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return networkmonitor( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return networkmonitor( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_monitor(aggregation_period, monitor_name) + update_monitor(aggregation_period, monitor_name, params::Dict{String,<:Any}) + +Updates the aggregationPeriod for a monitor. Monitors support an aggregationPeriod of +either 30 or 60 seconds. This action requires the monitorName and probeId parameter. Run +ListMonitors to get a list of monitor names. + +# Arguments +- `aggregation_period`: The aggregation time, in seconds, to change to. This must be either + 30 or 60. +- `monitor_name`: The name of the monitor to update. 
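+
+# Examples
+A minimal usage sketch; the monitor name is an illustrative placeholder:
+
+    update_monitor(30, "app-connectivity-monitor"; aws_config=global_aws_config())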
+
+"""
+function update_monitor(
+    aggregationPeriod, monitorName; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return networkmonitor(
+        "PATCH",
+        "/monitors/$(monitorName)",
+        Dict{String,Any}("aggregationPeriod" => aggregationPeriod);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_monitor(
+    aggregationPeriod,
+    monitorName,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return networkmonitor(
+        "PATCH",
+        "/monitors/$(monitorName)",
+        Dict{String,Any}(
+            mergewith(
+                _merge, Dict{String,Any}("aggregationPeriod" => aggregationPeriod), params
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    update_probe(monitor_name, probe_id)
+    update_probe(monitor_name, probe_id, params::Dict{String,<:Any})
+
+Updates a monitor probe. This action requires both the monitorName and probeId parameters.
+Run ListMonitors to get a list of monitor names. Run GetMonitor to get a list of probes and
+probe IDs. You can update the following probe settings: state—The state of the probe.
+destination—The target destination IP address for the probe.
+destinationPort—Required only if the protocol is TCP. protocol—The communication
+protocol between the source and destination. This will be either TCP or ICMP.
+packetSize—The size of the packets. This must be a number between 56 and 8500.
+(Optional) tags—Key-value pairs created and assigned to the probe.
+
+# Arguments
+- `monitor_name`: The name of the monitor that contains the probe to update.
+- `probe_id`: The ID of the probe to update.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"destination"`: The updated IP address for the probe destination. This must be either an
+  IPv4 or IPv6 address.
+- `"destinationPort"`: The updated port for the probe destination. This is required only if
+  the protocol is TCP and must be a number between 1 and 65536.
+- `"packetSize"`: The updated packet size for network traffic between the source and
+  destination. This must be a number between 56 and 8500.
+- `"protocol"`: The updated network protocol for the destination. This can be either TCP or
+  ICMP. If the protocol is TCP, then destinationPort is also required.
+- `"state"`: The state of the probe update.
+"""
+function update_probe(
+    monitorName, probeId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return networkmonitor(
+        "PATCH",
+        "/monitors/$(monitorName)/probes/$(probeId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_probe(
+    monitorName,
+    probeId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return networkmonitor(
+        "PATCH",
+        "/monitors/$(monitorName)/probes/$(probeId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
diff --git a/src/services/oam.jl b/src/services/oam.jl
index cffd805daf..d790345944 100644
--- a/src/services/oam.jl
+++ b/src/services/oam.jl
@@ -9,6 +9,9 @@ using AWS.UUIDs
     create_link(label_template, resource_types, sink_identifier, params::Dict{String,<:Any})
 
 Creates a link between a source account and a sink that you have created in a monitoring
When you create a link, you can optionally specify filters that specify which +metric namespaces and which log groups are shared from the source account to the monitoring account. Before you create a link, you must create a sink in the monitoring account and create a sink policy in that account. The sink policy must permit the source account to link to it. You can grant permission to source accounts by granting permission to an entire @@ -29,6 +32,9 @@ Each source account can be linked to as many as five monitoring accounts. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LinkConfiguration"`: Use this structure to optionally create filters that specify that + only some metric namespaces or log groups are to be shared from the source account to the + monitoring account. - `"Tags"`: Assigns one or more tags (key-value pairs) to the link. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. For @@ -88,7 +94,8 @@ account in CloudWatch cross-account observability. A sink is a resource that rep attachment point in a monitoring account. Source accounts can link to the sink to send observability data. After you create a sink, you must create a sink policy that allows source accounts to attach to it. For more information, see PutSinkPolicy. Each account can -contain one sink. If you delete a sink, you can then create a new one in that account. +contain one sink per Region. If you delete a sink, you can then create a new one in that +Region. # Arguments - `name`: A name for the sink. @@ -443,8 +450,9 @@ to the monitoring account sink. When you create a sink policy, you can grant per all accounts in an organization or to individual accounts. You can also use a sink policy to limit the types of data that is shared. The three types that you can allow or deny are: Metrics - Specify with AWS::CloudWatch::Metric Log groups - Specify with -AWS::Logs::LogGroup Traces - Specify with AWS::XRay::Trace See the examples in this -section to see how to specify permitted source accounts and data types. +AWS::Logs::LogGroup Traces - Specify with AWS::XRay::Trace Application Insights - +Applications - Specify with AWS::ApplicationInsights::Application See the examples in +this section to see how to specify permitted source accounts and data types. # Arguments - `policy`: The JSON policy to use. If you are updating an existing policy, the entire @@ -584,7 +592,10 @@ end Use this operation to change what types of data are shared from a source account to its linked monitoring account sink. You can't change the sink or change the monitoring account -with this operation. To update the list of tags associated with the sink, use TagResource. +with this operation. When you update a link, you can optionally specify filters that +specify which metric namespaces and which log groups are shared from the source account to +the monitoring account. To update the list of tags associated with the sink, use +TagResource. # Arguments - `identifier`: The ARN of the link that you want to update. @@ -592,6 +603,10 @@ with this operation. To update the list of tags associated with the sink, use Ta account will send to the monitoring account. Your input here replaces the current set of data types that are shared. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"LinkConfiguration"`: Use this structure to filter which metric namespaces and which log + groups are to be shared from the source account to the monitoring account. """ function update_link( Identifier, ResourceTypes; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/omics.jl b/src/services/omics.jl index d0b1719fe9..09f1fa0c4d 100644 --- a/src/services/omics.jl +++ b/src/services/omics.jl @@ -8,12 +8,11 @@ using AWS.UUIDs abort_multipart_read_set_upload(sequence_store_id, upload_id) abort_multipart_read_set_upload(sequence_store_id, upload_id, params::Dict{String,<:Any}) - Stops a multipart upload. +Stops a multipart upload. # Arguments -- `sequence_store_id`: The sequence store ID for the store involved in the multipart - upload. -- `upload_id`: The ID for the multipart upload. +- `sequence_store_id`: The sequence store ID for the store involved in the multipart upload. +- `upload_id`: The ID for the multipart upload. """ function abort_multipart_read_set_upload( @@ -41,6 +40,33 @@ function abort_multipart_read_set_upload( ) end +""" + accept_share(share_id) + accept_share(share_id, params::Dict{String,<:Any}) + +Accept a resource share request. + +# Arguments +- `share_id`: The ID of the resource share. + +""" +function accept_share(shareId; aws_config::AbstractAWSConfig=global_aws_config()) + return omics( + "POST", "/share/$(shareId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function accept_share( + shareId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "POST", + "/share/$(shareId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_delete_read_set(ids, sequence_store_id) batch_delete_read_set(ids, sequence_store_id, params::Dict{String,<:Any}) @@ -171,13 +197,12 @@ end complete_multipart_read_set_upload(parts, sequence_store_id, upload_id) complete_multipart_read_set_upload(parts, sequence_store_id, upload_id, params::Dict{String,<:Any}) - Concludes a multipart upload once you have uploaded all the components. +Concludes a multipart upload once you have uploaded all the components. # Arguments -- `parts`: The individual uploads or parts of a multipart upload. -- `sequence_store_id`: The sequence store ID for the store involved in the multipart - upload. -- `upload_id`: The ID for the multipart upload. +- `parts`: The individual uploads or parts of a multipart upload. +- `sequence_store_id`: The sequence store ID for the store involved in the multipart upload. +- `upload_id`: The ID for the multipart upload. """ function complete_multipart_read_set_upload( @@ -224,6 +249,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"sseConfig"`: Server-side encryption (SSE) settings for the store. - `"storeOptions"`: File parsing options for the annotation store. - `"tags"`: Tags for the store. +- `"versionName"`: The name given to an annotation store version to distinguish it from + other versions. 
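+
+# Examples
+A minimal usage sketch; the store format, version name, and tag values below are
+illustrative placeholders, not values taken from this definition:
+
+    create_annotation_store(
+        "VCF",
+        Dict{String,Any}("versionName" => "v1", "tags" => Dict("project" => "demo"));
+        aws_config=global_aws_config(),
+    )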
""" function create_annotation_store( storeFormat; aws_config::AbstractAWSConfig=global_aws_config() @@ -253,31 +280,75 @@ function create_annotation_store( end """ - create_multipart_read_set_upload(name, reference_arn, sample_id, sequence_store_id, source_file_type, subject_id) - create_multipart_read_set_upload(name, reference_arn, sample_id, sequence_store_id, source_file_type, subject_id, params::Dict{String,<:Any}) + create_annotation_store_version(name, version_name) + create_annotation_store_version(name, version_name, params::Dict{String,<:Any}) - Begins a multipart read set upload. + Creates a new version of an annotation store. # Arguments -- `name`: The name of the read set. -- `reference_arn`: The ARN of the reference. -- `sample_id`: The source's sample ID. -- `sequence_store_id`: The sequence store ID for the store that is the destination of the +- `name`: The name of an annotation store version from which versions are being created. +- `version_name`: The name given to an annotation store version to distinguish it from + other versions. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of an annotation store version. +- `"tags"`: Any tags added to annotation store version. +- `"versionOptions"`: The options for an annotation store version. +""" +function create_annotation_store_version( + name, versionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "POST", + "/annotationStore/$(name)/version", + Dict{String,Any}("versionName" => versionName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_annotation_store_version( + name, + versionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return omics( + "POST", + "/annotationStore/$(name)/version", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("versionName" => versionName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_multipart_read_set_upload(name, sample_id, sequence_store_id, source_file_type, subject_id) + create_multipart_read_set_upload(name, sample_id, sequence_store_id, source_file_type, subject_id, params::Dict{String,<:Any}) + +Begins a multipart read set upload. + +# Arguments +- `name`: The name of the read set. +- `sample_id`: The source's sample ID. +- `sequence_store_id`: The sequence store ID for the store that is the destination of the multipart uploads. -- `source_file_type`: The type of file being uploaded. -- `subject_id`: The source's subject ID. +- `source_file_type`: The type of file being uploaded. +- `subject_id`: The source's subject ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: An idempotency token that can be used to avoid triggering multiple +- `"clientToken"`: An idempotency token that can be used to avoid triggering multiple multipart uploads. -- `"description"`: The description of the read set. -- `"generatedFrom"`: Where the source originated. -- `"tags"`: Any tags to add to the read set. +- `"description"`: The description of the read set. +- `"generatedFrom"`: Where the source originated. +- `"referenceArn"`: The ARN of the reference. +- `"tags"`: Any tags to add to the read set. 
""" function create_multipart_read_set_upload( name, - referenceArn, sampleId, sequenceStoreId, sourceFileType, @@ -289,7 +360,6 @@ function create_multipart_read_set_upload( "/sequencestore/$(sequenceStoreId)/upload", Dict{String,Any}( "name" => name, - "referenceArn" => referenceArn, "sampleId" => sampleId, "sourceFileType" => sourceFileType, "subjectId" => subjectId, @@ -300,7 +370,6 @@ function create_multipart_read_set_upload( end function create_multipart_read_set_upload( name, - referenceArn, sampleId, sequenceStoreId, sourceFileType, @@ -316,7 +385,6 @@ function create_multipart_read_set_upload( _merge, Dict{String,Any}( "name" => name, - "referenceArn" => referenceArn, "sampleId" => sampleId, "sourceFileType" => sourceFileType, "subjectId" => subjectId, @@ -381,7 +449,7 @@ Creates a run group. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxCpus"`: The maximum number of CPUs to use in the group. - `"maxDuration"`: A maximum run time for the group in minutes. -- `"maxGpus"`: The maximum GPUs that can be used by a run group. +- `"maxGpus"`: The maximum GPUs that can be used by a run group. - `"maxRuns"`: The maximum number of concurrent runs for the group. - `"name"`: A name for the group. - `"tags"`: Tags for the group. @@ -425,7 +493,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"clientToken"`: To ensure that requests don't run multiple times, specify a unique token for each request. - `"description"`: A description for the store. -- `"fallbackLocation"`: An S3 location that is used to store files that have failed a +- `"eTagAlgorithmFamily"`: The ETag algorithm family to use for ingested read sets. +- `"fallbackLocation"`: An S3 location that is used to store files that have failed a direct upload. - `"sseConfig"`: Server-side encryption (SSE) settings for the store. - `"tags"`: Tags for the store. @@ -451,6 +520,61 @@ function create_sequence_store( ) end +""" + create_share(principal_subscriber, resource_arn) + create_share(principal_subscriber, resource_arn, params::Dict{String,<:Any}) + +Creates a cross-account shared resource. The resource owner makes an offer to share the +resource with the principal subscriber (an AWS user with a different account than the +resource owner). The following resources support cross-account sharing: Healthomics +variant stores Healthomics annotation stores Private workflows + +# Arguments +- `principal_subscriber`: The principal subscriber is the account being offered shared + access to the resource. +- `resource_arn`: The ARN of the resource to be shared. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"shareName"`: A name that the owner defines for the share. 
+""" +function create_share( + principalSubscriber, resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "POST", + "/share", + Dict{String,Any}( + "principalSubscriber" => principalSubscriber, "resourceArn" => resourceArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_share( + principalSubscriber, + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return omics( + "POST", + "/share", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "principalSubscriber" => principalSubscriber, + "resourceArn" => resourceArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_variant_store(reference) create_variant_store(reference, params::Dict{String,<:Any}) @@ -504,7 +628,7 @@ Creates a workflow. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"accelerators"`: The computational accelerator specified to run the workflow. +- `"accelerators"`: The computational accelerator specified to run the workflow. - `"definitionUri"`: The URI of a definition for the workflow. - `"definitionZip"`: A ZIP archive for the workflow. - `"description"`: A description for the workflow. @@ -512,7 +636,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"main"`: The path of the main definition file for the workflow. - `"name"`: A name for the workflow. - `"parameterTemplate"`: A parameter template for the workflow. -- `"storageCapacity"`: A storage capacity for the workflow in gigabytes. +- `"storageCapacity"`: The storage capacity for the workflow in gibibytes. - `"tags"`: Tags for the workflow. """ function create_workflow(requestId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -573,6 +697,49 @@ function delete_annotation_store( ) end +""" + delete_annotation_store_versions(name, versions) + delete_annotation_store_versions(name, versions, params::Dict{String,<:Any}) + + Deletes one or multiple versions of an annotation store. + +# Arguments +- `name`: The name of the annotation store from which versions are being deleted. +- `versions`: The versions of an annotation store to be deleted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"force"`: Forces the deletion of an annotation store version when imports are + in-progress.. +""" +function delete_annotation_store_versions( + name, versions; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "POST", + "/annotationStore/$(name)/versions/delete", + Dict{String,Any}("versions" => versions); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_annotation_store_versions( + name, + versions, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return omics( + "POST", + "/annotationStore/$(name)/versions/delete", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("versions" => versions), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_reference(id, reference_store_id) delete_reference(id, reference_store_id, params::Dict{String,<:Any}) @@ -723,6 +890,38 @@ function delete_sequence_store( ) end +""" + delete_share(share_id) + delete_share(share_id, params::Dict{String,<:Any}) + +Deletes a resource share. 
If you are the resource owner, the subscriber will no longer have +access to the shared resource. If you are the subscriber, this operation deletes your +access to the share. + +# Arguments +- `share_id`: The ID for the resource share to be deleted. + +""" +function delete_share(shareId; aws_config::AbstractAWSConfig=global_aws_config()) + return omics( + "DELETE", + "/share/$(shareId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_share( + shareId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "DELETE", + "/share/$(shareId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_variant_store(name) delete_variant_store(name, params::Dict{String,<:Any}) @@ -843,6 +1042,43 @@ function get_annotation_store( ) end +""" + get_annotation_store_version(name, version_name) + get_annotation_store_version(name, version_name, params::Dict{String,<:Any}) + + Retrieves the metadata for an annotation store version. + +# Arguments +- `name`: The name given to an annotation store version to distinguish it from others. +- `version_name`: The name given to an annotation store version to distinguish it from + others. + +""" +function get_annotation_store_version( + name, versionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "GET", + "/annotationStore/$(name)/version/$(versionName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_annotation_store_version( + name, + versionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return omics( + "GET", + "/annotationStore/$(name)/version/$(versionName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_read_set(id, part_number, sequence_store_id) get_read_set(id, part_number, sequence_store_id, params::Dict{String,<:Any}) @@ -1182,7 +1418,8 @@ end get_run(id) get_run(id, params::Dict{String,<:Any}) -Gets information about a workflow run. +Gets information about a workflow run. If a workflow is shared with you, you cannot export +information about the run. # Arguments - `id`: The run's ID. @@ -1238,7 +1475,7 @@ end Gets information about a workflow run task. # Arguments -- `id`: The task's ID. +- `id`: The workflow run ID. - `task_id`: The task's ID. """ @@ -1295,6 +1532,33 @@ function get_sequence_store( ) end +""" + get_share(share_id) + get_share(share_id, params::Dict{String,<:Any}) + +Retrieves the metadata for the specified resource share. + +# Arguments +- `share_id`: The ID of the share. + +""" +function get_share(shareId; aws_config::AbstractAWSConfig=global_aws_config()) + return omics( + "GET", "/share/$(shareId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_share( + shareId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "GET", + "/share/$(shareId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_variant_import_job(job_id) get_variant_import_job(job_id, params::Dict{String,<:Any}) @@ -1359,7 +1623,8 @@ end get_workflow(id) get_workflow(id, params::Dict{String,<:Any}) -Gets information about a workflow. +Gets information about a workflow. If a workflow is shared with you, you cannot export the +workflow. # Arguments - `id`: The workflow's ID. @@ -1368,6 +1633,7 @@ Gets information about a workflow. 
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"export"`: The export format for the workflow. - `"type"`: The workflow's type. +- `"workflowOwnerId"`: The ID of the workflow owner. """ function get_workflow(id; aws_config::AbstractAWSConfig=global_aws_config()) return omics( @@ -1397,8 +1663,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filter"`: A filter to apply to the list. - `"ids"`: IDs of annotation import jobs to retrieve. - `"maxResults"`: The maximum number of jobs to return in one page of results. -- `"nextToken"`: Specify the pagination token from a previous request to retrieve the next - page of results. +- `"nextToken"`: Specifies the pagination token from a previous request to retrieve the + next page of results. """ function list_annotation_import_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) return omics( @@ -1420,6 +1686,45 @@ function list_annotation_import_jobs( ) end +""" + list_annotation_store_versions(name) + list_annotation_store_versions(name, params::Dict{String,<:Any}) + + Lists the versions of an annotation store. + +# Arguments +- `name`: The name of an annotation store. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: A filter to apply to the list of annotation store versions. +- `"maxResults"`: The maximum number of annotation store versions to return in one page of + results. +- `"nextToken"`: Specifies the pagination token from a previous request to retrieve the + next page of results. +""" +function list_annotation_store_versions( + name; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "POST", + "/annotationStore/$(name)/versions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_annotation_store_versions( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "POST", + "/annotationStore/$(name)/versions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_annotation_stores() list_annotation_stores(params::Dict{String,<:Any}) @@ -1455,15 +1760,16 @@ end list_multipart_read_set_uploads(sequence_store_id) list_multipart_read_set_uploads(sequence_store_id, params::Dict{String,<:Any}) - Lists all multipart read set uploads and their statuses. +Lists multipart read set uploads and for in progress uploads. Once the upload is completed, +a read set is created and the upload will no longer be returned in the response. # Arguments -- `sequence_store_id`: The Sequence Store ID used for the multipart uploads. +- `sequence_store_id`: The Sequence Store ID used for the multipart uploads. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of multipart uploads returned in a page. -- `"nextToken"`: Next token returned in the response of a previous +- `"maxResults"`: The maximum number of multipart uploads returned in a page. +- `"nextToken"`: Next token returned in the response of a previous ListMultipartReadSetUploads call. Used to get the next page of results. 
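+
+# Examples
+A minimal usage sketch; the sequence store ID is an illustrative placeholder:
+
+    list_multipart_read_set_uploads(
+        "1234567890",
+        Dict{String,Any}("maxResults" => 25);
+        aws_config=global_aws_config(),
+    )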
""" function list_multipart_read_set_uploads( @@ -1615,18 +1921,18 @@ end list_read_set_upload_parts(part_source, sequence_store_id, upload_id) list_read_set_upload_parts(part_source, sequence_store_id, upload_id, params::Dict{String,<:Any}) - This operation will list all parts in a requested multipart upload for a sequence store. +This operation will list all parts in a requested multipart upload for a sequence store. # Arguments -- `part_source`: The source file for the upload part. -- `sequence_store_id`: The Sequence Store ID used for the multipart uploads. -- `upload_id`: The ID for the initiated multipart upload. +- `part_source`: The source file for the upload part. +- `sequence_store_id`: The Sequence Store ID used for the multipart uploads. +- `upload_id`: The ID for the initiated multipart upload. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"filter"`: Attributes used to filter for a specific subset of read set part uploads. -- `"maxResults"`: The maximum number of read set upload parts returned in a page. -- `"nextToken"`: Next token returned in the response of a previous +- `"filter"`: Attributes used to filter for a specific subset of read set part uploads. +- `"maxResults"`: The maximum number of read set upload parts returned in a page. +- `"nextToken"`: Next token returned in the response of a previous ListReadSetUploadPartsRequest call. Used to get the next page of results. """ function list_read_set_upload_parts( @@ -1876,7 +2182,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"runGroupId"`: Filter the list by run group ID. - `"startingToken"`: Specify the pagination token from a previous request to retrieve the next page of results. -- `"status"`: The status of a run. +- `"status"`: The status of a run. """ function list_runs(; aws_config::AbstractAWSConfig=global_aws_config()) return omics("GET", "/run"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -1919,6 +2225,48 @@ function list_sequence_stores( ) end +""" + list_shares(resource_owner) + list_shares(resource_owner, params::Dict{String,<:Any}) + +Retrieves the resource shares associated with an account. Use the filter parameter to +retrieve a specific subset of the shares. + +# Arguments +- `resource_owner`: The account that owns the resource shares. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: Attributes that you use to filter for a specific subset of resource shares. +- `"maxResults"`: The maximum number of shares to return in one page of results. +- `"nextToken"`: Next token returned in the response of a previous + ListReadSetUploadPartsRequest call. Used to get the next page of results. 
+""" +function list_shares(resourceOwner; aws_config::AbstractAWSConfig=global_aws_config()) + return omics( + "POST", + "/shares", + Dict{String,Any}("resourceOwner" => resourceOwner); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_shares( + resourceOwner, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return omics( + "POST", + "/shares", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("resourceOwner" => resourceOwner), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -2024,10 +2372,10 @@ Retrieves a list of workflows. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of workflows to return in one page of results. -- `"name"`: The workflows' name. +- `"name"`: Filter the list by workflow name. - `"startingToken"`: Specify the pagination token from a previous request to retrieve the next page of results. -- `"type"`: The workflows' type. +- `"type"`: Filter the list by workflow type. """ function list_workflows(; aws_config::AbstractAWSConfig=global_aws_config()) return omics("GET", "/workflow"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -2053,9 +2401,10 @@ Starts an annotation import job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"annotationFields"`: The annotation schema generated by the parsed annotation data. +- `"annotationFields"`: The annotation schema generated by the parsed annotation data. - `"formatOptions"`: Formatting options for the annotation file. - `"runLeftNormalization"`: The job's left normalization setting. +- `"versionName"`: The name of the annotation store version. """ function start_annotation_import_job( destinationName, items, roleArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -2295,7 +2644,16 @@ end start_run(request_id, role_arn) start_run(request_id, role_arn, params::Dict{String,<:Any}) -Starts a run. +Starts a workflow run. To duplicate a run, specify the run's ID and a role ARN. The +remaining parameters are copied from the previous run. StartRun will not support re-run for +a workflow that is shared with you. The total number of runs in your account is subject to +a quota per Region. To avoid needing to delete runs manually, you can set the retention +mode to REMOVE. Runs with this setting are deleted automatically when the run quoata is +exceeded. By default, the run uses STATIC storage. For STATIC storage, set the +storageCapacity field. You can set the storage type to DYNAMIC. You do not set +storageCapacity, because HealthOmics dynamically scales the storage up or down as required. +For more information about static and dynamic storage, see Running workflows in the AWS +HealthOmics User Guide. # Arguments - `request_id`: To ensure that requests don't run multiple times, specify a unique ID for @@ -2309,12 +2667,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"outputUri"`: An output URI for the run. - `"parameters"`: Parameters for the run. - `"priority"`: A priority for the run. +- `"retentionMode"`: The retention mode for the run. - `"runGroupId"`: The run's group ID. -- `"runId"`: The run's ID. -- `"storageCapacity"`: A storage capacity for the run in gigabytes. 
+- `"runId"`: The ID of a run to duplicate. +- `"storageCapacity"`: A storage capacity for the run in gibibytes. This field is not + required if the storage type is dynamic (the system ignores any value that you enter). +- `"storageType"`: The run's storage type. By default, the run uses STATIC storage type, + which allocates a fixed amount of storage. If you set the storage type to DYNAMIC, + HealthOmics dynamically scales the storage up or down, based on file system utilization. - `"tags"`: Tags for the run. - `"workflowId"`: The run's workflow ID. -- `"workflowType"`: The run's workflows type. +- `"workflowOwnerId"`: The ID of the workflow owner. +- `"workflowType"`: The run's workflow type. """ function start_run(requestId, roleArn; aws_config::AbstractAWSConfig=global_aws_config()) return omics( @@ -2359,7 +2723,7 @@ Starts a variant import job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"annotationFields"`: The annotation schema generated by the parsed annotation data. +- `"annotationFields"`: The annotation schema generated by the parsed annotation data. - `"runLeftNormalization"`: The job's left normalization setting. """ function start_variant_import_job( @@ -2506,6 +2870,45 @@ function update_annotation_store( ) end +""" + update_annotation_store_version(name, version_name) + update_annotation_store_version(name, version_name, params::Dict{String,<:Any}) + + Updates the description of an annotation store version. + +# Arguments +- `name`: The name of an annotation store. +- `version_name`: The name of an annotation store version. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of an annotation store. +""" +function update_annotation_store_version( + name, versionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return omics( + "POST", + "/annotationStore/$(name)/version/$(versionName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_annotation_store_version( + name, + versionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return omics( + "POST", + "/annotationStore/$(name)/version/$(versionName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_run_group(id) update_run_group(id, params::Dict{String,<:Any}) @@ -2519,7 +2922,7 @@ Updates a run group. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxCpus"`: The maximum number of CPUs to use. - `"maxDuration"`: A maximum run time for the group in minutes. -- `"maxGpus"`: The maximum GPUs that can be used by a run group. +- `"maxGpus"`: The maximum GPUs that can be used by a run group. - `"maxRuns"`: The maximum number of concurrent runs for the group. - `"name"`: A name for the group. """ @@ -2608,15 +3011,15 @@ end upload_read_set_part(part_number, part_source, payload, sequence_store_id, upload_id) upload_read_set_part(part_number, part_source, payload, sequence_store_id, upload_id, params::Dict{String,<:Any}) - This operation uploads a specific part of a read set. If you upload a new part using a +This operation uploads a specific part of a read set. If you upload a new part using a previously used part number, the previously uploaded part will be overwritten. # Arguments -- `part_number`: The number of the part being uploaded. -- `part_source`: The source file for an upload part. 
-- `payload`: The read set data to upload for a part. -- `sequence_store_id`: The Sequence Store ID used for the multipart upload. -- `upload_id`: The ID for the initiated multipart upload. +- `part_number`: The number of the part being uploaded. +- `part_source`: The source file for an upload part. +- `payload`: The read set data to upload for a part. +- `sequence_store_id`: The Sequence Store ID used for the multipart upload. +- `upload_id`: The ID for the initiated multipart upload. """ function upload_read_set_part( diff --git a/src/services/opensearch.jl b/src/services/opensearch.jl index 3fbe30f22a..c45c383def 100644 --- a/src/services/opensearch.jl +++ b/src/services/opensearch.jl @@ -40,6 +40,55 @@ function accept_inbound_connection( ) end +""" + add_data_source(data_source_type, domain_name, name) + add_data_source(data_source_type, domain_name, name, params::Dict{String,<:Any}) + +Creates a new direct-query data source to the specified domain. For more information, see +Creating Amazon OpenSearch Service data source integrations with Amazon S3. + +# Arguments +- `data_source_type`: The type of data source. +- `domain_name`: The name of the domain to add the data source to. +- `name`: A name for the data source. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A description of the data source. +""" +function add_data_source( + DataSourceType, DomainName, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearch( + "POST", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource", + Dict{String,Any}("DataSourceType" => DataSourceType, "Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function add_data_source( + DataSourceType, + DomainName, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "POST", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DataSourceType" => DataSourceType, "Name" => Name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ add_tags(arn, tag_list) add_tags(arn, tag_list, params::Dict{String,<:Any}) @@ -156,6 +205,44 @@ function authorize_vpc_endpoint_access( ) end +""" + cancel_domain_config_change(domain_name) + cancel_domain_config_change(domain_name, params::Dict{String,<:Any}) + +Cancels a pending configuration change on an Amazon OpenSearch Service domain. + +# Arguments +- `domain_name`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: When set to True, returns the list of change IDs and properties that will be + cancelled without actually cancelling the change. 
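+
+# Example
+A minimal usage sketch (the domain name `"my-domain"` is a placeholder), assuming these
+generated wrappers are in scope and credentials come from `global_aws_config()`:
+```julia
+# Preview which pending change IDs would be cancelled, without cancelling them.
+cancel_domain_config_change("my-domain", Dict{String,Any}("DryRun" => true))
+
+# Cancel the pending configuration change.
+cancel_domain_config_change("my-domain")
+```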
+""" +function cancel_domain_config_change( + DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearch( + "POST", + "/2021-01-01/opensearch/domain/$(DomainName)/config/cancel"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_domain_config_change( + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "POST", + "/2021-01-01/opensearch/domain/$(DomainName)/config/cancel", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_service_software_update(domain_name) cancel_service_software_update(domain_name, params::Dict{String,<:Any}) @@ -242,6 +329,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys engine version for the OpenSearch Service domain. For example, OpenSearch_1.0 or Elasticsearch_7.9. For more information, see Creating and managing Amazon OpenSearch Service domains. +- `"IPAddressType"`: Specify either dual stack or IPv4 as your IP address type. Dual stack + allows you to share domain resources across IPv4 and IPv6 address types, and is the + recommended option. If you set your IP address type to dual stack, you can't change your + address type later. - `"LogPublishingOptions"`: Key-value pairs to configure log publishing. - `"NodeToNodeEncryptionOptions"`: Enables node-to-node encryption. - `"OffPeakWindowOptions"`: Specifies a daily 10-hour time block during which OpenSearch @@ -450,6 +541,43 @@ function create_vpc_endpoint( ) end +""" + delete_data_source(data_source_name, domain_name) + delete_data_source(data_source_name, domain_name, params::Dict{String,<:Any}) + +Deletes a direct-query data source. For more information, see Deleting an Amazon OpenSearch +Service data source with Amazon S3. + +# Arguments +- `data_source_name`: The name of the data source to delete. +- `domain_name`: The name of the domain. + +""" +function delete_data_source( + DataSourceName, DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearch( + "DELETE", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource/$(DataSourceName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_data_source( + DataSourceName, + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "DELETE", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource/$(DataSourceName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_domain(domain_name) delete_domain(domain_name, params::Dict{String,<:Any}) @@ -852,8 +980,7 @@ domains. # Arguments - `domain_names`: Array of OpenSearch Service domain names that you want information about. - If you don't specify any domains, OpenSearch Service returns information about all domains - owned by the account. + You must specify at least one domain name. """ function describe_domains(DomainNames; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1262,6 +1389,81 @@ function get_compatible_versions( ) end +""" + get_data_source(data_source_name, domain_name) + get_data_source(data_source_name, domain_name, params::Dict{String,<:Any}) + +Retrieves information about a direct query data source. + +# Arguments +- `data_source_name`: The name of the data source to get information about. +- `domain_name`: The name of the domain. 
+ +""" +function get_data_source( + DataSourceName, DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource/$(DataSourceName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_data_source( + DataSourceName, + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource/$(DataSourceName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_domain_maintenance_status(domain_name, maintenance_id) + get_domain_maintenance_status(domain_name, maintenance_id, params::Dict{String,<:Any}) + +The status of the maintenance action. + +# Arguments +- `domain_name`: The name of the domain. +- `maintenance_id`: The request ID of the maintenance action. + +""" +function get_domain_maintenance_status( + DomainName, maintenanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/domainMaintenance", + Dict{String,Any}("maintenanceId" => maintenanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_domain_maintenance_status( + DomainName, + maintenanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/domainMaintenance", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("maintenanceId" => maintenanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_package_version_history(package_id) get_package_version_history(package_id, params::Dict{String,<:Any}) @@ -1378,6 +1580,82 @@ function get_upgrade_status( ) end +""" + list_data_sources(domain_name) + list_data_sources(domain_name, params::Dict{String,<:Any}) + +Lists direct-query data sources for a specific domain. For more information, see For more +information, see Working with Amazon OpenSearch Service direct queries with Amazon S3. + +# Arguments +- `domain_name`: The name of the domain. + +""" +function list_data_sources(DomainName; aws_config::AbstractAWSConfig=global_aws_config()) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_sources( + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_domain_maintenances(domain_name) + list_domain_maintenances(domain_name, params::Dict{String,<:Any}) + +A list of maintenance actions for the domain. + +# Arguments +- `domain_name`: The name of the domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"action"`: The name of the action. +- `"maxResults"`: An optional parameter that specifies the maximum number of results to + return. You can use nextToken to get the next page of results. +- `"nextToken"`: If your initial ListDomainMaintenances operation returns a nextToken, + include the returned nextToken in subsequent ListDomainMaintenances operations, which + returns results in the next page. 
+- `"status"`: The status of the action. +""" +function list_domain_maintenances( + DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/domainMaintenances"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_domain_maintenances( + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "GET", + "/2021-01-01/opensearch/domain/$(DomainName)/domainMaintenances", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_domain_names() list_domain_names(params::Dict{String,<:Any}) @@ -1925,6 +2203,47 @@ function revoke_vpc_endpoint_access( ) end +""" + start_domain_maintenance(action, domain_name) + start_domain_maintenance(action, domain_name, params::Dict{String,<:Any}) + +Starts the node maintenance process on the data node. These processes can include a node +reboot, an Opensearch or Elasticsearch process restart, or a Dashboard or Kibana restart. + +# Arguments +- `action`: The name of the action. +- `domain_name`: The name of the domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NodeId"`: The ID of the data node. +""" +function start_domain_maintenance( + Action, DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearch( + "POST", + "/2021-01-01/opensearch/domain/$(DomainName)/domainMaintenance", + Dict{String,Any}("Action" => Action); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_domain_maintenance( + Action, + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "POST", + "/2021-01-01/opensearch/domain/$(DomainName)/domainMaintenance", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Action" => Action), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_service_software_update(domain_name) start_service_software_update(domain_name, params::Dict{String,<:Any}) @@ -1976,11 +2295,60 @@ function start_service_software_update( ) end +""" + update_data_source(data_source_name, data_source_type, domain_name) + update_data_source(data_source_name, data_source_type, domain_name, params::Dict{String,<:Any}) + +Updates a direct-query data source. For more information, see Working with Amazon +OpenSearch Service data source integrations with Amazon S3. + +# Arguments +- `data_source_name`: The name of the data source to modify. +- `data_source_type`: The type of data source. +- `domain_name`: The name of the domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A new description of the data source. +- `"Status"`: The status of the data source update. 
+""" +function update_data_source( + DataSourceName, + DataSourceType, + DomainName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "PUT", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource/$(DataSourceName)", + Dict{String,Any}("DataSourceType" => DataSourceType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_data_source( + DataSourceName, + DataSourceType, + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearch( + "PUT", + "/2021-01-01/opensearch/domain/$(DomainName)/dataSource/$(DataSourceName)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("DataSourceType" => DataSourceType), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_domain_config(domain_name) update_domain_config(domain_name, params::Dict{String,<:Any}) -Modifies the cluster configuration of the specified Amazon OpenSearch Service domain.sl +Modifies the cluster configuration of the specified Amazon OpenSearch Service domain. # Arguments - `domain_name`: The name of the domain that you're updating. @@ -2018,6 +2386,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Validating a domain update. - `"EBSOptions"`: The type and size of the EBS volume to attach to instances in the domain. - `"EncryptionAtRestOptions"`: Encryption at rest options for the domain. +- `"IPAddressType"`: Specify either dual stack or IPv4 as your IP address type. Dual stack + allows you to share domain resources across IPv4 and IPv6 address types, and is the + recommended option. If your IP address type is currently set to dual stack, you can't + change it. - `"LogPublishingOptions"`: Options to publish OpenSearch logs to Amazon CloudWatch Logs. - `"NodeToNodeEncryptionOptions"`: Node-to-node encryption options for the domain. - `"OffPeakWindowOptions"`: Off-peak window options for the domain. diff --git a/src/services/opensearchserverless.jl b/src/services/opensearchserverless.jl index 8976d8fa08..3555a4eb3e 100644 --- a/src/services/opensearchserverless.jl +++ b/src/services/opensearchserverless.jl @@ -33,6 +33,82 @@ function batch_get_collection( ) end +""" + batch_get_effective_lifecycle_policy(resource_identifiers) + batch_get_effective_lifecycle_policy(resource_identifiers, params::Dict{String,<:Any}) + +Returns a list of successful and failed retrievals for the OpenSearch Serverless indexes. +For more information, see Viewing data lifecycle policies. + +# Arguments +- `resource_identifiers`: The unique identifiers of policy types and resource names. 
+ +""" +function batch_get_effective_lifecycle_policy( + resourceIdentifiers; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearchserverless( + "BatchGetEffectiveLifecyclePolicy", + Dict{String,Any}("resourceIdentifiers" => resourceIdentifiers); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_effective_lifecycle_policy( + resourceIdentifiers, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearchserverless( + "BatchGetEffectiveLifecyclePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceIdentifiers" => resourceIdentifiers), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_get_lifecycle_policy(identifiers) + batch_get_lifecycle_policy(identifiers, params::Dict{String,<:Any}) + +Returns one or more configured OpenSearch Serverless lifecycle policies. For more +information, see Viewing data lifecycle policies. + +# Arguments +- `identifiers`: The unique identifiers of policy types and policy names. + +""" +function batch_get_lifecycle_policy( + identifiers; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearchserverless( + "BatchGetLifecyclePolicy", + Dict{String,Any}("identifiers" => identifiers); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_lifecycle_policy( + identifiers, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearchserverless( + "BatchGetLifecyclePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("identifiers" => identifiers), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_get_vpc_endpoint(ids) batch_get_vpc_endpoint(ids, params::Dict{String,<:Any}) @@ -138,6 +214,7 @@ managing Amazon OpenSearch Serverless collections. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"clientToken"`: Unique, case-sensitive identifier to ensure idempotency of the request. - `"description"`: Description of the collection. +- `"standbyReplicas"`: Indicates whether standby replicas should be used for a collection. - `"tags"`: An arbitrary set of tags (key–value pairs) to associate with the OpenSearch Serverless collection. - `"type"`: The type of collection. @@ -167,6 +244,65 @@ function create_collection( ) end +""" + create_lifecycle_policy(name, policy, type) + create_lifecycle_policy(name, policy, type, params::Dict{String,<:Any}) + +Creates a lifecyle policy to be applied to OpenSearch Serverless indexes. Lifecycle +policies define the number of days or hours to retain the data on an OpenSearch Serverless +index. For more information, see Creating data lifecycle policies. + +# Arguments +- `name`: The name of the lifecycle policy. +- `policy`: The JSON policy document to use as the content for the lifecycle policy. +- `type`: The type of lifecycle policy. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure idempotency of the request. +- `"description"`: A description of the lifecycle policy. 
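+
+# Example
+A minimal sketch; the retention-rule JSON document below is illustrative only (its field
+names are an assumption, not taken from this file), and the policy, collection, and index
+names are placeholders:
+```julia
+policy_doc = "{\"Rules\":[{\"ResourceType\":\"index\",\"Resource\":[\"index/my-collection/*\"],\"MinIndexRetention\":\"30d\"}]}"
+
+create_lifecycle_policy(
+    "my-retention-policy",
+    policy_doc,
+    "retention",
+    Dict{String,Any}("description" => "Keep index data for 30 days"),
+)
+```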
+""" +function create_lifecycle_policy( + name, policy, type; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearchserverless( + "CreateLifecyclePolicy", + Dict{String,Any}( + "name" => name, + "policy" => policy, + "type" => type, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_lifecycle_policy( + name, + policy, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearchserverless( + "CreateLifecyclePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "policy" => policy, + "type" => type, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_security_config(name, type) create_security_config(name, type, params::Dict{String,<:Any}) @@ -425,6 +561,53 @@ function delete_collection( ) end +""" + delete_lifecycle_policy(name, type) + delete_lifecycle_policy(name, type, params::Dict{String,<:Any}) + +Deletes an OpenSearch Serverless lifecycle policy. For more information, see Deleting data +lifecycle policies. + +# Arguments +- `name`: The name of the policy to delete. +- `type`: The type of lifecycle policy. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Unique, case-sensitive identifier to ensure idempotency of the request. +""" +function delete_lifecycle_policy( + name, type; aws_config::AbstractAWSConfig=global_aws_config() +) + return opensearchserverless( + "DeleteLifecyclePolicy", + Dict{String,Any}("name" => name, "type" => type, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_lifecycle_policy( + name, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return opensearchserverless( + "DeleteLifecyclePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "type" => type, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_security_config(id) delete_security_config(id, params::Dict{String,<:Any}) @@ -559,7 +742,7 @@ control for Amazon OpenSearch Serverless. # Arguments - `name`: The name of the access policy. -- `type`: Tye type of policy. Currently the only supported value is data. +- `type`: Tye type of policy. Currently, the only supported value is data. """ function get_access_policy(name, type; aws_config::AbstractAWSConfig=global_aws_config()) @@ -741,7 +924,7 @@ body {} if you don't include any collection filters in the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"collectionFilters"`: List of filter names and values that you can use for requests. +- `"collectionFilters"`: A list of filter names and values that you can use for requests. - `"maxResults"`: The maximum number of results to return. Default is 20. You can use nextToken to get the next page of results. - `"nextToken"`: If your initial ListCollections operation returns a nextToken, you can @@ -761,6 +944,45 @@ function list_collections( ) end +""" + list_lifecycle_policies(type) + list_lifecycle_policies(type, params::Dict{String,<:Any}) + +Returns a list of OpenSearch Serverless lifecycle policies. 
For more information, see
+Viewing data lifecycle policies.
+
+# Arguments
+- `type`: The type of lifecycle policy.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: An optional parameter that specifies the maximum number of results to
+  return. You can use nextToken to get the next page of results. The default is 10.
+- `"nextToken"`: If your initial ListLifecyclePolicies operation returns a nextToken, you
+  can include the returned nextToken in subsequent ListLifecyclePolicies operations, which
+  returns results in the next page.
+- `"resources"`: Resource filters that policies can apply to. Currently, the only supported
+  resource type is index.
+"""
+function list_lifecycle_policies(type; aws_config::AbstractAWSConfig=global_aws_config())
+    return opensearchserverless(
+        "ListLifecyclePolicies",
+        Dict{String,Any}("type" => type);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function list_lifecycle_policies(
+    type, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return opensearchserverless(
+        "ListLifecyclePolicies",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("type" => type), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     list_security_configs(type)
     list_security_configs(type, params::Dict{String,<:Any})
@@ -1118,6 +1340,65 @@ function update_collection(
 )
 end

+"""
+    update_lifecycle_policy(name, policy_version, type)
+    update_lifecycle_policy(name, policy_version, type, params::Dict{String,<:Any})
+
+Updates an OpenSearch Serverless lifecycle policy. For more information, see Updating data
+lifecycle policies.
+
+# Arguments
+- `name`: The name of the policy.
+- `policy_version`: The version of the policy being updated.
+- `type`: The type of lifecycle policy.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"clientToken"`: A unique, case-sensitive identifier to ensure idempotency of the request.
+- `"description"`: A description of the lifecycle policy.
+- `"policy"`: The JSON policy document to use as the content for the lifecycle policy.
+"""
+function update_lifecycle_policy(
+    name, policyVersion, type; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return opensearchserverless(
+        "UpdateLifecyclePolicy",
+        Dict{String,Any}(
+            "name" => name,
+            "policyVersion" => policyVersion,
+            "type" => type,
+            "clientToken" => string(uuid4()),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_lifecycle_policy(
+    name,
+    policyVersion,
+    type,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return opensearchserverless(
+        "UpdateLifecyclePolicy",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "name" => name,
+                    "policyVersion" => policyVersion,
+                    "type" => type,
+                    "clientToken" => string(uuid4()),
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_security_config(config_version, id)
     update_security_config(config_version, id, params::Dict{String,<:Any})
diff --git a/src/services/opsworks.jl b/src/services/opsworks.jl
index 434f48ddec..5445cb855d 100644
--- a/src/services/opsworks.jl
+++ b/src/services/opsworks.jl
@@ -10,10 +10,10 @@ using AWS.UUIDs
 Assign a registered instance to a layer.   You can assign registered on-premises instances
to any layer type.
You can assign registered Amazon EC2 instances only to custom layers. - You cannot use this action with instances that were created with AWS OpsWorks Stacks. -Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user -must have a Manage permissions level for the stack or an attached policy that explicitly -grants permissions. For more information on user permissions, see Managing User Permissions. + You cannot use this action with instances that were created with OpsWorks Stacks. +Required Permissions: To use this action, an Identity and Access Management (IAM) user must +have a Manage permissions level for the stack or an attached policy that explicitly grants +permissions. For more information on user permissions, see Managing User Permissions. # Arguments - `instance_id`: The instance ID. @@ -138,11 +138,11 @@ end attach_elastic_load_balancer(elastic_load_balancer_name, layer_id) attach_elastic_load_balancer(elastic_load_balancer_name, layer_id, params::Dict{String,<:Any}) -Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks -does not support Application Load Balancer. You can only use Classic Load Balancer with AWS -OpsWorks Stacks. For more information, see Elastic Load Balancing. You must create the -Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, -API, or CLI. For more information, see Elastic Load Balancing Developer Guide. Required +Attaches an Elastic Load Balancing load balancer to a specified layer. OpsWorks Stacks does +not support Application Load Balancer. You can only use Classic Load Balancer with OpsWorks +Stacks. For more information, see Elastic Load Balancing. You must create the Elastic Load +Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. +For more information, see the Elastic Load Balancing Developer Guide. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @@ -198,23 +198,23 @@ use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. # Arguments -- `service_role_arn`: The stack AWS Identity and Access Management (IAM) role, which allows - AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter - to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using - the AWS OpsWorks Stacks console, it creates the role for you. You can obtain an existing - stack's IAM ARN programmatically by calling DescribePermissions. For more information about - IAM ARNs, see Using Identifiers. You must set this parameter to a valid service role ARN - or the action will fail; there is no default value. You can specify the source stack's - service role ARN, if you prefer, but you must do so explicitly. +- `service_role_arn`: The stack Identity and Access Management (IAM) role, which allows + OpsWorks Stacks to work with Amazon Web Services resources on your behalf. You must set + this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a + stack by using the OpsWorkss Stacks console, it creates the role for you. You can obtain an + existing stack's IAM ARN programmatically by calling DescribePermissions. 
For more + information about IAM ARNs, see Using Identifiers. You must set this parameter to a valid + service role ARN or the action will fail; there is no default value. You can specify the + source stack's service role ARN, if you prefer, but you must do so explicitly. - `source_stack_id`: The source stack ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentVersion"`: The default AWS OpsWorks Stacks agent version. You have the following - options: Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically +- `"AgentVersion"`: The default OpsWorks Stacks agent version. You have the following + options: Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, - you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then + you must edit the stack configuration and specify a new version. OpsWorks Stacks automatically installs that version on the stack's instances. The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, @@ -247,27 +247,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers. - `"DefaultOs"`: The stack's operating system, which must be set to one of the following. - A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, - Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, - Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such - as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat - Enterprise Linux 7 Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 - R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or - Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You specify - the custom AMI you want to use when you create instances. For more information about how to - use custom AMIs with OpsWorks, see Using Custom AMIs. The default option is the parent - stack's operating system. For more information about supported operating systems, see AWS - OpsWorks Stacks Operating Systems. You can specify a different Linux operating system for - the cloned stack, but you cannot change from Linux to Windows or Windows to Linux. + A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon + Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon + Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu + operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu + 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 Microsoft Windows Server + 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows + Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL + Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you + create instances. 
For more information about how to use custom AMIs with OpsWorks, see + Using Custom AMIs. The default option is the parent stack's operating system. Not all + operating systems are supported with all versions of Chef. For more information about + supported operating systems, see OpsWorks Stacks Operating Systems. You can specify a + different Linux operating system for the cloned stack, but you cannot change from Linux to + Windows or Windows to Linux. - `"DefaultRootDeviceType"`: The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device. - `"DefaultSshKeyName"`: A default Amazon EC2 key pair name. The default value is none. If - you specify a key pair name, AWS OpsWorks installs the public key on the instance and you - can use the private key with an SSH client to log in to the instance. For more information, - see Using SSH to Communicate with an Instance and Managing SSH Access. You can override - this setting by specifying a different key pair, or no key pair, when you create an - instance. + you specify a key pair name, OpsWorks installs the public key on the instance and you can + use the private key with an SSH client to log in to the instance. For more information, see + Using SSH to Communicate with an Instance and Managing SSH Access. You can override this + setting by specifying a different key pair, or no key pair, when you create an instance. - `"DefaultSubnetId"`: The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for @@ -281,36 +282,36 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme. -- `"Name"`: The cloned stack name. -- `"Region"`: The cloned stack AWS region, such as \"ap-northeast-2\". For more information - about AWS regions, see Regions and Endpoints. +- `"Name"`: The cloned stack name. Stack names can be a maximum of 64 characters. +- `"Region"`: The cloned stack Amazon Web Services Region, such as ap-northeast-2. For more + information about Amazon Web Services Regions, see Regions and Endpoints. - `"UseCustomCookbooks"`: Whether to use custom cookbooks. -- `"UseOpsworksSecurityGroups"`: Whether to associate the AWS OpsWorks Stacks built-in - security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of - built-in security groups, one for each layer, which are associated with layers by default. - With UseOpsworksSecurityGroups you can instead provide your own custom security groups. - UseOpsworksSecurityGroups has the following settings: True - AWS OpsWorks Stacks +- `"UseOpsworksSecurityGroups"`: Whether to associate the OpsWorks Stacks built-in security + groups with the stack's layers. OpsWorks Stacks provides a standard set of security groups, + one for each layer, which are associated with layers by default. With + UseOpsworksSecurityGroups you can instead provide your own custom security groups. + UseOpsworksSecurityGroups has the following settings: True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). 
You can associate additional security groups with a layer after you create it but - you cannot delete the built-in security group. False - AWS OpsWorks Stacks does not - associate built-in security groups with layers. You must create appropriate Amazon Elastic - Compute Cloud (Amazon EC2) security groups and associate a security group with each layer - that you create. However, you can still manually associate a built-in security group with a - layer on creation; custom security groups are required only for those layers that need - custom settings. For more information, see Create a New Stack. + you cannot delete the built-in security group. False - OpsWorks Stacks does not associate + built-in security groups with layers. You must create appropriate Amazon EC2 security + groups and associate a security group with each layer that you create. However, you can + still manually associate a built-in security group with a layer on creation; custom + security groups are required only for those layers that need custom settings. For more + information, see Create a New Stack. - `"VpcId"`: The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later. If your account supports EC2 Classic, the default value is no VPC. If your account does not support EC2 Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified - either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks - Stacks infers the value of the other parameter. If you specify neither parameter, AWS - OpsWorks Stacks sets these parameters to the first valid Availability Zone for the - specified region and the corresponding default VPC subnet ID, respectively. If you specify - a nondefault VPC ID, note the following: It must belong to a VPC in your account that is - in the specified region. You must specify a value for DefaultSubnetId. For more - information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. - For more information about default VPC and EC2 Classic, see Supported Platforms. + either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, OpsWorks Stacks + infers the value of the other parameter. If you specify neither parameter, OpsWorks Stacks + sets these parameters to the first valid Availability Zone for the specified region and the + corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, + note the following: It must belong to a VPC in your account that is in the specified + region. You must specify a value for DefaultSubnetId. For more information about how to + use OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about + default VPC and EC2 Classic, see Supported Platforms. """ function clone_stack( ServiceRoleArn, SourceStackId; aws_config::AbstractAWSConfig=global_aws_config() @@ -359,7 +360,7 @@ user permissions, see Managing User Permissions. - `name`: The app name. - `stack_id`: The stack ID. - `type`: The app type. Each supported type is associated with a particular layer. For - example, PHP applications are associated with a PHP layer. AWS OpsWorks Stacks deploys an + example, PHP applications are associated with a PHP layer. OpsWorks Stacks deploys an application to those instances that are members of the corresponding layer. 
If your app isn't one of the standard types, or you prefer to implement your own Deploy recipes, specify other. @@ -490,14 +491,14 @@ For more information on user permissions, see Managing User Permissions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentVersion"`: The default AWS OpsWorks Stacks agent version. You have the following +- `"AgentVersion"`: The default OpsWorks Stacks agent version. You have the following options: INHERIT - Use the stack's default agent version setting. version_number - Use the specified agent version. This value overrides the stack's default setting. To - update the agent version, edit the instance configuration and specify a new version. AWS - OpsWorks Stacks then automatically installs that version on the instance. The default - setting is INHERIT. To specify an agent version, you must use the complete version number, - not the abbreviated number shown on the console. For a list of available agent version - numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + update the agent version, edit the instance configuration and specify a new version. + OpsWorks Stacks installs that version on the instance. The default setting is INHERIT. To + specify an agent version, you must use the complete version number, not the abbreviated + number shown on the console. For a list of available agent version numbers, call + DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. - `"AmiId"`: A custom AMI ID to be used to create the instance. The AMI should be based on one of the supported operating systems. For more information, see Using Custom AMIs. If you specify a custom AMI, you must set Os to Custom. @@ -512,7 +513,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance's block devices. For more information, see Block Device Mapping. Note that block device mappings are not supported for custom AMIs. - `"EbsOptimized"`: Whether to create an Amazon EBS-optimized instance. -- `"Hostname"`: The instance host name. +- `"Hostname"`: The instance host name. The following are character limits for instance + host names. Linux-based instances: 63 characters Windows-based instances: 15 characters + - `"InstallUpdatesOnBoot"`: Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment @@ -520,25 +523,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys apt-get (Ubuntu) on the instances. We strongly recommend using the default value of true to ensure that your instances have the latest security updates. - `"Os"`: The instance's operating system, which must be set to one of the following. A - supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, - Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, - Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such - as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat - Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows - Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft - Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with - SQL Server Web. 
A custom AMI: Custom. For more information about the supported - operating systems, see AWS OpsWorks Stacks Operating Systems. The default option is the - current Amazon Linux version. If you set this parameter to Custom, you must use the + supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon + Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon + Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu + operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu + 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows + operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server + 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, + or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. Not all + operating systems are supported with all versions of Chef. For more information about the + supported operating systems, see OpsWorks Stacks Operating Systems. The default option is + the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about - supported operating systems, see Operating SystemsFor more information about how to use - custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs. + how to use custom AMIs with OpsWorks Stacks, see Using Custom AMIs. - `"RootDeviceType"`: The instance root device type. For more information, see Storage for the Root Device. - `"SshKeyName"`: The instance's Amazon EC2 key-pair name. - `"SubnetId"`: The ID of the instance's subnet. If the stack is running in a VPC, you can - use this parameter to override the stack's default subnet ID value and direct AWS OpsWorks + use this parameter to override the stack's default subnet ID value and direct OpsWorks Stacks to launch the instance in a different subnet. - `"Tenancy"`: The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, inherit tenancy settings from the VPC. The following are @@ -600,13 +603,14 @@ a Manage permissions level for the stack, or an attached policy that explicitly permissions. For more information on user permissions, see Managing User Permissions. # Arguments -- `name`: The layer name, which is used by the console. +- `name`: The layer name, which is used by the console. Layer names can be a maximum of 32 + characters. - `shortname`: For custom layers only, use this parameter to specify the layer's short - name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name - is also used as the name for the directory where your app files are installed. It can have - a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', - and '.'. The built-in layers' short names are defined by AWS OpsWorks Stacks. For more - information, see the Layer Reference. + name, which is used internally by OpsWorks Stacks and by Chef recipes. The short name is + also used as the name for the directory where your app files are installed. It can have a + maximum of 32 characters, which are limited to the alphanumeric characters, '-', '_', and + '.'. Built-in layer short names are defined by OpsWorks Stacks. For more information, see + the Layer Reference. 
- `stack_id`: The layer stack ID. - `type`: The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. Built-in layers are not available in Chef 12 @@ -627,7 +631,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instances. For more information about IAM ARNs, see Using Identifiers. - `"CustomJson"`: A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see - Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI. + Using Custom JSON. This feature is supported as of version 1.7.42 of the CLI. - `"CustomRecipes"`: A LayerCustomRecipes object that specifies the layer custom recipes. - `"CustomSecurityGroupIds"`: An array containing the layer custom security group IDs. - `"EnableAutoHealing"`: Whether to disable auto healing for the layer. @@ -696,39 +700,39 @@ permissions. For more information about user permissions, see Managing User Perm - `default_instance_profile_arn`: The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers. -- `name`: The stack name. -- `region`: The stack's AWS region, such as ap-south-1. For more information about Amazon - regions, see Regions and Endpoints. In the AWS CLI, this API maps to the --stack-region - parameter. If the --stack-region parameter and the AWS CLI common parameter --region are - set to the same value, the stack uses a regional endpoint. If the --stack-region parameter - is not set, but the AWS CLI --region parameter is, this also results in a stack with a - regional endpoint. However, if the --region parameter is set to us-east-1, and the - --stack-region parameter is set to one of the following, then the stack uses a legacy or - classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, - ap-southeast-1, ap-southeast-2. In this case, the actual API endpoint of the stack is in - us-east-1. Only the preceding regions are supported as classic regions in the us-east-1 API - endpoint. Because it is a best practice to choose the regional endpoint that is closest to - where you manage AWS, we recommend that you use regional endpoints for new stacks. The AWS - CLI common --region parameter always specifies a regional API endpoint; it cannot be used - to specify a classic AWS OpsWorks Stacks region. -- `service_role_arn`: The stack's AWS Identity and Access Management (IAM) role, which - allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this - parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information - about IAM ARNs, see Using Identifiers. +- `name`: The stack name. Stack names can be a maximum of 64 characters. +- `region`: The stack's Amazon Web Services Region, such as ap-south-1. For more + information about Amazon Web Services Regions, see Regions and Endpoints. In the CLI, this + API maps to the --stack-region parameter. If the --stack-region parameter and the CLI + common parameter --region are set to the same value, the stack uses a regional endpoint. If + the --stack-region parameter is not set, but the CLI --region parameter is, this also + results in a stack with a regional endpoint. 
However, if the --region parameter is set to + us-east-1, and the --stack-region parameter is set to one of the following, then the stack + uses a legacy or classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, + ap-northeast-1, ap-southeast-1, ap-southeast-2. In this case, the actual API endpoint of + the stack is in us-east-1. Only the preceding regions are supported as classic regions in + the us-east-1 API endpoint. Because it is a best practice to choose the regional endpoint + that is closest to where you manage Amazon Web Services, we recommend that you use regional + endpoints for new stacks. The CLI common --region parameter always specifies a regional API + endpoint; it cannot be used to specify a classic OpsWorks Stacks region. +- `service_role_arn`: The stack's IAM role, which allows OpsWorks Stacks to work with + Amazon Web Services resources on your behalf. You must set this parameter to the Amazon + Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see + Using Identifiers. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentVersion"`: The default AWS OpsWorks Stacks agent version. You have the following - options: Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically +- `"AgentVersion"`: The default OpsWorks Stacks agent version. You have the following + options: Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, - you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then - automatically installs that version on the stack's instances. The default setting is the - most recent release of the agent. To specify an agent version, you must use the complete - version number, not the abbreviated number shown on the console. For a list of available - agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. - You can also specify an agent version when you create or update an instance, which - overrides the stack's default setting. + you must edit the stack configuration and specify a new version. OpsWorks Stacks installs + that version on the stack's instances. The default setting is the most recent release of + the agent. To specify an agent version, you must use the complete version number, not the + abbreviated number shown on the console. For a list of available agent version numbers, + call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also specify + an agent version when you create or update an instance, which overrides the stack's default + setting. - `"Attributes"`: One or more user-defined key-value pairs to be added to the stack attributes. - `"ChefConfiguration"`: A ChefConfiguration object that specifies whether to enable @@ -752,25 +756,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DefaultOs"`: The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following. 
A supported Linux operating system: An Amazon Linux - version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon - Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A - supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu - 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows - operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server - 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, - or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You - specify the custom AMI you want to use when you create instances. For more information, see - Using Custom AMIs. The default option is the current Amazon Linux version. For more - information about supported operating systems, see AWS OpsWorks Stacks Operating Systems. + version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux + 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux + 2015.03. A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, + Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 + A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, + Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 + with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A + custom AMI: Custom. You specify the custom AMI you want to use when you create instances. + For more information, see Using Custom AMIs. The default option is the current Amazon + Linux version. Not all operating systems are supported with all versions of Chef. For more + information about supported operating systems, see OpsWorks Stacks Operating Systems. - `"DefaultRootDeviceType"`: The default root device type. This value is the default for all instances in the stack, but you can override it when you create an instance. The default option is instance-store. For more information, see Storage for the Root Device. - `"DefaultSshKeyName"`: A default Amazon EC2 key pair name. The default value is none. If - you specify a key pair name, AWS OpsWorks installs the public key on the instance and you - can use the private key with an SSH client to log in to the instance. For more information, - see Using SSH to Communicate with an Instance and Managing SSH Access. You can override - this setting by specifying a different key pair, or no key pair, when you create an - instance. + you specify a key pair name, OpsWorks installs the public key on the instance and you can + use the private key with an SSH client to log in to the instance. For more information, see + Using SSH to Communicate with an Instance and Managing SSH Access. You can override this + setting by specifying a different key pair, or no key pair, when you create an instance. - `"DefaultSubnetId"`: The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for @@ -784,14 +788,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme. - `"UseCustomCookbooks"`: Whether the stack uses custom cookbooks. -- `"UseOpsworksSecurityGroups"`: Whether to associate the AWS OpsWorks Stacks built-in - security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of - built-in security groups, one for each layer, which are associated with layers by default. - With UseOpsworksSecurityGroups you can instead provide your own custom security groups. - UseOpsworksSecurityGroups has the following settings: True - AWS OpsWorks Stacks +- `"UseOpsworksSecurityGroups"`: Whether to associate the OpsWorks Stacks built-in security + groups with the stack's layers. OpsWorks Stacks provides a standard set of built-in + security groups, one for each layer, which are associated with layers by default. With + UseOpsworksSecurityGroups you can instead provide your own custom security groups. + UseOpsworksSecurityGroups has the following settings: True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, - but you cannot delete the built-in security group. False - AWS OpsWorks Stacks does not + but you cannot delete the built-in security group. False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom @@ -802,14 +806,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys later. If your account supports EC2-Classic, the default value is no VPC. If your account does not support EC2-Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified - either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks - Stacks infers the value of the other parameter. If you specify neither parameter, AWS - OpsWorks Stacks sets these parameters to the first valid Availability Zone for the - specified region and the corresponding default VPC subnet ID, respectively. If you specify - a nondefault VPC ID, note the following: It must belong to a VPC in your account that is - in the specified region. You must specify a value for DefaultSubnetId. For more - information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. - For more information about default VPC and EC2-Classic, see Supported Platforms. + either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, OpsWorks Stacks + infers the value of the other parameter. If you specify neither parameter, OpsWorks Stacks + sets these parameters to the first valid Availability Zone for the specified region and the + corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, note + the following: It must belong to a VPC in your account that is in the specified region. + You must specify a value for DefaultSubnetId. For more information about how to use + OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about + default VPC and EC2-Classic, see Supported Platforms. 
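To make the CreateStack hunk above concrete, the sketch below shows how the required positional arguments and the optional params Dict might be passed through AWS.jl, assuming the `@service` pattern from the package README. The ARNs, VPC, and subnet IDs are placeholders, the positional order follows the generated signature that appears later in this file, and the response field access assumes the OpsWorks CreateStack shape.

```julia
using AWS
@service OpsWorks

# Placeholder identifiers; substitute values from your own account.
instance_profile_arn = "arn:aws:iam::111122223333:instance-profile/aws-opsworks-ec2-role"
service_role_arn     = "arn:aws:iam::111122223333:role/aws-opsworks-service-role"

resp = OpsWorks.create_stack(
    instance_profile_arn,
    "my-stack",
    "us-west-2",            # regional endpoint, recommended for new stacks
    service_role_arn,
    Dict{String,Any}(
        "AgentVersion"              => "LATEST",         # auto-update the agent
        "DefaultOs"                 => "Amazon Linux 2",
        "UseOpsworksSecurityGroups" => true,             # keep the built-in security groups
        "VpcId"                     => "vpc-0abc1234",
        "DefaultSubnetId"           => "subnet-0abc1234",
    ),
)
stack_id = resp["StackId"]   # field name assumed from the CreateStack response shape
```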
""" function create_stack( DefaultInstanceProfileArn, @@ -874,9 +878,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys My Settings page. For more information, see Setting an IAM User's Public SSH Key. - `"SshPublicKey"`: The user's public SSH key. - `"SshUsername"`: The user's SSH user name. The allowable characters are [a-z], [A-Z], - [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks - Stacks removes them. For example, my.name will be changed to myname. If you do not specify - an SSH user name, AWS OpsWorks Stacks generates one from the IAM user name. + [0-9], '-', and '_'. If the specified name includes other punctuation marks, OpsWorks + Stacks removes them. For example, my.name is changed to myname. If you do not specify an + SSH user name, OpsWorks Stacks generates one from the IAM user name. """ function create_user_profile(IamUserArn; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( @@ -1117,11 +1121,11 @@ end deregister_elastic_ip(elastic_ip) deregister_elastic_ip(elastic_ip, params::Dict{String,<:Any}) -Deregisters a specified Elastic IP address. The address can then be registered by another -stack. For more information, see Resource Management. Required Permissions: To use this -action, an IAM user must have a Manage permissions level for the stack, or an attached -policy that explicitly grants permissions. For more information on user permissions, see -Managing User Permissions. +Deregisters a specified Elastic IP address. The address can be registered by another stack +after it is deregistered. For more information, see Resource Management. Required +Permissions: To use this action, an IAM user must have a Manage permissions level for the +stack, or an attached policy that explicitly grants permissions. For more information on +user permissions, see Managing User Permissions. # Arguments - `elastic_ip`: The Elastic IP address. @@ -1154,9 +1158,9 @@ end deregister_instance(instance_id) deregister_instance(instance_id, params::Dict{String,<:Any}) -Deregister a registered Amazon EC2 or on-premises instance. This action removes the -instance from the stack and returns it to your control. This action cannot be used with -instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this +Deregister an instance from OpsWorks Stacks. The instance can be a registered instance +(Amazon EC2 or on-premises) or an instance created with OpsWorks. This action removes the +instance from the stack and returns it to your control. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @@ -1238,9 +1242,9 @@ explicitly grants permissions. For more information on user permissions, see Man Permissions. # Arguments -- `volume_id`: The AWS OpsWorks Stacks volume ID, which is the GUID that AWS OpsWorks - Stacks assigned to the instance when you registered the volume with the stack, not the - Amazon EC2 volume ID. +- `volume_id`: The OpsWorks Stacks volume ID, which is the GUID that OpsWorks Stacks + assigned to the instance when you registered the volume with the stack, not the Amazon EC2 + volume ID. 
""" function deregister_volume(VolumeId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1270,9 +1274,9 @@ end describe_agent_versions() describe_agent_versions(params::Dict{String,<:Any}) -Describes the available AWS OpsWorks Stacks agent versions. You must specify a stack ID or -a configuration manager. DescribeAgentVersions returns a list of available agent versions -for the specified stack or configuration manager. +Describes the available OpsWorks Stacks agent versions. You must specify a stack ID or a +configuration manager. DescribeAgentVersions returns a list of available agent versions for +the specified stack or configuration manager. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1399,7 +1403,7 @@ end Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, -AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a +OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing @@ -1635,7 +1639,7 @@ end describe_operating_systems() describe_operating_systems(params::Dict{String,<:Any}) -Describes the operating systems that are supported by AWS OpsWorks Stacks. +Describes the operating systems that are supported by OpsWorks Stacks. """ function describe_operating_systems(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1759,11 +1763,10 @@ end describe_service_errors() describe_service_errors(params::Dict{String,<:Any}) -Describes AWS OpsWorks Stacks service errors. Required Permissions: To use this action, an -IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an -attached policy that explicitly grants permissions. For more information about user -permissions, see Managing User Permissions. This call accepts only one resource-identifying -parameter. +Describes OpsWorks Stacks service errors. Required Permissions: To use this action, an IAM +user must have a Show, Deploy, or Manage permissions level for the stack, or an attached +policy that explicitly grants permissions. For more information about user permissions, see +Managing User Permissions. This call accepts only one resource-identifying parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1870,7 +1873,11 @@ permissions, see Managing User Permissions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"StackIds"`: An array of stack IDs that specify the stacks to be described. If you omit - this parameter, DescribeStacks returns a description of every stack. + this parameter, and have permissions to get information about all stacks, DescribeStacks + returns a description of every stack. If the IAM policy that is attached to an IAM user + limits the DescribeStacks action to specific stack ARNs, this parameter is required, and + the user must specify a stack ARN that is allowed by the policy. Otherwise, DescribeStacks + returns an AccessDenied error. 
""" function describe_stacks(; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( @@ -2114,14 +2121,13 @@ end for a specified time period. # Arguments -- `instance_id`: The instance's AWS OpsWorks Stacks ID. +- `instance_id`: The instance's OpsWorks Stacks ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ValidForInMinutes"`: The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, the user will no longer be able to use the - credentials to log in. If the user is logged in at the time, he or she automatically will - be logged out. + credentials to log in. If the user is logged in at the time, they are logged out. """ function grant_access(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( @@ -2316,26 +2322,28 @@ end register_instance(stack_id) register_instance(stack_id, params::Dict{String,<:Any}) -Registers instances that were created outside of AWS OpsWorks Stacks with a specified -stack. We do not recommend using this action to register instances. The complete -registration operation includes two tasks: installing the AWS OpsWorks Stacks agent on the -instance, and registering the instance with the stack. RegisterInstance handles only the -second step. You should instead use the AWS CLI register command, which performs the entire -registration operation. For more information, see Registering an Instance with an AWS -OpsWorks Stacks Stack. Registered instances have the same requirements as instances that -are created by using the CreateInstance API. For example, registered instances must be -running a supported Linux-based operating system, and they must have a supported instance -type. For more information about requirements for instances that you want to register, see -Preparing the Instance. Required Permissions: To use this action, an IAM user must have a -Manage permissions level for the stack or an attached policy that explicitly grants -permissions. For more information on user permissions, see Managing User Permissions. +Registers instances that were created outside of OpsWorks Stacks with a specified stack. +We do not recommend using this action to register instances. The complete registration +operation includes two tasks: installing the OpsWorks Stacks agent on the instance, and +registering the instance with the stack. RegisterInstance handles only the second step. You +should instead use the CLI register command, which performs the entire registration +operation. For more information, see Registering an Instance with an OpsWorks Stacks +Stack. Registered instances have the same requirements as instances that are created by +using the CreateInstance API. For example, registered instances must be running a supported +Linux-based operating system, and they must have a supported instance type. For more +information about requirements for instances that you want to register, see Preparing the +Instance. Required Permissions: To use this action, an IAM user must have a Manage +permissions level for the stack or an attached policy that explicitly grants permissions. +For more information on user permissions, see Managing User Permissions. # Arguments - `stack_id`: The ID of the stack that the instance is to be registered with. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Hostname"`: The instance's hostname. +- `"Hostname"`: The instance's host name. 
The following are character limits for instance + host names. Linux-based instances: 63 characters Windows-based instances: 15 characters + - `"InstanceIdentity"`: An InstanceIdentity object that contains the instance's identity. - `"PrivateIp"`: The instance's private IP address. - `"PublicIp"`: The instance's public IP address. @@ -2480,11 +2488,11 @@ permissions, see Managing User Permissions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DownScaling"`: An AutoScalingThresholds object with the downscaling threshold - configuration. If the load falls below these thresholds for a specified amount of time, AWS + configuration. If the load falls below these thresholds for a specified amount of time, OpsWorks Stacks stops a specified number of instances. - `"Enable"`: Enables load-based auto scaling for the layer. - `"UpScaling"`: An AutoScalingThresholds object with the upscaling threshold - configuration. If the load exceeds these thresholds for a specified amount of time, AWS + configuration. If the load exceeds these thresholds for a specified amount of time, OpsWorks Stacks starts a specified number of instances. """ function set_load_based_auto_scaling( @@ -2687,9 +2695,9 @@ information on user permissions, see Managing User Permissions. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Force"`: Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, adding the Force parameter to the StopInstances API call - disassociates the AWS OpsWorks Stacks instance from EC2, and forces deletion of only the + disassociates the OpsWorks Stacks instance from EC2, and forces deletion of only the OpsWorks Stacks instance. You must also delete the formerly-associated instance in EC2 - after troubleshooting and replacing the AWS OpsWorks Stacks instance with a new one. + after troubleshooting and replacing the OpsWorks Stacks instance with a new one. """ function stop_instance(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( @@ -2749,8 +2757,8 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks. For more -information about how tagging works, see Tags in the AWS OpsWorks User Guide. +Apply cost-allocation tags to a specified stack or layer in OpsWorks Stacks. For more +information about how tagging works, see Tags in the OpsWorks User Guide. # Arguments - `resource_arn`: The stack or layer's Amazon Resource Number (ARN). @@ -2797,10 +2805,10 @@ end Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as -needed. You cannot use this action with instances that were created with AWS OpsWorks -Stacks. Required Permissions: To use this action, an IAM user must have a Manage -permissions level for the stack or an attached policy that explicitly grants permissions. -For more information about user permissions, see Managing User Permissions. +needed. You cannot use this action with instances that were created with OpsWorks Stacks. +Required Permissions: To use this action, an IAM user must have a Manage permissions level +for the stack or an attached policy that explicitly grants permissions. For more +information about user permissions, see Managing User Permissions. 
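The SetLoadBasedAutoScaling hunks above only rename the service, but the threshold wiring is easier to read as code. The sketch below is indicative only: the layer ID is a placeholder and the AutoScalingThresholds keys (InstanceCount, CpuThreshold, ThresholdsWaitTime) are assumed from the OpsWorks API shapes.

```julia
using AWS
@service OpsWorks

OpsWorks.set_load_based_auto_scaling(
    "layer-id-example",
    Dict{String,Any}(
        "Enable" => true,
        # Start two more instances if CPU stays above 80% for 5 minutes.
        "UpScaling" => Dict(
            "InstanceCount"      => 2,
            "CpuThreshold"       => 80.0,
            "ThresholdsWaitTime" => 5,
        ),
        # Stop one instance if CPU stays below 30% for 10 minutes.
        "DownScaling" => Dict(
            "InstanceCount"      => 1,
            "CpuThreshold"       => 30.0,
            "ThresholdsWaitTime" => 10,
        ),
    ),
)
```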
# Arguments - `instance_id`: The instance ID. @@ -2974,7 +2982,7 @@ For more information on user permissions, see Managing User Permissions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Name"`: The new name. +- `"Name"`: The new name, which can be a maximum of 32 characters. """ function update_elastic_ip(ElasticIp; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( @@ -3012,14 +3020,14 @@ permissions. For more information on user permissions, see Managing User Permiss # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentVersion"`: The default AWS OpsWorks Stacks agent version. You have the following +- `"AgentVersion"`: The default OpsWorks Stacks agent version. You have the following options: INHERIT - Use the stack's default agent version setting. version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new - version. AWS OpsWorks Stacks then automatically installs that version on the instance. - The default setting is INHERIT. To specify an agent version, you must use the complete - version number, not the abbreviated number shown on the console. For a list of available - agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + version. OpsWorks Stacks installs that version on the instance. The default setting is + INHERIT. To specify an agent version, you must use the complete version number, not the + abbreviated number shown on the console. For a list of available agent version numbers, + call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. - `"AmiId"`: The ID of the AMI that was used to create the instance. The value of this parameter must be the same AMI ID that the instance is already using. You cannot apply a new AMI to an instance by running UpdateInstance. UpdateInstance does not work on instances @@ -3030,7 +3038,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AutoScalingType"`: For load-based or time-based instances, the type. Windows stacks can use only time-based instances. - `"EbsOptimized"`: This property cannot be updated. -- `"Hostname"`: The instance host name. +- `"Hostname"`: The instance host name. The following are character limits for instance + host names. Linux-based instances: 63 characters Windows-based instances: 15 characters + - `"InstallUpdatesOnBoot"`: Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment @@ -3045,20 +3055,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"LayerIds"`: The instance's layer IDs. - `"Os"`: The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI. A supported Linux operating system: - An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux - 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux - 2015.03. A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, - or Ubuntu 12.04 LTS. 
CentOS Linux 7 Red Hat Enterprise Linux 7 A supported - Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows - Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server - Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. For more information - about supported operating systems, see AWS OpsWorks Stacks Operating Systems. The default - option is the current Amazon Linux version. If you set this parameter to Custom, you must - use the AmiId parameter to specify the custom AMI that you want to use. For more - information about supported operating systems, see Operating Systems. For more information - about how to use custom AMIs with OpsWorks, see Using Custom AMIs. You can specify a - different Linux operating system for the updated stack, but you cannot change from Linux to - Windows or Windows to Linux. + An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux + 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux + 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu + 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 + Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft + Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, + Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 + R2 with SQL Server Web. Not all operating systems are supported with all versions of + Chef. For more information about supported operating systems, see OpsWorks Stacks Operating + Systems. The default option is the current Amazon Linux version. If you set this parameter + to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. + For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs. + You can specify a different Linux operating system for the updated stack, but you cannot + change from Linux to Windows or Windows to Linux. - `"SshKeyName"`: The instance's Amazon EC2 key name. """ function update_instance(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -3121,14 +3131,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys apt-get (Ubuntu) on the instances. We strongly recommend using the default value of true, to ensure that your instances have the latest security updates. - `"LifecycleEventConfiguration"`: -- `"Name"`: The layer name, which is used by the console. +- `"Name"`: The layer name, which is used by the console. Layer names can be a maximum of + 32 characters. - `"Packages"`: An array of Package objects that describe the layer's packages. - `"Shortname"`: For custom layers only, use this parameter to specify the layer's short - name, which is used internally by AWS OpsWorks Stacks and by Chef. The short name is also - used as the name for the directory where your app files are installed. It can have a - maximum of 200 characters and must be in the following format: /A[a-z0-9-_.]+Z/. The - built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see - the Layer Reference + name, which is used internally by OpsWorks Stacks and by Chef. The short name is also used + as the name for the directory where your app files are installed. It can have a maximum of + 32 characters and must be in the following format: /A[a-z0-9-_.]+Z/. 
Built-in layer short + names are defined by OpsWorks Stacks. For more information, see the Layer reference in the + OpsWorks User Guide. - `"UseEbsOptimizedInstances"`: Whether to use Amazon EBS-optimized instances. - `"VolumeConfigurations"`: A VolumeConfigurations object that describes the layer's Amazon EBS volumes. @@ -3236,17 +3247,16 @@ permissions. For more information on user permissions, see Managing User Permiss # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentVersion"`: The default AWS OpsWorks Stacks agent version. You have the following - options: Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically +- `"AgentVersion"`: The default OpsWorks Stacks agent version. You have the following + options: Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, - you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then - automatically installs that version on the stack's instances. The default setting is - LATEST. To specify an agent version, you must use the complete version number, not the - abbreviated number shown on the console. For a list of available agent version numbers, - call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also specify - an agent version when you create or update an instance, which overrides the stack's default - setting. + you must edit the stack configuration and specify a new version. OpsWorks Stacks installs + that version on the stack's instances. The default setting is LATEST. To specify an agent + version, you must use the complete version number, not the abbreviated number shown on the + console. For a list of available agent version numbers, call DescribeAgentVersions. + AgentVersion cannot be set to Chef 12.2. You can also specify an agent version when you + create or update an instance, which overrides the stack's default setting. - `"Attributes"`: One or more user-defined key-value pairs to be added to the stack attributes. - `"ChefConfiguration"`: A ChefConfiguration object that specifies whether to enable @@ -3271,23 +3281,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers. - `"DefaultOs"`: The stack's operating system, which must be set to one of the following: - A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, - Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, - Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such - as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat - Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows - Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft - Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with - SQL Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you - create instances. For more information about how to use custom AMIs with OpsWorks, see - Using Custom AMIs. The default option is the stack's current operating system. 
For more - information about supported operating systems, see AWS OpsWorks Stacks Operating Systems. + A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon + Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon + Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu + operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu + 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows + operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server + 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, + or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You + specify the custom AMI you want to use when you create instances. For more information + about how to use custom AMIs with OpsWorks, see Using Custom AMIs. The default option is + the stack's current operating system. Not all operating systems are supported with all + versions of Chef. For more information about supported operating systems, see OpsWorks + Stacks Operating Systems. - `"DefaultRootDeviceType"`: The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device. - `"DefaultSshKeyName"`: A default Amazon EC2 key-pair name. The default value is none. If - you specify a key-pair name, AWS OpsWorks Stacks installs the public key on the instance - and you can use the private key with an SSH client to log in to the instance. For more + you specify a key-pair name, OpsWorks Stacks installs the public key on the instance and + you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance. @@ -3304,22 +3316,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme. -- `"Name"`: The stack's new name. +- `"Name"`: The stack's new name. Stack names can be a maximum of 64 characters. - `"ServiceRoleArn"`: Do not use this parameter. You cannot update a stack's service role. - `"UseCustomCookbooks"`: Whether the stack uses custom cookbooks. -- `"UseOpsworksSecurityGroups"`: Whether to associate the AWS OpsWorks Stacks built-in - security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of - built-in security groups, one for each layer, which are associated with layers by default. +- `"UseOpsworksSecurityGroups"`: Whether to associate the OpsWorks Stacks built-in security + groups with the stack's layers. OpsWorks Stacks provides a standard set of built-in + security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings: True - - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with - each layer (default setting). 
You can associate additional security groups with a layer - after you create it, but you cannot delete the built-in security group. False - AWS - OpsWorks Stacks does not associate built-in security groups with layers. You must create - appropriate EC2 security groups and associate a security group with each layer that you - create. However, you can still manually associate a built-in security group with a layer - on. Custom security groups are required only for those layers that need custom settings. - For more information, see Create a New Stack. + OpsWorks Stacks automatically associates the appropriate built-in security group with each + layer (default setting). You can associate additional security groups with a layer after + you create it, but you cannot delete the built-in security group. False - OpsWorks Stacks + does not associate built-in security groups with layers. You must create appropriate EC2 + security groups and associate a security group with each layer that you create. However, + you can still manually associate a built-in security group with a layer on. Custom security + groups are required only for those layers that need custom settings. For more + information, see Create a New Stack. """ function update_stack(StackId; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( @@ -3357,9 +3369,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys My Settings page. For more information, see Managing User Permissions. - `"SshPublicKey"`: The user's new SSH public key. - `"SshUsername"`: The user's SSH user name. The allowable characters are [a-z], [A-Z], - [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks + [0-9], '-', and '_'. If the specified name includes other punctuation marks, OpsWorks Stacks removes them. For example, my.name will be changed to myname. If you do not specify - an SSH user name, AWS OpsWorks Stacks generates one from the IAM user name. + an SSH user name, OpsWorks Stacks generates one from the IAM user name. """ function update_user_profile(IamUserArn; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( @@ -3399,7 +3411,7 @@ For more information on user permissions, see Managing User Permissions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MountPoint"`: The new mount point. -- `"Name"`: The new name. +- `"Name"`: The new name. Volume names can be a maximum of 128 characters. """ function update_volume(VolumeId; aws_config::AbstractAWSConfig=global_aws_config()) return opsworks( diff --git a/src/services/organizations.jl b/src/services/organizations.jl index 4c461998b1..ffb0df1a7b 100644 --- a/src/services/organizations.jl +++ b/src/services/organizations.jl @@ -16,13 +16,13 @@ for an invitation to join must have the organizations:AcceptHandshake permission enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see -Organizations and Service-Linked Roles in the Organizations User Guide. Enable all +Organizations and service-linked roles in the Organizations User Guide. Enable all features final confirmation handshake: only a principal from the management account. For more information about invitations, see Inviting an Amazon Web Services account to join your organization in the Organizations User Guide. 
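Returning to the OpsWorks UpdateStack hunk above, a brief sketch of how the revised documentation translates into a call; the stack ID is a placeholder and only the keys present in the params Dict are changed.

```julia
using AWS
@service OpsWorks

OpsWorks.update_stack(
    "2aa43d75-example-stack-id",
    Dict{String,Any}(
        "Name"                      => "renamed-stack",  # stack names max 64 characters
        "AgentVersion"              => "LATEST",
        "UseOpsworksSecurityGroups" => false,            # supply your own EC2 security groups
    ),
)
```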
For more information about requests to enable all features in the organization, see Enabling all features in your organization in -the Organizations User Guide. After you accept a handshake, it continues to appear in -the results of relevant APIs for only 30 days. After that, it's deleted. +the Organizations User Guide. After you accept a handshake, it continues to appear in the +results of relevant APIs for only 30 days. After that, it's deleted. # Arguments - `handshake_id`: The unique identifier (ID) of the handshake that you want to accept. The @@ -61,7 +61,8 @@ Attaches a policy to a root, an organizational unit (OU), or an individual accou policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY This operation can be called -only from the organization's management account. +only from the organization's management account or by a member account that is a delegated +administrator for an Amazon Web Services service. # Arguments - `policy_id`: The unique identifier (ID) of the policy that you want to attach to the @@ -165,18 +166,16 @@ progress, Account status will indicate PENDING_CLOSURE. When the close account r completes, the status will change to SUSPENDED. Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in -Organizations in the Organizations User Guide. You can close only 10% of member -accounts, between 10 and 200, within a rolling 30 day period. This quota is not bound by a +Organizations in the Organizations User Guide. You can close only 10% of member +accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can -close additional accounts in the Billing console. For more information, see Closing an -account in the Amazon Web Services Billing and Cost Management User Guide. To reinstate a +close additional accounts. For more information, see Closing a member account in your +organization and Quotas for Organizationsin the Organizations User Guide. To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status. If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon -Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide. For -more information about closing accounts, see Closing an Amazon Web Services account in the -Organizations User Guide. +Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide. # Arguments - `account_id`: Retrieves the Amazon Web Services account Id for the current CloseAccount @@ -219,11 +218,11 @@ account. To check the status of the request, do one of the following: Use the the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see -Logging and monitoring in Organizations in the Organizations User Guide. 
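Since the AttachPolicy and CloseAccount hunks above now allow delegated administrators to call them, a minimal Organizations sketch may be useful; the policy, OU, and account IDs are placeholders, and the second positional argument to attach_policy is assumed to be the target root, OU, or account ID as in the Organizations API.

```julia
using AWS
@service Organizations

# Attach a policy to an organizational unit.
Organizations.attach_policy("p-examplepolicyid", "ou-exampleouid")

# Close a member account. The call is asynchronous: the account moves through
# PENDING_CLOSURE to SUSPENDED, which can be confirmed via CloudTrail's
# CloseAccountResult event as described above.
Organizations.close_account("111122223333")
```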
The user who +Logging and monitoring in Organizations in the Organizations User Guide. The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see -Organizations and Service-Linked Roles in the Organizations User Guide. If the request +Organizations and service-linked roles in the Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account @@ -231,27 +230,27 @@ administrator permissions in the new member account. Principals in the managemen can assume the role. Organizations clones the company name and address information for the new account from the organization's management account. This operation can be called only from the organization's management account. For more information about creating accounts, -see Creating an Amazon Web Services account in Your Organization in the Organizations User -Guide. When you create an account in an organization using the Organizations console, -API, or CLI commands, the information required for the account to operate as a standalone -account, such as a payment method and signing the end user license agreement (EULA) is not -automatically collected. If you must remove an account from your organization later, you -can do so only after you provide the missing information. Follow the steps at To leave an -organization as a member account in the Organizations User Guide. If you get an exception -that indicates that you exceeded your account limits for the organization, contact Amazon -Web Services Support. If you get an exception that indicates that the operation failed -because your organization is still initializing, wait one hour and then try again. If the -error persists, contact Amazon Web Services Support. Using CreateAccount to create -multiple temporary accounts isn't recommended. You can only close an account from the -Billing and Cost Management console, and you must be signed in as the root user. For -information on the requirements and process for closing an account, see Closing an Amazon -Web Services account in the Organizations User Guide. When you create a member account -with this operation, you can choose whether to create the account with the IAM User and -Role Access to Billing Information switch enabled. If you enable it, IAM users and roles -that have appropriate permissions can view billing information for the account. If you -disable it, only the account root user can access billing information. For information -about how to disable this switch for an account, see Granting Access to Your Billing -Information and Tools. +see Creating a member account in your organization in the Organizations User Guide. When +you create an account in an organization using the Organizations console, API, or CLI +commands, the information required for the account to operate as a standalone account, such +as a payment method and signing the end user license agreement (EULA) is not automatically +collected. If you must remove an account from your organization later, you can do so only +after you provide the missing information. 
For more information, see Considerations before +removing an account from an organization in the Organizations User Guide. If you get an +exception that indicates that you exceeded your account limits for the organization, +contact Amazon Web Services Support. If you get an exception that indicates that the +operation failed because your organization is still initializing, wait one hour and then +try again. If the error persists, contact Amazon Web Services Support. Using +CreateAccount to create multiple temporary accounts isn't recommended. You can only close +an account from the Billing and Cost Management console, and you must be signed in as the +root user. For information on the requirements and process for closing an account, see +Closing a member account in your organization in the Organizations User Guide. When you +create a member account with this operation, you can choose whether to create the account +with the IAM User and Role Access to Billing Information switch enabled. If you enable it, +IAM users and roles that have appropriate permissions can view billing information for the +account. If you disable it, only the account root user can access billing information. For +information about how to disable this switch for an account, see Granting access to your +billing information and tools. # Arguments - `account_name`: The friendly name of the member account. @@ -273,7 +272,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"IamUserAccessToBilling"`: If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, - see Activating Access to the Billing and Cost Management Console in the Amazon Web Services + see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide. If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account. @@ -283,11 +282,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys administrator. The role has administrator permissions in the new member account. If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole. For more information about how to use this role to access the member account, see the following - links: Accessing and Administering the Member Accounts in Your Organization in the - Organizations User Guide Steps 2 and 3 in Tutorial: Delegate Access Across Amazon Web - Services accounts Using IAM Roles in the IAM User Guide The regex pattern that is used - to validate this parameter. The pattern can include uppercase letters, lowercase letters, - digits with no spaces, and any of the following characters: =,.@- + links: Creating the OrganizationAccountAccessRole in an invited member account in the + Organizations User Guide Steps 2 and 3 in IAM Tutorial: Delegate access across Amazon + Web Services accounts using IAM roles in the IAM User Guide The regex pattern that is + used to validate this parameter. The pattern can include uppercase letters, lowercase + letters, digits with no spaces, and any of the following characters: =,.@- - `"Tags"`: A list of tags that you want to attach to the newly created account. For each tag in the list, you must specify both a tag key and a value. 
You can set the value to an empty string, but you can't set it to null. For more information about tagging, see Tagging @@ -337,7 +336,7 @@ paired with a management account of an organization in the commercial Region. this action from the management account of your organization in the commercial Region. You have the organizations:CreateGovCloudAccount permission. Organizations automatically creates the required service-linked role named AWSServiceRoleForOrganizations. For more -information, see Organizations and Service-Linked Roles in the Organizations User Guide. +information, see Organizations and service-linked roles in the Organizations User Guide. Amazon Web Services automatically enables CloudTrail for Amazon Web Services GovCloud (US) accounts, but you should also do the following: Verify that CloudTrail is enabled to store logs. Create an Amazon S3 bucket for CloudTrail log storage. For more information, @@ -360,8 +359,8 @@ account. To check the status of the request, do one of the following: Use the response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see -Monitoring the Activity in Your Organization in the Organizations User Guide. When you -call the CreateGovCloudAccount action, you create two accounts: a standalone account in the +Logging and monitoring in Organizations in the Organizations User Guide. When you call +the CreateGovCloudAccount action, you create two accounts: a standalone account in the Amazon Web Services GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated @@ -372,28 +371,29 @@ commercial account that you just created. A role is also created in the new Amaz Services GovCloud (US) account that can be assumed by the Amazon Web Services GovCloud (US) account that is associated with the management account of the commercial organization. For more information and to view a diagram that explains how account access works, see -Organizations in the Amazon Web Services GovCloud User Guide. For more information about -creating accounts, see Creating an Amazon Web Services account in Your Organization in the -Organizations User Guide. When you create an account in an organization using the -Organizations console, API, or CLI commands, the information required for the account to -operate as a standalone account is not automatically collected. This includes a payment -method and signing the end user license agreement (EULA). If you must remove an account -from your organization later, you can do so only after you provide the missing information. -Follow the steps at To leave an organization as a member account in the Organizations User -Guide. If you get an exception that indicates that you exceeded your account limits for -the organization, contact Amazon Web Services Support. If you get an exception that -indicates that the operation failed because your organization is still initializing, wait -one hour and then try again. If the error persists, contact Amazon Web Services Support. -Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You -can only close an account from the Amazon Web Services Billing and Cost Management console, -and you must be signed in as the root user. 
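To anchor the CreateAccount hunk above, here is a hedged sketch; the email address, account name, and tag values are placeholders, the positional order (account name, then email) is assumed from the generated signature, and the status call that follows mirrors the polling guidance in the docstring.

```julia
using AWS
@service Organizations

resp = Organizations.create_account(
    "example-dev-account",     # AccountName
    "dev@example.com",         # Email
    Dict{String,Any}(
        "IamUserAccessToBilling" => "ALLOW",
        "RoleName"               => "OrganizationAccountAccessRole",
        "Tags"                   => [Dict("Key" => "team", "Value" => "platform")],
    ),
)

# CreateAccount is asynchronous; poll the returned request ID until it completes.
# Signature assumed: the request ID is the operation's only required parameter.
request_id = resp["CreateAccountStatus"]["Id"]
status = Organizations.describe_create_account_status(request_id)
```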
For information on the requirements and process -for closing an account, see Closing an Amazon Web Services account in the Organizations -User Guide. When you create a member account with this operation, you can choose -whether to create the account with the IAM User and Role Access to Billing Information -switch enabled. If you enable it, IAM users and roles that have appropriate permissions can -view billing information for the account. If you disable it, only the account root user can -access billing information. For information about how to disable this switch for an -account, see Granting Access to Your Billing Information and Tools. +Organizations in the Amazon Web Services GovCloud User Guide. For more information about +creating accounts, see Creating a member account in your organization in the Organizations +User Guide. When you create an account in an organization using the Organizations +console, API, or CLI commands, the information required for the account to operate as a +standalone account is not automatically collected. This includes a payment method and +signing the end user license agreement (EULA). If you must remove an account from your +organization later, you can do so only after you provide the missing information. For more +information, see Considerations before removing an account from an organization in the +Organizations User Guide. If you get an exception that indicates that you exceeded your +account limits for the organization, contact Amazon Web Services Support. If you get an +exception that indicates that the operation failed because your organization is still +initializing, wait one hour and then try again. If the error persists, contact Amazon Web +Services Support. Using CreateGovCloudAccount to create multiple temporary accounts isn't +recommended. You can only close an account from the Amazon Web Services Billing and Cost +Management console, and you must be signed in as the root user. For information on the +requirements and process for closing an account, see Closing a member account in your +organization in the Organizations User Guide. When you create a member account with +this operation, you can choose whether to create the account with the IAM User and Role +Access to Billing Information switch enabled. If you enable it, IAM users and roles that +have appropriate permissions can view billing information for the account. If you disable +it, only the account root user can access billing information. For information about how to +disable this switch for an account, see Granting access to your billing information and +tools. # Arguments - `account_name`: The friendly name of the member account. The account name can consist of @@ -419,8 +419,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"IamUserAccessToBilling"`: If set to ALLOW, the new linked account in the commercial Region enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account - billing information. For more information, see Activating Access to the Billing and Cost - Management Console in the Amazon Web Services Billing and Cost Management User Guide. If + billing information. For more information, see About IAM access to the Billing and Cost + Management console in the Amazon Web Services Billing and Cost Management User Guide. 
If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account. - `"RoleName"`: (Optional) The name of an IAM role that Organizations automatically @@ -429,12 +429,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account. If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole. For - more information about how to use this role to access the member account, see Accessing and - Administering the Member Accounts in Your Organization in the Organizations User Guide and - steps 2 and 3 in Tutorial: Delegate Access Across Amazon Web Services accounts Using IAM - Roles in the IAM User Guide. The regex pattern that is used to validate this parameter. - The pattern can include uppercase letters, lowercase letters, digits with no spaces, and - any of the following characters: =,.@- + more information about how to use this role to access the member account, see the following + links: Creating the OrganizationAccountAccessRole in an invited member account in the + Organizations User Guide Steps 2 and 3 in IAM Tutorial: Delegate access across Amazon + Web Services accounts using IAM roles in the IAM User Guide The regex pattern that is + used to validate this parameter. The pattern can include uppercase letters, lowercase + letters, digits with no spaces, and any of the following characters: =,.@- - `"Tags"`: A list of tags that you want to attach to the newly created account. These tags are attached to the commercial account associated with the GovCloud account, and not to the GovCloud account itself. To add tags to the actual GovCloud account, call the TagResource @@ -487,15 +487,15 @@ IAM permissions. By default (or if you set the FeatureSet parameter to ALL), the organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to -CONSOLIDATED_BILLING\", no policy types are enabled by default, and you can't use -organization policies +CONSOLIDATED_BILLING, no policy types are enabled by default and you can't use organization +policies. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"FeatureSet"`: Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality. CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the management account. For more - information, see Consolidated billing in the Organizations User Guide. The consolidated + information, see Consolidated billing in the Organizations User Guide. The consolidated billing feature subset isn't available for organizations in the Amazon Web Services GovCloud (US) Region. ALL: In addition to all the features supported by the consolidated billing feature set, the management account can also apply any policy type to any member @@ -523,8 +523,8 @@ Creates an organizational unit (OU) within a root or parent OU. An OU is a conta accounts that enables you to organize your accounts to apply policies according to your business requirements. 
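For the CreateOrganization and CreateOrganizationalUnit hunks above, a short sketch of the happy path; the OU name is arbitrary, the root ID is discovered via ListRoots, and the positional order (OU name, then parent ID) is assumed from the generator's alphabetical convention.

```julia
using AWS
@service Organizations

# Create an organization with all features enabled (the default); pass
# "CONSOLIDATED_BILLING" instead to forgo organization policies.
Organizations.create_organization(Dict("FeatureSet" => "ALL"))

# Look up the organization root, then create an OU beneath it.
root_id = Organizations.list_roots()["Roots"][1]["Id"]
ou = Organizations.create_organizational_unit("workloads", root_id)
```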
The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five. -For more information about OUs, see Managing Organizational Units in the Organizations User -Guide. If the request includes tags, then the requester must have the +For more information about OUs, see Managing organizational units (OUs) in the +Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. This operation can be called only from the organization's management account. @@ -580,13 +580,16 @@ end Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual Amazon Web Services account. For more information about policies and -their use, see Managing Organization Policies. If the request includes tags, then the +their use, see Managing Organizations policies. If the request includes tags, then the requester must have the organizations:TagResource permission. This operation can be called -only from the organization's management account. +only from the organization's management account or by a member account that is a delegated +administrator for an Amazon Web Services service. # Arguments - `content`: The policy text content to add to the new policy. The text that you supply - must adhere to the rules of the policy type you specify in the Type parameter. + must adhere to the rules of the policy type you specify in the Type parameter. The maximum + size of a policy document depends on the policy's type. For more information, see Maximum + and minimum values in the Organizations User Guide. - `description`: An optional description to assign to the policy. - `name`: The friendly name to assign to the policy. The regex pattern that is used to validate this parameter is a string of any of the characters in the ASCII character range. @@ -756,7 +759,8 @@ end Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts. This -operation can be called only from the organization's management account. +operation can be called only from the organization's management account or by a member +account that is a delegated administrator for an Amazon Web Services service. # Arguments - `policy_id`: The unique identifier (ID) of the policy that you want to delete. You can @@ -958,9 +962,9 @@ Returns the contents of the effective policy for specified policy type and accou effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account. This operation applies only to policy types other than service control policies (SCPs). For more -information about policy inheritance, see How Policy Inheritance Works in the Organizations -User Guide. This operation can be called only from the organization's management account or -by a member account that is a delegated administrator for an Amazon Web Services service. +information about policy inheritance, see Understanding management policy inheritance in +the Organizations User Guide. This operation can be called from any account in the +organization. # Arguments - `policy_type`: The type of policy that you want information about. 
You can specify one of @@ -1151,7 +1155,7 @@ end describe_resource_policy() describe_resource_policy(params::Dict{String,<:Any}) -Retrieves information about a resource policy. You can only call this operation from the +Retrieves information about a resource policy. This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service. @@ -1186,7 +1190,8 @@ authorization strategy of an \"allow list\". If you instead attach a second SCP the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\". This operation can be called -only from the organization's management account. +only from the organization's management account or by a member account that is a delegated +administrator for an Amazon Web Services service. # Arguments - `policy_id`: The unique identifier (ID) of the policy you want to detach. You can get the @@ -1269,9 +1274,9 @@ resources in the organization's accounts depends on that service. For more infor the documentation for the other Amazon Web Services service. After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts For more information about integrating other services with -Organizations, including the list of services that work with Organizations, see Integrating -Organizations with Other Amazon Web Services Services in the Organizations User Guide. -This operation can be called only from the organization's management account. +Organizations, including the list of services that work with Organizations, see Using +Organizations with other Amazon Web Services services in the Organizations User Guide. This +operation can be called only from the organization's management account. # Arguments - `service_principal`: The service principal name of the Amazon Web Services service for @@ -1319,7 +1324,8 @@ performs in the background. If you disable a policy type for a root, it still ap enabled for the organization if all features are enabled for the organization. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation. This operation can be called only from the -organization's management account. To view the status of available policy types in the +organization's management account or by a member account that is a delegated administrator +for an Amazon Web Services service. To view the status of available policy types in the organization, use DescribeOrganization. # Arguments @@ -1369,7 +1375,7 @@ Enables all features in an organization. This enables the use of organization po can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that Organizations supports. For more information, -see Enabling All Features in Your Organization in the Organizations User Guide. This +see Enabling all features in your organization in the Organizations User Guide. This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. 
The feature set change can be finalized and the @@ -1414,8 +1420,8 @@ specified service. Doing so ensures that the service is aware that it can create resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service. For more information about -enabling services to integrate with Organizations, see Integrating Organizations with Other -Amazon Web Services Services in the Organizations User Guide. You can only call this +enabling services to integrate with Organizations, see Using Organizations with other +Amazon Web Services services in the Organizations User Guide. You can only call this operation from the organization's management account and only if the organization has enabled all features. @@ -1462,7 +1468,8 @@ You can undo this by using the DisablePolicyType operation. This is an asynchron that Amazon Web Services performs in the background. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation. This operation can be called only from the organization's management -account. You can enable a policy type in a root only if that policy type is available in +account or by a member account that is a delegated administrator for an Amazon Web Services +service. You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization. @@ -1517,7 +1524,7 @@ the management account. For example, if your organization's management account w by Amazon Internet Services Pvt. Ltd (AISPL), an Amazon Web Services seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and Amazon Web Services or from any other Amazon Web Services seller. For more -information, see Consolidated Billing in India. If you receive an exception that +information, see Consolidated billing in India. If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact Amazon Web Services Support. If the request @@ -1591,20 +1598,22 @@ standalone, you must perform the following steps. If any of the steps are alread for this account, that step doesn't appear. Choose a support plan Provide and verify the required contact information Provide a current payment method Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services -activity that occurs while the account isn't attached to an organization. Follow the steps -at To leave an organization when all required account information has not yet been -provided in the Organizations User Guide. The account that you want to leave must not be -a delegated administrator account for any Amazon Web Services service enabled for your -organization. If the account is a delegated administrator, you must first change the -delegated administrator account to another account that is remaining in the organization. -You can leave an organization only after you enable IAM user access to billing in your -account. 
For more information, see Activating Access to the Billing and Cost Management -Console in the Amazon Web Services Billing and Cost Management User Guide. After the -account leaves the organization, all tags that were attached to the account object in the -organization are deleted. Amazon Web Services accounts outside of an organization do not -support tags. A newly created account has a waiting period before it can be removed from -its organization. If you get an error that indicates that a wait period is required, then -try again in a few days. +activity that occurs while the account isn't attached to an organization. For more +information, see Considerations before removing an account from an organization in the +Organizations User Guide. The account that you want to leave must not be a delegated +administrator account for any Amazon Web Services service enabled for your organization. If +the account is a delegated administrator, you must first change the delegated administrator +account to another account that is remaining in the organization. You can leave an +organization only after you enable IAM user access to billing in your account. For more +information, see About IAM access to the Billing and Cost Management console in the Amazon +Web Services Billing and Cost Management User Guide. After the account leaves the +organization, all tags that were attached to the account object in the organization are +deleted. Amazon Web Services accounts outside of an organization do not support tags. A +newly created account has a waiting period before it can be removed from its organization. +If you get an error that indicates that a wait period is required, then try again in a few +days. If you are using an organization principal to call LeaveOrganization across +multiple accounts, you can only do this up to 5 accounts per second in a single +organization. """ function leave_organization(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1728,9 +1737,9 @@ Returns a list of the Amazon Web Services services that you enabled to integrate organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts. For more information about integrating other services with Organizations, including the list of -services that currently work with Organizations, see Integrating Organizations with Other -Amazon Web Services Services in the Organizations User Guide. This operation can be called -only from the organization's management account or by a member account that is a delegated +services that currently work with Organizations, see Using Organizations with other Amazon +Web Services services in the Organizations User Guide. This operation can be called only +from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service. # Optional Parameters @@ -2549,7 +2558,7 @@ organization's management account. # Arguments - `content`: If provided, the new content for the resource policy. The text must be correctly formatted JSON that complies with the syntax for the resource policy's type. For - more information, see Service Control Policy Syntax in the Organizations User Guide. + more information, see SCP syntax in the Organizations User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2645,20 +2654,14 @@ management account. 
Member accounts can remove themselves with LeaveOrganization You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information -required of standalone accounts is not automatically collected. For an account that you -want to make standalone, you must choose a support plan, provide and verify the required -contact information, and provide a current payment method. Amazon Web Services uses the -payment method to charge for any billable (not free tier) Amazon Web Services activity that -occurs while the account isn't attached to an organization. To remove an account that -doesn't yet have this information, you must sign in as the member account and follow the -steps at To leave an organization when all required account information has not yet been -provided in the Organizations User Guide. The account that you want to leave must not be -a delegated administrator account for any Amazon Web Services service enabled for your -organization. If the account is a delegated administrator, you must first change the -delegated administrator account to another account that is remaining in the organization. -After the account leaves the organization, all tags that were attached to the account -object in the organization are deleted. Amazon Web Services accounts outside of an -organization do not support tags. +required of standalone accounts is not automatically collected. For more information, see +Considerations before removing an account from an organization in the Organizations User +Guide. The account that you want to leave must not be a delegated administrator account +for any Amazon Web Services service enabled for your organization. If the account is a +delegated administrator, you must first change the delegated administrator account to +another account that is remaining in the organization. After the account leaves the +organization, all tags that were attached to the account object in the organization are +deleted. Amazon Web Services accounts outside of an organization do not support tags. # Arguments - `account_id`: The unique identifier (ID) of the member account that you want to remove @@ -2698,7 +2701,8 @@ end Adds one or more tags to the specified resource. Currently, you can attach tags to the following resources in Organizations. Amazon Web Services account Organization root Organizational unit (OU) Policy (any type) This operation can be called only from the -organization's management account. +organization's management account or by a member account that is a delegated administrator +for an Amazon Web Services service. # Arguments - `resource_id`: The ID of the resource to add a tag to. You can specify any of the @@ -2746,7 +2750,8 @@ end Removes any tags with the specified keys from the specified resource. You can attach tags to the following resources in Organizations. Amazon Web Services account Organization root Organizational unit (OU) Policy (any type) This operation can be called only -from the organization's management account. +from the organization's management account or by a member account that is a delegated +administrator for an Amazon Web Services service. # Arguments - `resource_id`: The ID of the resource to remove a tag from. You can specify any of the @@ -2844,7 +2849,8 @@ end Updates an existing policy with a new name, description, or content. 
If you don't supply any parameter, that value remains unchanged. You can't change a policy's type. This -operation can be called only from the organization's management account. +operation can be called only from the organization's management account or by a member +account that is a delegated administrator for an Amazon Web Services service. # Arguments - `policy_id`: The unique identifier (ID) of the policy that you want to update. The regex @@ -2855,7 +2861,9 @@ operation can be called only from the organization's management account. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Content"`: If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, - see Service Control Policy Syntax in the Organizations User Guide. + see SCP syntax in the Organizations User Guide. The maximum size of a policy document + depends on the policy's type. For more information, see Maximum and minimum values in the + Organizations User Guide. - `"Description"`: If provided, the new description for the policy. - `"Name"`: If provided, the new name for the policy. The regex pattern that is used to validate this parameter is a string of any of the characters in the ASCII character range. diff --git a/src/services/osis.jl b/src/services/osis.jl index 148264213e..21976b557b 100644 --- a/src/services/osis.jl +++ b/src/services/osis.jl @@ -22,6 +22,9 @@ OpenSearch Ingestion pipelines. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"BufferOptions"`: Key-value pairs to configure persistent buffering for the pipeline. +- `"EncryptionAtRestOptions"`: Key-value pairs to configure encryption for data that is + written to a persistent buffer. - `"LogPublishingOptions"`: Key-value pairs to configure log publishing. - `"Tags"`: List of tags to add to the pipeline upon creation. - `"VpcOptions"`: Container for the values required to configure VPC access for the @@ -116,7 +119,7 @@ end Retrieves information about an OpenSearch Ingestion pipeline. # Arguments -- `pipeline_name`: The name of the pipeline to get information about. +- `pipeline_name`: The name of the pipeline. """ function get_pipeline(PipelineName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -152,6 +155,9 @@ see Using blueprints to create a pipeline. # Arguments - `blueprint_name`: The name of the blueprint to retrieve. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"format"`: The format format of the blueprint to retrieve. """ function get_pipeline_blueprint( BlueprintName; aws_config::AbstractAWSConfig=global_aws_config() @@ -462,6 +468,9 @@ OpenSearch Ingestion pipelines. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"BufferOptions"`: Key-value pairs to configure persistent buffering for the pipeline. +- `"EncryptionAtRestOptions"`: Key-value pairs to configure encryption for data that is + written to a persistent buffer. - `"LogPublishingOptions"`: Key-value pairs to configure log publishing. - `"MaxUnits"`: The maximum pipeline capacity, in Ingestion Compute Units (ICUs) - `"MinUnits"`: The minimum pipeline capacity, in Ingestion Compute Units (ICUs). 
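Note on the osis.jl changes above: they add `BufferOptions` and `EncryptionAtRestOptions` to the optional parameters of `create_pipeline` and `update_pipeline`, and a `format` option to `get_pipeline_blueprint`. As a rough sketch (not part of the generated patch), the new keys would be passed through the usual `params::Dict{String,<:Any}` argument. The nested field names, the example values, and the `update_pipeline` positional signature below are assumptions drawn from the OpenSearch Ingestion API shapes, not something this diff spells out:

using AWS
@service Osis   # loads the generated src/services/osis.jl wrappers

# Hypothetical values; the nested shapes ("PersistentBufferEnabled", "KmsKeyArn",
# "format" => "YAML") are assumed from the OpenSearch Ingestion API, not this patch.
params = Dict{String,Any}(
    "BufferOptions" => Dict("PersistentBufferEnabled" => true),
    "EncryptionAtRestOptions" => Dict(
        "KmsKeyArn" => "arn:aws:kms:us-east-1:111122223333:key/example",
    ),
)
Osis.update_pipeline("example-ingestion-pipeline", params)

# The new blueprint format option follows the same params-dict pattern:
Osis.get_pipeline_blueprint("example-blueprint", Dict("format" => "YAML"))

The call requires valid credentials via `global_aws_config()` (or an explicit `aws_config` keyword), as with every wrapper in this patch.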
diff --git a/src/services/outposts.jl b/src/services/outposts.jl index f20bad436d..bd63c3f18d 100644 --- a/src/services/outposts.jl +++ b/src/services/outposts.jl @@ -4,6 +4,43 @@ using AWS.AWSServices: outposts using AWS.Compat using AWS.UUIDs +""" + cancel_capacity_task(capacity_task_id, outpost_id) + cancel_capacity_task(capacity_task_id, outpost_id, params::Dict{String,<:Any}) + +Cancels the capacity task. + +# Arguments +- `capacity_task_id`: ID of the capacity task that you want to cancel. +- `outpost_id`: ID or ARN of the Outpost associated with the capacity task that you want to + cancel. + +""" +function cancel_capacity_task( + CapacityTaskId, OutpostId; aws_config::AbstractAWSConfig=global_aws_config() +) + return outposts( + "POST", + "/outposts/$(OutpostId)/capacity/$(CapacityTaskId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_capacity_task( + CapacityTaskId, + OutpostId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return outposts( + "POST", + "/outposts/$(OutpostId)/capacity/$(CapacityTaskId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_order(order_id) cancel_order(order_id, params::Dict{String,<:Any}) @@ -189,7 +226,7 @@ end Deletes the specified Outpost. # Arguments -- `outpost_id`: The ID or the Amazon Resource Name (ARN) of the Outpost. +- `outpost_id`: The ID or ARN of the Outpost. """ function delete_outpost(OutpostId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -241,6 +278,42 @@ function delete_site( ) end +""" + get_capacity_task(capacity_task_id, outpost_id) + get_capacity_task(capacity_task_id, outpost_id, params::Dict{String,<:Any}) + +Gets details of the specified capacity task. + +# Arguments +- `capacity_task_id`: ID of the capacity task. +- `outpost_id`: ID or ARN of the Outpost associated with the specified capacity task. + +""" +function get_capacity_task( + CapacityTaskId, OutpostId; aws_config::AbstractAWSConfig=global_aws_config() +) + return outposts( + "GET", + "/outposts/$(OutpostId)/capacity/$(CapacityTaskId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_capacity_task( + CapacityTaskId, + OutpostId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return outposts( + "GET", + "/outposts/$(OutpostId)/capacity/$(CapacityTaskId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_catalog_item(catalog_item_id) get_catalog_item(catalog_item_id, params::Dict{String,<:Any}) @@ -344,7 +417,7 @@ end Gets information about the specified Outpost. # Arguments -- `outpost_id`: The ID or the Amazon Resource Name (ARN) of the Outpost. +- `outpost_id`: The ID or ARN of the Outpost. """ function get_outpost(OutpostId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -376,7 +449,7 @@ end Gets the instance types for the specified Outpost. # Arguments -- `outpost_id`: The ID or the Amazon Resource Name (ARN) of the Outpost. +- `outpost_id`: The ID or ARN of the Outpost. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -407,6 +480,49 @@ function get_outpost_instance_types( ) end +""" + get_outpost_supported_instance_types(order_id, outpost_id) + get_outpost_supported_instance_types(order_id, outpost_id, params::Dict{String,<:Any}) + +Gets the instance types that an Outpost can support in InstanceTypeCapacity. 
This will +generally include instance types that are not currently configured and therefore cannot be +launched with the current Outpost capacity configuration. + +# Arguments +- `order_id`: The ID for the Amazon Web Services Outposts order. +- `outpost_id`: The ID or ARN of the Outpost. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: +- `"NextToken"`: +""" +function get_outpost_supported_instance_types( + OrderId, OutpostId; aws_config::AbstractAWSConfig=global_aws_config() +) + return outposts( + "GET", + "/outposts/$(OutpostId)/supportedInstanceTypes", + Dict{String,Any}("OrderId" => OrderId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_outpost_supported_instance_types( + OrderId, + OutpostId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return outposts( + "GET", + "/outposts/$(OutpostId)/supportedInstanceTypes", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("OrderId" => OrderId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_site(site_id) get_site(site_id, params::Dict{String,<:Any}) @@ -514,6 +630,41 @@ function list_assets( ) end +""" + list_capacity_tasks() + list_capacity_tasks(params::Dict{String,<:Any}) + +Lists the capacity tasks for your Amazon Web Services account. Use filters to return +specific results. If you specify multiple filters, the results include only the resources +that match all of the specified filters. For a filter where you can specify multiple +values, the results include items that match any of the values that you specify for the +filter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CapacityTaskStatusFilter"`: A list of statuses. For example, REQUESTED or + WAITING_FOR_EVACUATION. +- `"MaxResults"`: +- `"NextToken"`: +- `"OutpostIdentifierFilter"`: Filters the results by an Outpost ID or an Outpost ARN. +""" +function list_capacity_tasks(; aws_config::AbstractAWSConfig=global_aws_config()) + return outposts( + "GET", "/capacity/tasks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_capacity_tasks( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return outposts( + "GET", + "/capacity/tasks", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_catalog_items() list_catalog_items(params::Dict{String,<:Any}) @@ -672,8 +823,59 @@ function list_tags_for_resource( end """ - start_connection(asset_id, client_public_key, device_serial_number, network_interface_device_index) - start_connection(asset_id, client_public_key, device_serial_number, network_interface_device_index, params::Dict{String,<:Any}) + start_capacity_task(instance_pools, order_id, outpost_id) + start_capacity_task(instance_pools, order_id, outpost_id, params::Dict{String,<:Any}) + +Starts the specified capacity task. You can have one active capacity task for an order. + +# Arguments +- `instance_pools`: The instance pools specified in the capacity task. +- `order_id`: The ID of the Amazon Web Services Outposts order associated with the + specified capacity task. +- `outpost_id`: The ID or ARN of the Outposts associated with the specified capacity task. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"DryRun"`: You can request a dry run to determine if the instance type and instance size + changes is above or below available instance capacity. Requesting a dry run does not make + any changes to your plan. +""" +function start_capacity_task( + InstancePools, OrderId, OutpostId; aws_config::AbstractAWSConfig=global_aws_config() +) + return outposts( + "POST", + "/outposts/$(OutpostId)/capacity", + Dict{String,Any}("InstancePools" => InstancePools, "OrderId" => OrderId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_capacity_task( + InstancePools, + OrderId, + OutpostId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return outposts( + "POST", + "/outposts/$(OutpostId)/capacity", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InstancePools" => InstancePools, "OrderId" => OrderId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_connection(asset_id, client_public_key, network_interface_device_index) + start_connection(asset_id, client_public_key, network_interface_device_index, params::Dict{String,<:Any}) Amazon Web Services uses this action to install Outpost servers. Starts the connection required for Outpost server installation. Use CloudTrail to monitor this action or Amazon @@ -685,15 +887,16 @@ Amazon Web Services Outposts User Guide. # Arguments - `asset_id`: The ID of the Outpost server. - `client_public_key`: The public key of the client. -- `device_serial_number`: The serial number of the dongle. - `network_interface_device_index`: The device index of the network interface on the Outpost server. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeviceSerialNumber"`: The serial number of the dongle. """ function start_connection( AssetId, ClientPublicKey, - DeviceSerialNumber, NetworkInterfaceDeviceIndex; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -703,7 +906,6 @@ function start_connection( Dict{String,Any}( "AssetId" => AssetId, "ClientPublicKey" => ClientPublicKey, - "DeviceSerialNumber" => DeviceSerialNumber, "NetworkInterfaceDeviceIndex" => NetworkInterfaceDeviceIndex, ); aws_config=aws_config, @@ -713,7 +915,6 @@ end function start_connection( AssetId, ClientPublicKey, - DeviceSerialNumber, NetworkInterfaceDeviceIndex, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -727,7 +928,6 @@ function start_connection( Dict{String,Any}( "AssetId" => AssetId, "ClientPublicKey" => ClientPublicKey, - "DeviceSerialNumber" => DeviceSerialNumber, "NetworkInterfaceDeviceIndex" => NetworkInterfaceDeviceIndex, ), params, @@ -817,7 +1017,7 @@ end Updates an Outpost. # Arguments -- `outpost_id`: The ID or the Amazon Resource Name (ARN) of the Outpost. +- `outpost_id`: The ID or ARN of the Outpost. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/payment_cryptography.jl b/src/services/payment_cryptography.jl index 46b98e4799..a0591b8d67 100644 --- a/src/services/payment_cryptography.jl +++ b/src/services/payment_cryptography.jl @@ -22,11 +22,11 @@ Services accounts. Related operations: DeleteAlias GetAlias ListAli UpdateAlias # Arguments -- `alias_name`: A friendly name that you can use to refer a key. An alias must begin with - alias/ followed by a name, for example alias/ExampleAlias. 
It can contain only alphanumeric - characters, forward slashes (/), underscores (_), and dashes (-). Don't include - confidential or sensitive information in this field. This field may be displayed in - plaintext in CloudTrail logs and other output. +- `alias_name`: A friendly name that you can use to refer to a key. An alias must begin + with alias/ followed by a name, for example alias/ExampleAlias. It can contain only + alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). Don't + include personal, confidential or sensitive information in this field. This field may be + displayed in plaintext in CloudTrail logs and other output. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -65,7 +65,7 @@ keys for cryptographic functions such as encryption and decryption. In addition material used in cryptographic operations, an Amazon Web Services Payment Cryptography key includes metadata such as the key ARN, key usage, key origin, creation date, description, and key state. When you create a key, you specify both immutable and mutable data about the -key. The immutable data contains key attributes that defines the scope and cryptographic +key. The immutable data contains key attributes that define the scope and cryptographic operations that you can perform using the key, for example key class (example: SYMMETRIC_KEY), key algorithm (example: TDES_2KEY), key usage (example: TR31_P0_PIN_ENCRYPTION_KEY) and key modes of use (example: Encrypt). For information about @@ -86,21 +86,23 @@ be used across different Amazon Web Services accounts. Related operations: # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Enabled"`: Specifies whether to enable the key. If the key is enabled, it is activated - for use within the service. If the key not enabled, then it is created but not activated. - The default value is enabled. + for use within the service. If the key is not enabled, then it is created but not + activated. The default value is enabled. - `"KeyCheckValueAlgorithm"`: The algorithm that Amazon Web Services Payment Cryptography - uses to calculate the key check value (KCV) for DES and AES keys. For DES key, the KCV is - computed by encrypting 8 bytes, each with value '00', with the key to be checked and - retaining the 3 highest order bytes of the encrypted result. For AES key, the KCV is - computed by encrypting 8 bytes, each with value '01', with the key to be checked and - retaining the 3 highest order bytes of the encrypted result. -- `"Tags"`: The tags to attach to the key. Each tag consists of a tag key and a tag value. - Both the tag key and the tag value are required, but the tag value can be an empty (null) - string. You can't have more than one tag on an Amazon Web Services Payment Cryptography key - with the same tag key. To use this parameter, you must have TagResource permission. Don't - include confidential or sensitive information in this field. This field may be displayed in - plaintext in CloudTrail logs and other output. Tagging or untagging an Amazon Web - Services Payment Cryptography key can allow or deny permission to the key. + uses to calculate the key check value (KCV). It is used to validate the key integrity. For + TDES keys, the KCV is computed by encrypting 8 bytes, each with value of zero, with the key + to be checked and retaining the 3 highest order bytes of the encrypted result. 
For AES + keys, the KCV is computed using a CMAC algorithm where the input data is 16 bytes of zero + and retaining the 3 highest order bytes of the encrypted result. +- `"Tags"`: Assigns one or more tags to the Amazon Web Services Payment Cryptography key. + Use this parameter to tag a key when it is created. To tag an existing Amazon Web Services + Payment Cryptography key, use the TagResource operation. Each tag consists of a tag key and + a tag value. Both the tag key and the tag value are required, but the tag value can be an + empty (null) string. You can't have more than one tag on an Amazon Web Services Payment + Cryptography key with the same tag key. Don't include personal, confidential or sensitive + information in this field. This field may be displayed in plaintext in CloudTrail logs and + other output. Tagging or untagging an Amazon Web Services Payment Cryptography key can + allow or deny permission to the key. """ function create_key( Exportable, KeyAttributes; aws_config::AbstractAWSConfig=global_aws_config() @@ -139,7 +141,7 @@ end delete_alias(alias_name, params::Dict{String,<:Any}) Deletes the alias, but doesn't affect the underlying key. Each key can have multiple -aliases. To get the aliases of all keys, use the ListAliases operation. To change the alias +aliases. To get the aliases of all keys, use the UpdateAlias operation. To change the alias of a key, first use DeleteAlias to delete the current alias and then use CreateAlias to create a new alias. To associate an existing alias with a different key, call UpdateAlias. Cross-account use: This operation can't be used across different Amazon Web Services @@ -179,22 +181,20 @@ end delete_key(key_identifier) delete_key(key_identifier, params::Dict{String,<:Any}) -Deletes the key material and all metadata associated with Amazon Web Services Payment +Deletes the key material and metadata associated with Amazon Web Services Payment Cryptography key. Key deletion is irreversible. After a key is deleted, you can't perform cryptographic operations using the key. For example, you can't decrypt data that was encrypted by a deleted Amazon Web Services Payment Cryptography key, and the data may become unrecoverable. Because key deletion is destructive, Amazon Web Services Payment Cryptography has a safety mechanism to prevent accidental deletion of a key. When you call this operation, Amazon Web Services Payment Cryptography disables the specified key but -doesn't delete it until after a waiting period. The default waiting period is 7 days. To -set a different waiting period, set DeleteKeyInDays. During the waiting period, the -KeyState is DELETE_PENDING. After the key is deleted, the KeyState is DELETE_COMPLETE. If -you delete key material, you can use ImportKey to reimport the same key material into the -Amazon Web Services Payment Cryptography key. You should delete a key only when you are -sure that you don't need to use it anymore and no other parties are utilizing this key. If -you aren't sure, consider deactivating it instead by calling StopKeyUsage. Cross-account -use: This operation can't be used across different Amazon Web Services accounts. Related -operations: RestoreKey StartKeyUsage StopKeyUsage +doesn't delete it until after a waiting period set using DeleteKeyInDays. The default +waiting period is 7 days. During the waiting period, the KeyState is DELETE_PENDING. After +the key is deleted, the KeyState is DELETE_COMPLETE. 
You should delete a key only when you +are sure that you don't need to use it anymore and no other parties are utilizing this key. +If you aren't sure, consider deactivating it instead by calling StopKeyUsage. +Cross-account use: This operation can't be used across different Amazon Web Services +accounts. Related operations: RestoreKey StartKeyUsage StopKeyUsage # Arguments - `key_identifier`: The KeyARN of the key that is scheduled for deletion. @@ -230,39 +230,93 @@ end export_key(export_key_identifier, key_material) export_key(export_key_identifier, key_material, params::Dict{String,<:Any}) -Exports a key from Amazon Web Services Payment Cryptography using either ANSI X9 TR-34 or -TR-31 key export standard. Amazon Web Services Payment Cryptography simplifies main or root -key exchange process by eliminating the need of a paper-based key exchange process. It -takes a modern and secure approach based of the ANSI X9 TR-34 key exchange standard. You -can use ExportKey to export main or root keys such as KEK (Key Encryption Key), using -asymmetric key exchange technique following ANSI X9 TR-34 standard. The ANSI X9 TR-34 -standard uses asymmetric keys to establishes bi-directional trust between the two parties -exchanging keys. After which you can export working keys using the ANSI X9 TR-31 symmetric -key exchange standard as mandated by PCI PIN. Using this operation, you can share your +Exports a key from Amazon Web Services Payment Cryptography. Amazon Web Services Payment +Cryptography simplifies key exchange by replacing the existing paper-based approach with a +modern electronic approach. With ExportKey you can export symmetric keys using either +symmetric and asymmetric key exchange mechanisms. Using this operation, you can share your Amazon Web Services Payment Cryptography generated keys with other service partners to -perform cryptographic operations outside of Amazon Web Services Payment Cryptography -TR-34 key export Amazon Web Services Payment Cryptography uses TR-34 asymmetric key -exchange standard to export main keys such as KEK. In TR-34 terminology, the sending party -of the key is called Key Distribution Host (KDH) and the receiving party of the key is -called Key Receiving Host (KRH). In key export process, KDH is Amazon Web Services Payment -Cryptography which initiates key export. KRH is the user receiving the key. Before you -initiate TR-34 key export, you must obtain an export token by calling -GetParametersForExport. This operation also returns the signing key certificate that KDH -uses to sign the wrapped key to generate a TR-34 wrapped key block. The export token -expires after 7 days. Set the following parameters: -CertificateAuthorityPublicKeyIdentifier The KeyARN of the certificate chain that will sign -the wrapping key certificate. This must exist within Amazon Web Services Payment -Cryptography before you initiate TR-34 key export. If it does not exist, you can import it -by calling ImportKey for RootCertificatePublicKey. ExportToken Obtained from KDH by -calling GetParametersForExport. WrappingKeyCertificate Amazon Web Services Payment -Cryptography uses this to wrap the key under export. When this operation is successful, -Amazon Web Services Payment Cryptography returns the TR-34 wrapped key block. TR-31 key -export Amazon Web Services Payment Cryptography uses TR-31 symmetric key exchange standard -to export working keys. In TR-31, you must use a main key such as KEK to encrypt or wrap -the key under export. 
To establish a KEK, you can use CreateKey or ImportKey. When this -operation is successful, Amazon Web Services Payment Cryptography returns a TR-31 wrapped -key block. Cross-account use: This operation can't be used across different Amazon Web -Services accounts. Related operations: GetParametersForExport ImportKey +perform cryptographic operations outside of Amazon Web Services Payment Cryptography For +symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 +norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web +Services Payment Cryptography supports ANSI X9 TR-34 norm and RSA wrap and unwrap key +exchange mechanism. Asymmetric key exchange methods are typically used to establish +bi-directional trust between the two parties exhanging keys and are used for initial key +exchange such as Key Encryption Key (KEK). After which you can export working keys using +symmetric method to perform various cryptographic operations within Amazon Web Services +Payment Cryptography. The TR-34 norm is intended for exchanging 3DES keys only and keys are +imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, +KeyModesOfUse, Exportability) are contained within the key block. With RSA wrap and unwrap, +you can exchange both 3DES and AES-128 keys. The keys are imported in a +WrappedKeyCryptogram format and you will need to specify the key attributes during import. +You can also use ExportKey functionality to generate and export an IPEK (Initial Pin +Encryption Key) from Amazon Web Services Payment Cryptography using either TR-31 or TR-34 +export key exchange. IPEK is generated from BDK (Base Derivation Key) and +ExportDukptInitialKey attribute KSN (KeySerialNumber). The generated IPEK does not persist +within Amazon Web Services Payment Cryptography and has to be re-generated each time during +export. For key exchange using TR-31 or TR-34 key blocks, you can also export optional +blocks within the key block header which contain additional attribute information about the +key. The KeyVersion within KeyBlockHeaders indicates the version of the key within the key +block. Furthermore, KeyExportability within KeyBlockHeaders can be used to further restrict +exportability of the key after export from Amazon Web Services Payment Cryptography. The +OptionalBlocks contain the additional data related to the key. For information on data type +that can be included within optional blocks, refer to ASC X9.143-2022. Data included in +key block headers is signed but transmitted in clear text. Sensitive or confidential +information should not be included in optional blocks. Refer to ASC X9.143-2022 standard +for information on allowed data type. To export initial keys (KEK) or IPEK using TR-34 +Using this operation, you can export initial key using TR-34 asymmetric key exchange. You +can only export KEK generated within Amazon Web Services Payment Cryptography. In TR-34 +terminology, the sending party of the key is called Key Distribution Host (KDH) and the +receiving party of the key is called Key Receiving Device (KRD). During key export process, +KDH is Amazon Web Services Payment Cryptography which initiates key export and KRD is the +user receiving the key. To initiate TR-34 key export, the KRD must obtain an export token +by calling GetParametersForExport. 
This operation also generates a key pair for the purpose +of key export, signs the key and returns back the signing public key certificate (also +known as KDH signing certificate) and root certificate chain. The KDH uses the private key +to sign the the export payload and the signing public key certificate is provided to KRD to +verify the signature. The KRD can import the root certificate into its Hardware Security +Module (HSM), as required. The export token and the associated KDH signing certificate +expires after 7 days. Next the KRD generates a key pair for the the purpose of encrypting +the KDH key and provides the public key cerificate (also known as KRD wrapping certificate) +back to KDH. The KRD will also import the root cerificate chain into Amazon Web Services +Payment Cryptography by calling ImportKey for RootCertificatePublicKey. The KDH, Amazon Web +Services Payment Cryptography, will use the KRD wrapping cerificate to encrypt (wrap) the +key under export and signs it with signing private key to generate a TR-34 WrappedKeyBlock. +For more information on TR-34 key export, see section Exporting symmetric keys in the +Amazon Web Services Payment Cryptography User Guide. Set the following parameters: +ExportAttributes: Specify export attributes in case of IPEK export. This parameter is +optional for KEK export. ExportKeyIdentifier: The KeyARN of the KEK or BDK (in case of +IPEK) under export. KeyMaterial: Use Tr34KeyBlock parameters. +CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed +the KRD wrapping key certificate. ExportToken: Obtained from KDH by calling +GetParametersForImport. WrappingKeyCertificate: The public key certificate in PEM format +(base64 encoded) of the KRD wrapping key Amazon Web Services Payment Cryptography uses for +encryption of the TR-34 export payload. This certificate must be signed by the root +certificate (CertificateAuthorityPublicKeyIdentifier) imported into Amazon Web Services +Payment Cryptography. When this operation is successful, Amazon Web Services Payment +Cryptography returns the KEK or IPEK as a TR-34 WrappedKeyBlock. To export initial keys +(KEK) or IPEK using RSA Wrap and Unwrap Using this operation, you can export initial key +using asymmetric RSA wrap and unwrap key exchange method. To initiate export, generate an +asymmetric key pair on the receiving HSM and obtain the public key certificate in PEM +format (base64 encoded) for the purpose of wrapping and the root certifiate chain. Import +the root certificate into Amazon Web Services Payment Cryptography by calling ImportKey for +RootCertificatePublicKey. Next call ExportKey and set the following parameters: +CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed +wrapping key certificate. KeyMaterial: Set to KeyCryptogram. WrappingKeyCertificate: +The public key certificate in PEM format (base64 encoded) obtained by the receiving HSM and +signed by the root certificate (CertificateAuthorityPublicKeyIdentifier) imported into +Amazon Web Services Payment Cryptography. The receiving HSM uses its private key component +to unwrap the WrappedKeyCryptogram. When this operation is successful, Amazon Web +Services Payment Cryptography returns the WrappedKeyCryptogram. To export working keys or +IPEK using TR-31 Using this operation, you can export working keys or IPEK using TR-31 +symmetric key exchange. In TR-31, you must use an initial key such as KEK to encrypt or +wrap the key under export. 
To establish a KEK, you can use CreateKey or ImportKey. Set the +following parameters: ExportAttributes: Specify export attributes in case of IPEK +export. This parameter is optional for KEK export. ExportKeyIdentifier: The KeyARN of +the KEK or BDK (in case of IPEK) under export. KeyMaterial: Use Tr31KeyBlock parameters. + When this operation is successful, Amazon Web Services Payment Cryptography returns the +working key or IPEK as a TR-31 WrappedKeyBlock. Cross-account use: This operation can't be +used across different Amazon Web Services accounts. Related operations: +GetParametersForExport ImportKey # Arguments - `export_key_identifier`: The KeyARN of the key under export from Amazon Web Services @@ -270,6 +324,9 @@ Services accounts. Related operations: GetParametersForExport ImportKey - `key_material`: The key block format type, for example, TR-34 or TR-31, to use during key material export. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExportAttributes"`: The attributes for IPEK generation during export. """ function export_key( ExportKeyIdentifier, KeyMaterial; aws_config::AbstractAWSConfig=global_aws_config() @@ -395,8 +452,8 @@ accounts. Related operations: ExportKey GetParametersForImport during key material export. Export token is only required for a TR-34 key export, TR34_KEY_BLOCK. Export token is not required for TR-31 key export. - `signing_key_algorithm`: The signing key algorithm to generate a signing key certificate. - This certificate signs the wrapped key under export within the TR-34 key block cryptogram. - RSA_2048 is the only signing key algorithm allowed. + This certificate signs the wrapped key under export within the TR-34 key block. RSA_2048 is + the only signing key algorithm allowed. """ function get_parameters_for_export( @@ -439,21 +496,24 @@ end get_parameters_for_import(key_material_type, wrapping_key_algorithm) get_parameters_for_import(key_material_type, wrapping_key_algorithm, params::Dict{String,<:Any}) -Gets the import token and the wrapping key certificate to initiate a TR-34 key import into -Amazon Web Services Payment Cryptography. The wrapping key certificate wraps the key under -import within the TR-34 key payload. The import token and wrapping key certificate must be -in place and operational before calling ImportKey. The import token expires in 7 days. The -same import token can be used to import multiple keys into your service account. -Cross-account use: This operation can't be used across different Amazon Web Services -accounts. Related operations: GetParametersForExport ImportKey +Gets the import token and the wrapping key certificate in PEM format (base64 encoded) to +initiate a TR-34 WrappedKeyBlock or a RSA WrappedKeyCryptogram import into Amazon Web +Services Payment Cryptography. The wrapping key certificate wraps the key under import. The +import token and wrapping key certificate must be in place and operational before calling +ImportKey. The import token expires in 7 days. You can use the same import token to import +multiple keys into your service account. Cross-account use: This operation can't be used +across different Amazon Web Services accounts. Related operations: +GetParametersForExport ImportKey # Arguments -- `key_material_type`: The key block format type such as TR-34 or TR-31 to use during key - material import. Import token is only required for TR-34 key import TR34_KEY_BLOCK. Import - token is not required for TR-31 key import. 
+- `key_material_type`: The method to use for key material import. Import token is only + required for TR-34 WrappedKeyBlock (TR34_KEY_BLOCK) and RSA WrappedKeyCryptogram + (KEY_CRYPTOGRAM). Import token is not required for TR-31, root public key cerificate or + trusted public key certificate. - `wrapping_key_algorithm`: The wrapping key algorithm to generate a wrapping key - certificate. This certificate wraps the key under import within the TR-34 key block - cryptogram. RSA_2048 is the only wrapping key algorithm allowed. + certificate. This certificate wraps the key under import. At this time, RSA_2048 is the + allowed algorithm for TR-34 WrappedKeyBlock import. Additionally, RSA_2048, RSA_3072, + RSA_4096 are the allowed algorithms for RSA WrappedKeyCryptogram import. """ function get_parameters_for_import( @@ -538,59 +598,86 @@ end import_key(key_material) import_key(key_material, params::Dict{String,<:Any}) -Imports keys and public key certificates into Amazon Web Services Payment Cryptography. -Amazon Web Services Payment Cryptography simplifies main or root key exchange process by -eliminating the need of a paper-based key exchange process. It takes a modern and secure -approach based of the ANSI X9 TR-34 key exchange standard. You can use ImportKey to import -main or root keys such as KEK (Key Encryption Key) using asymmetric key exchange technique -following the ANSI X9 TR-34 standard. The ANSI X9 TR-34 standard uses asymmetric keys to -establishes bi-directional trust between the two parties exchanging keys. After you have -imported a main or root key, you can import working keys to perform various cryptographic -operations within Amazon Web Services Payment Cryptography using the ANSI X9 TR-31 -symmetric key exchange standard as mandated by PCI PIN. You can also import a root public -key certificate, a self-signed certificate used to sign other public key certificates, or a +Imports symmetric keys and public key certificates in PEM format (base64 encoded) into +Amazon Web Services Payment Cryptography. Amazon Web Services Payment Cryptography +simplifies key exchange by replacing the existing paper-based approach with a modern +electronic approach. With ImportKey you can import symmetric keys using either symmetric +and asymmetric key exchange mechanisms. For symmetric key exchange, Amazon Web Services +Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And +for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 +TR-34 norm and RSA wrap and unwrap key exchange mechanisms. Asymmetric key exchange methods +are typically used to establish bi-directional trust between the two parties exhanging keys +and are used for initial key exchange such as Key Encryption Key (KEK) or Zone Master Key +(ZMK). After which you can import working keys using symmetric method to perform various +cryptographic operations within Amazon Web Services Payment Cryptography. The TR-34 norm is +intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. +Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained +within the key block. With RSA wrap and unwrap, you can exchange both 3DES and AES-128 +keys. The keys are imported in a WrappedKeyCryptogram format and you will need to specify +the key attributes during import. 
You can also import a root public key certificate, used +to sign other public key certificates, or a trusted public key certificate under an already +established root public key certificate. To import a public root key certificate You can +also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate. To import a public root key certificate Using this operation, you can import the public component (in PEM cerificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or -signing key in TR-34, within your Amazon Web Services Payment Cryptography account. Set -the following parameters: KeyMaterial: RootCertificatePublicKey KeyClass: PUBLIC_KEY - KeyModesOfUse: Verify KeyUsage: TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE -PublicKeyCertificate: The certificate authority used to sign the root public key -certificate. To import a trusted public key certificate The root public key certificate -must be in place and operational before you import a trusted public key certificate. Set -the following parameters: KeyMaterial: TrustedCertificatePublicKey -CertificateAuthorityPublicKeyIdentifier: KeyArn of the RootCertificatePublicKey. +signing key in TR-34, within your Amazon Web Services Payment Cryptography account. Set the +following parameters: KeyMaterial: RootCertificatePublicKey KeyClass: PUBLIC_KEY +KeyModesOfUse: Verify KeyUsage: TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE +PublicKeyCertificate: The public key certificate in PEM format (base64 encoded) of the +private root key under import. To import a trusted public key certificate The root +public key certificate must be in place and operational before you import a trusted public +key certificate. Set the following parameters: KeyMaterial: TrustedCertificatePublicKey + CertificateAuthorityPublicKeyIdentifier: KeyArn of the RootCertificatePublicKey. KeyModesOfUse and KeyUsage: Corresponding to the cryptographic operations such as wrap, sign, or encrypt that you will allow the trusted public key certificate to perform. -PublicKeyCertificate: The certificate authority used to sign the trusted public key -certificate. Import main keys Amazon Web Services Payment Cryptography uses TR-34 -asymmetric key exchange standard to import main keys such as KEK. In TR-34 terminology, the -sending party of the key is called Key Distribution Host (KDH) and the receiving party of -the key is called Key Receiving Host (KRH). During the key import process, KDH is the user -who initiates the key import and KRH is Amazon Web Services Payment Cryptography who -receives the key. Before initiating TR-34 key import, you must obtain an import token by -calling GetParametersForImport. This operation also returns the wrapping key certificate -that KDH uses wrap key under import to generate a TR-34 wrapped key block. The import token -expires after 7 days. Set the following parameters: -CertificateAuthorityPublicKeyIdentifier: The KeyArn of the certificate chain that will sign -the signing key certificate and should exist within Amazon Web Services Payment -Cryptography before initiating TR-34 key import. If it does not exist, you can import it by -calling by calling ImportKey for RootCertificatePublicKey. ImportToken: Obtained from -KRH by calling GetParametersForImport. WrappedKeyBlock: The TR-34 wrapped key block from -KDH. 
It contains the KDH key under import, wrapped with KRH provided wrapping key -certificate and signed by the KDH private signing key. This TR-34 key block is generated by -the KDH Hardware Security Module (HSM) outside of Amazon Web Services Payment Cryptography. - SigningKeyCertificate: The public component of the private key that signed the KDH TR-34 -wrapped key block. In PEM certificate format. TR-34 is intended primarily to exchange -3DES keys. Your ability to export AES-128 and larger AES keys may be dependent on your -source system. Import working keys Amazon Web Services Payment Cryptography uses TR-31 -symmetric key exchange standard to import working keys. A KEK must be established within -Amazon Web Services Payment Cryptography by using TR-34 key import. To initiate a TR-31 key -import, set the following parameters: WrappedKeyBlock: The key under import and -encrypted using KEK. The TR-31 key block generated by your HSM outside of Amazon Web -Services Payment Cryptography. WrappingKeyIdentifier: The KeyArn of the KEK that Amazon -Web Services Payment Cryptography uses to decrypt or unwrap the key under import. +PublicKeyCertificate: The trusted public key certificate in PEM format (base64 encoded) +under import. To import initial keys (KEK or ZMK or similar) using TR-34 Using this +operation, you can import initial key using TR-34 asymmetric key exchange. In TR-34 +terminology, the sending party of the key is called Key Distribution Host (KDH) and the +receiving party of the key is called Key Receiving Device (KRD). During the key import +process, KDH is the user who initiates the key import and KRD is Amazon Web Services +Payment Cryptography who receives the key. To initiate TR-34 key import, the KDH must +obtain an import token by calling GetParametersForImport. This operation generates an +encryption keypair for the purpose of key import, signs the key and returns back the +wrapping key certificate (also known as KRD wrapping certificate) and the root certificate +chain. The KDH must trust and install the KRD wrapping certificate on its HSM and use it to +encrypt (wrap) the KDH key during TR-34 WrappedKeyBlock generation. The import token and +associated KRD wrapping certificate expires after 7 days. Next the KDH generates a key pair +for the purpose of signing the encrypted KDH key and provides the public certificate of the +signing key to Amazon Web Services Payment Cryptography. The KDH will also need to import +the root certificate chain of the KDH signing certificate by calling ImportKey for +RootCertificatePublicKey. For more information on TR-34 key import, see section Importing +symmetric keys in the Amazon Web Services Payment Cryptography User Guide. Set the +following parameters: KeyMaterial: Use Tr34KeyBlock parameters. +CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed +the KDH signing key certificate. ImportToken: Obtained from KRD by calling +GetParametersForImport. WrappedKeyBlock: The TR-34 wrapped key material from KDH. It +contains the KDH key under import, wrapped with KRD wrapping certificate and signed by KDH +signing private key. This TR-34 key block is typically generated by the KDH Hardware +Security Module (HSM) outside of Amazon Web Services Payment Cryptography. +SigningKeyCertificate: The public key certificate in PEM format (base64 encoded) of the KDH +signing key generated under the root certificate (CertificateAuthorityPublicKeyIdentifier) +imported in Amazon Web Services Payment Cryptography. 
To import initial keys (KEK or ZMK +or similar) using RSA Wrap and Unwrap Using this operation, you can import initial key +using asymmetric RSA wrap and unwrap key exchange method. To initiate import, call +GetParametersForImport with KeyMaterial set to KEY_CRYPTOGRAM to generate an import token. +This operation also generates an encryption keypair for the purpose of key import, signs +the key and returns back the wrapping key certificate in PEM format (base64 encoded) and +its root certificate chain. The import token and associated KRD wrapping certificate +expires after 7 days. You must trust and install the wrapping certificate and its +certificate chain on the sending HSM and use it to wrap the key under export for +WrappedKeyCryptogram generation. Next call ImportKey with KeyMaterial set to KEY_CRYPTOGRAM +and provide the ImportToken and KeyAttributes for the key under import. To import working +keys using TR-31 Amazon Web Services Payment Cryptography uses TR-31 symmetric key +exchange norm to import working keys. A KEK must be established within Amazon Web Services +Payment Cryptography by using TR-34 key import or by using CreateKey. To initiate a TR-31 +key import, set the following parameters: KeyMaterial: Use Tr31KeyBlock parameters. +WrappedKeyBlock: The TR-31 wrapped key material. It contains the key under import, +encrypted using KEK. The TR-31 key block is typically generated by a HSM outside of Amazon +Web Services Payment Cryptography. WrappingKeyIdentifier: The KeyArn of the KEK that +Amazon Web Services Payment Cryptography uses to decrypt or unwrap the key under import. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: ExportKey GetParametersForImport @@ -602,21 +689,22 @@ accounts. Related operations: ExportKey GetParametersForImport Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Enabled"`: Specifies whether import key is enabled. - `"KeyCheckValueAlgorithm"`: The algorithm that Amazon Web Services Payment Cryptography - uses to calculate the key check value (KCV) for DES and AES keys. For DES key, the KCV is - computed by encrypting 8 bytes, each with value '00', with the key to be checked and - retaining the 3 highest order bytes of the encrypted result. For AES key, the KCV is - computed by encrypting 8 bytes, each with value '01', with the key to be checked and - retaining the 3 highest order bytes of the encrypted result. -- `"Tags"`: The tags to attach to the key. Each tag consists of a tag key and a tag value. - Both the tag key and the tag value are required, but the tag value can be an empty (null) - string. You can't have more than one tag on an Amazon Web Services Payment Cryptography key - with the same tag key. You can't have more than one tag on an Amazon Web Services Payment + uses to calculate the key check value (KCV). It is used to validate the key integrity. For + TDES keys, the KCV is computed by encrypting 8 bytes, each with value of zero, with the key + to be checked and retaining the 3 highest order bytes of the encrypted result. For AES + keys, the KCV is computed using a CMAC algorithm where the input data is 16 bytes of zero + and retaining the 3 highest order bytes of the encrypted result. +- `"Tags"`: Assigns one or more tags to the Amazon Web Services Payment Cryptography key. + Use this parameter to tag a key when it is imported. To tag an existing Amazon Web Services + Payment Cryptography key, use the TagResource operation. 
Each tag consists of a tag key and + a tag value. Both the tag key and the tag value are required, but the tag value can be an + empty (null) string. You can't have more than one tag on an Amazon Web Services Payment Cryptography key with the same tag key. If you specify an existing tag key with a different tag value, Amazon Web Services Payment Cryptography replaces the current tag value with the - specified one. To use this parameter, you must have TagResource permission. Don't include - confidential or sensitive information in this field. This field may be displayed in - plaintext in CloudTrail logs and other output. Tagging or untagging an Amazon Web - Services Payment Cryptography key can allow or deny permission to the key. + specified one. Don't include personal, confidential or sensitive information in this + field. This field may be displayed in plaintext in CloudTrail logs and other output. + Tagging or untagging an Amazon Web Services Payment Cryptography key can allow or deny + permission to the key. """ function import_key(KeyMaterial; aws_config::AbstractAWSConfig=global_aws_config()) return payment_cryptography( @@ -698,7 +786,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"KeyState"`: The key state of the keys you want to list. - `"MaxResults"`: Use this parameter to specify the maximum number of items to return. When this value is present, Amazon Web Services Payment Cryptography does not return more than - the specified number of items, but it might return fewer. + the specified number of items, but it might return fewer. This value is optional. If you + include a value, it must be between 1 and 100, inclusive. If you do not include a value, it + defaults to 50. - `"NextToken"`: Use this parameter in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the truncated response you just received. @@ -735,7 +825,9 @@ accounts. Related operations: TagResource UntagResource Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: Use this parameter to specify the maximum number of items to return. When this value is present, Amazon Web Services Payment Cryptography does not return more than - the specified number of items, but it might return fewer. + the specified number of items, but it might return fewer. This value is optional. If you + include a value, it must be between 1 and 100, inclusive. If you do not include a value, it + defaults to 50. - `"NextToken"`: Use this parameter in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the truncated response you just received. @@ -897,11 +989,11 @@ Services accounts. Related operations: ListTagsForResource UntagResourc can be an empty (null) string. You can't have more than one tag on an Amazon Web Services Payment Cryptography key with the same tag key. If you specify an existing tag key with a different tag value, Amazon Web Services Payment Cryptography replaces the current tag - value with the new one. Don't include confidential or sensitive information in this field. - This field may be displayed in plaintext in CloudTrail logs and other output. To use this - parameter, you must have TagResource permission in an IAM policy. Don't include - confidential or sensitive information in this field. This field may be displayed in - plaintext in CloudTrail logs and other output. + value with the new one. 
Don't include personal, confidential or sensitive information in + this field. This field may be displayed in plaintext in CloudTrail logs and other output. + To use this parameter, you must have TagResource permission in an IAM policy. Don't + include personal, confidential or sensitive information in this field. This field may be + displayed in plaintext in CloudTrail logs and other output. """ function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/payment_cryptography_data.jl b/src/services/payment_cryptography_data.jl index 2f5103a507..cd9714f20c 100644 --- a/src/services/payment_cryptography_data.jl +++ b/src/services/payment_cryptography_data.jl @@ -8,23 +8,25 @@ using AWS.UUIDs decrypt_data(cipher_text, decryption_attributes, key_identifier) decrypt_data(cipher_text, decryption_attributes, key_identifier, params::Dict{String,<:Any}) -Decrypts ciphertext data to plaintext using symmetric, asymmetric, or DUKPT data encryption -key. For more information, see Decrypt data in the Amazon Web Services Payment Cryptography -User Guide. You can use an encryption key generated within Amazon Web Services Payment -Cryptography, or you can import your own encryption key by calling ImportKey. For this -operation, the key must have KeyModesOfUse set to Decrypt. In asymmetric decryption, Amazon -Web Services Payment Cryptography decrypts the ciphertext using the private component of -the asymmetric encryption key pair. For data encryption outside of Amazon Web Services -Payment Cryptography, you can export the public component of the asymmetric key pair by -calling GetPublicCertificate. For symmetric and DUKPT decryption, Amazon Web Services -Payment Cryptography supports TDES and AES algorithms. For asymmetric decryption, Amazon -Web Services Payment Cryptography supports RSA. When you use DUKPT, for TDES algorithm, the -ciphertext data length must be a multiple of 16 bytes. For AES algorithm, the ciphertext -data length must be a multiple of 32 bytes. For information about valid keys for this -operation, see Understanding key attributes and Key types for specific data operations in -the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This -operation can't be used across different Amazon Web Services accounts. Related operations: - EncryptData GetPublicCertificate ImportKey +Decrypts ciphertext data to plaintext using a symmetric (TDES, AES), asymmetric (RSA), or +derived (DUKPT or EMV) encryption key scheme. For more information, see Decrypt data in the +Amazon Web Services Payment Cryptography User Guide. You can use an encryption key +generated within Amazon Web Services Payment Cryptography, or you can import your own +encryption key by calling ImportKey. For this operation, the key must have KeyModesOfUse +set to Decrypt. In asymmetric decryption, Amazon Web Services Payment Cryptography decrypts +the ciphertext using the private component of the asymmetric encryption key pair. For data +encryption outside of Amazon Web Services Payment Cryptography, you can export the public +component of the asymmetric key pair by calling GetPublicCertificate. For symmetric and +DUKPT decryption, Amazon Web Services Payment Cryptography supports TDES and AES +algorithms. For EMV decryption, Amazon Web Services Payment Cryptography supports TDES +algorithms. For asymmetric decryption, Amazon Web Services Payment Cryptography supports +RSA. 
When you use TDES or TDES DUKPT, the ciphertext data length must be a multiple of 8
+bytes. For AES or AES DUKPT, the ciphertext data length must be a multiple of 16 bytes. For
+RSA, it should be equal to the key size unless padding is enabled. For information about
+valid keys for this operation, see Understanding key attributes and Key types for specific
+data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account
+use: This operation can't be used across different Amazon Web Services accounts. Related
+operations: EncryptData GetPublicCertificate ImportKey

# Arguments
- `cipher_text`: The ciphertext to decrypt.
@@ -78,28 +80,37 @@ end
    encrypt_data(encryption_attributes, key_identifier, plain_text)
    encrypt_data(encryption_attributes, key_identifier, plain_text, params::Dict{String,<:Any})

-Encrypts plaintext data to ciphertext using symmetric, asymmetric, or DUKPT data encryption
-key. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography
-User Guide. You can generate an encryption key within Amazon Web Services Payment
-Cryptography by calling CreateKey. You can import your own encryption key by calling
-ImportKey. For this operation, the key must have KeyModesOfUse set to Encrypt. In
-asymmetric encryption, plaintext is encrypted using public component. You can import the
-public component of an asymmetric key pair created outside Amazon Web Services Payment
-Cryptography by calling ImportKey). for symmetric and DUKPT encryption, Amazon Web
-Services Payment Cryptography supports TDES and AES algorithms. For asymmetric encryption,
-Amazon Web Services Payment Cryptography supports RSA. To encrypt using DUKPT, you must
-already have a DUKPT key in your account with KeyModesOfUse set to DeriveKey, or you can
-generate a new DUKPT key by calling CreateKey. For information about valid keys for this
-operation, see Understanding key attributes and Key types for specific data operations in
-the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation
-can't be used across different Amazon Web Services accounts. Related operations:
-DecryptData GetPublicCertificate ImportKey ReEncryptData
+Encrypts plaintext data to ciphertext using a symmetric (TDES, AES), asymmetric (RSA), or
+derived (DUKPT or EMV) encryption key scheme. For more information, see Encrypt data in the
+Amazon Web Services Payment Cryptography User Guide. You can generate an encryption key
+within Amazon Web Services Payment Cryptography by calling CreateKey. You can import your
+own encryption key by calling ImportKey. For this operation, the key must have
+KeyModesOfUse set to Encrypt. In asymmetric encryption, plaintext is encrypted using public
+component. You can import the public component of an asymmetric key pair created outside
+Amazon Web Services Payment Cryptography by calling ImportKey. For symmetric and DUKPT
+encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For
+EMV encryption, Amazon Web Services Payment Cryptography supports TDES algorithms. For
+asymmetric encryption, Amazon Web Services Payment Cryptography supports RSA. When you use
+TDES or TDES DUKPT, the plaintext data length must be a multiple of 8 bytes. For AES or AES
+DUKPT, the plaintext data length must be a multiple of 16 bytes. For RSA, it should be equal
+to the key size unless padding is enabled.
To encrypt using DUKPT, you must already have a
+BDK (Base Derivation Key) key in your account with KeyModesOfUse set to DeriveKey, or you
+can generate a new DUKPT key by calling CreateKey. To encrypt using EMV, you must already
+have an IMK (Issuer Master Key) key in your account with KeyModesOfUse set to DeriveKey.
+For information about valid keys for this operation, see Understanding key attributes and
+Key types for specific data operations in the Amazon Web Services Payment Cryptography User
+Guide. Cross-account use: This operation can't be used across different Amazon Web
+Services accounts. Related operations: DecryptData GetPublicCertificate
+ImportKey ReEncryptData

# Arguments
- `encryption_attributes`: The encryption key type and attributes for plaintext encryption.
- `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment
  Cryptography uses for plaintext encryption.
-- `plain_text`: The plaintext to be encrypted.
+- `plain_text`: The plaintext to be encrypted. For encryption using asymmetric keys,
+  plaintext data length is constrained by encryption key strength that you define in
+  KeyAlgorithm and padding type that you define in AsymmetricEncryptionAttributes. For more
+  information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide.
"""
function encrypt_data(
@@ -223,23 +234,26 @@ end
    generate_mac(generation_attributes, key_identifier, message_data, params::Dict{String,<:Any})

Generates a Message Authentication Code (MAC) cryptogram within Amazon Web Services Payment
-Cryptography. You can use this operation when keys won't be shared but mutual data is
-present on both ends for validation. In this case, known data values are used to generate a
-MAC on both ends for comparision without sending or receiving data in ciphertext or
-plaintext. You can use this operation to generate a DUPKT, HMAC or EMV MAC by setting
-generation attributes and algorithm to the associated values. The MAC generation encryption
-key must have valid values for KeyUsage such as TR31_M7_HMAC_KEY for HMAC generation, and
-they key must have KeyModesOfUse set to Generate and Verify. For information about valid
-keys for this operation, see Understanding key attributes and Key types for specific data
-operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use:
-This operation can't be used across different Amazon Web Services accounts. Related
-operations: VerifyMac
+Cryptography. You can use this operation to authenticate card-related data by using known
+data values to generate MAC for data validation between the sending and receiving parties.
+This operation uses message data, a secret encryption key and MAC algorithm to generate a
+unique MAC value for transmission. The receiving party of the MAC must use the same message
+data, secret encryption key and MAC algorithm to reproduce another MAC value for
+comparison. You can use this operation to generate a DUKPT, CMAC, HMAC or EMV MAC by
+setting generation attributes and algorithm to the associated values. The MAC generation
+encryption key must have valid values for KeyUsage such as TR31_M7_HMAC_KEY for HMAC
+generation, and the key must have KeyModesOfUse set to Generate and Verify. For
+information about valid keys for this operation, see Understanding key attributes and Key
+types for specific data operations in the Amazon Web Services Payment Cryptography User
+Guide.
Cross-account use: This operation can't be used across different Amazon Web +Services accounts. Related operations: VerifyMac # Arguments - `generation_attributes`: The attributes and data values to use for MAC generation within Amazon Web Services Payment Cryptography. - `key_identifier`: The keyARN of the MAC generation encryption key. -- `message_data`: The data for which a MAC is under generation. +- `message_data`: The data for which a MAC is under generation. This value must be + hexBinary. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -470,18 +484,17 @@ another encryption key and changing PIN block format from one to another without data leaving Amazon Web Services Payment Cryptography. The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography supports TDES and AES key derivation -type for DUKPT tranlations. You can use this operation for P2PE (Point to Point Encryption) -use cases where the encryption keys should change but the processing system either does not -need to, or is not permitted to, decrypt the data. The allowed combinations of PIN block -format translations are guided by PCI. It is important to note that not all encrypted PIN -block formats (example, format 1) require PAN (Primary Account Number) as input. And as -such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a -format (format 1) that does not require a PAN for generation. For information about valid -keys for this operation, see Understanding key attributes and Key types for specific data -operations in the Amazon Web Services Payment Cryptography User Guide. At this time, -Amazon Web Services Payment Cryptography does not support translations to PIN format 4. -Cross-account use: This operation can't be used across different Amazon Web Services -accounts. Related operations: GeneratePinData VerifyPinData +type for DUKPT translations. The allowed combinations of PIN block format translations are +guided by PCI. It is important to note that not all encrypted PIN block formats (example, +format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that +requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does +not require a PAN for generation. For information about valid keys for this operation, see +Understanding key attributes and Key types for specific data operations in the Amazon Web +Services Payment Cryptography User Guide. Amazon Web Services Payment Cryptography +currently supports ISO PIN block 4 translation for PIN block built using legacy PAN length. +That is, PAN is the right most 12 digits excluding the check digits. Cross-account use: +This operation can't be used across different Amazon Web Services accounts. Related +operations: GeneratePinData VerifyPinData # Arguments - `encrypted_pin_block`: The encrypted PIN block data that Amazon Web Services Payment @@ -489,16 +502,16 @@ accounts. Related operations: GeneratePinData VerifyPinData - `incoming_key_identifier`: The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK. - `incoming_translation_attributes`: The format of the incoming PIN block data for - tranlation within Amazon Web Services Payment Cryptography. + translation within Amazon Web Services Payment Cryptography. 
- `outgoing_key_identifier`: The keyARN of the encryption key for encrypting outgoing PIN
  block data. This key type can be PEK or BDK.
- `outgoing_translation_attributes`: The format of the outgoing PIN block data after
-  tranlation by Amazon Web Services Payment Cryptography.
+  translation by Amazon Web Services Payment Cryptography.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"IncomingDukptAttributes"`: The attributes and values to use for incoming DUKPT
-  encryption key for PIN block tranlation.
+  encryption key for PIN block translation.
- `"OutgoingDukptAttributes"`: The attributes and values to use for outgoing DUKPT
  encryption key after PIN block translation.
"""
@@ -732,22 +745,22 @@ end
    verify_mac(key_identifier, mac, message_data, verification_attributes)
    verify_mac(key_identifier, mac, message_data, verification_attributes, params::Dict{String,<:Any})

-Verifies a Message Authentication Code (MAC). You can use this operation when keys won't
-be shared but mutual data is present on both ends for validation. In this case, known data
-values are used to generate a MAC on both ends for verification without sending or
-receiving data in ciphertext or plaintext. You can use this operation to verify a DUPKT,
-HMAC or EMV MAC by setting generation attributes and algorithm to the associated values.
-Use the same encryption key for MAC verification as you use for GenerateMac. For
-information about valid keys for this operation, see Understanding key attributes and Key
-types for specific data operations in the Amazon Web Services Payment Cryptography User
-Guide. Cross-account use: This operation can't be used across different Amazon Web
-Services accounts. Related operations: GenerateMac
+Verifies a Message Authentication Code (MAC). You can use this operation to verify MAC for
+message data authentication. In this operation, you must use the same message
+data, secret encryption key and MAC algorithm that were used to generate the MAC. You can
+use this operation to verify a DUKPT, CMAC, HMAC or EMV MAC by setting generation attributes
+and algorithm to the associated values. For information about valid keys for this
+operation, see Understanding key attributes and Key types for specific data operations in
+the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This
+operation can't be used across different Amazon Web Services accounts. Related operations:
+ GenerateMac

# Arguments
- `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment
  Cryptography uses to verify MAC data.
- `mac`: The MAC being verified.
-- `message_data`: The data on for which MAC is under verification.
+- `message_data`: The data for which the MAC is under verification. This value must be
+  hexBinary.
- `verification_attributes`: The attributes and data values to use for MAC verification
  within Amazon Web Services Payment Cryptography.
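Before the diff moves on to the new connector services, here is a hedged sketch of the two data-plane round trips described above (EncryptData/DecryptData and GenerateMac/VerifyMac), written against the generated wrappers in this file. The `@service` symbol, the attribute dictionary shapes, and the response field names are assumptions to check against the service model; the keyARNs are placeholders, and the hexBinary inputs respect the block-length rules quoted in the docstrings.

# Hedged sketch: symmetric encrypt/decrypt and MAC generate/verify round trips.
using AWS
@service Payment_Cryptography_Data   # assumed to resolve to src/services/payment_cryptography_data.jl

aes_key_arn  = "arn:aws:payment-cryptography:us-east-1:111122223333:key/aaaaaaaaaaaaaaaa"  # placeholder
hmac_key_arn = "arn:aws:payment-cryptography:us-east-1:111122223333:key/bbbbbbbbbbbbbbbb"  # placeholder

# EncryptData/DecryptData: AES plaintext must be a multiple of 16 bytes (32 hex characters).
plaintext_hex = "48656C6C6F2C207061796D656E742121"                  # 16 bytes, hexBinary
sym_attrs = Dict("Symmetric" => Dict(                               # attribute shape assumed
    "Mode" => "CBC",
    "InitializationVector" => "00000000000000000000000000000000",  # 16-byte IV, hexBinary
))
enc = Payment_Cryptography_Data.encrypt_data(sym_attrs, aes_key_arn, plaintext_hex)
dec = Payment_Cryptography_Data.decrypt_data(enc["CipherText"], sym_attrs, aes_key_arn)  # field name assumed
@assert dec["PlainText"] == plaintext_hex                           # field name assumed

# GenerateMac/VerifyMac: same key, message data (hexBinary) and algorithm on both sides.
message_hex = "3132333435363738"                                    # "12345678" as hexBinary
mac_attrs = Dict("Algorithm" => "HMAC_SHA256")                      # MacAttributes member assumed
gen = Payment_Cryptography_Data.generate_mac(mac_attrs, hmac_key_arn, message_hex)
Payment_Cryptography_Data.verify_mac(hmac_key_arn, gen["Mac"], message_hex, mac_attrs)   # field name assumed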
diff --git a/src/services/pca_connector_ad.jl b/src/services/pca_connector_ad.jl new file mode 100644 index 0000000000..fcf3a934ca --- /dev/null +++ b/src/services/pca_connector_ad.jl @@ -0,0 +1,1067 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: pca_connector_ad +using AWS.Compat +using AWS.UUIDs + +""" + create_connector(certificate_authority_arn, directory_id, vpc_information) + create_connector(certificate_authority_arn, directory_id, vpc_information, params::Dict{String,<:Any}) + +Creates a connector between Amazon Web Services Private CA and an Active Directory. You +must specify the private CA, directory ID, and security groups. + +# Arguments +- `certificate_authority_arn`: The Amazon Resource Name (ARN) of the certificate authority + being used. +- `directory_id`: The identifier of the Active Directory. +- `vpc_information`: Security group IDs that describe the inbound and outbound rules. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Idempotency token. +- `"Tags"`: Metadata assigned to a connector consisting of a key-value pair. +""" +function create_connector( + CertificateAuthorityArn, + DirectoryId, + VpcInformation; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/connectors", + Dict{String,Any}( + "CertificateAuthorityArn" => CertificateAuthorityArn, + "DirectoryId" => DirectoryId, + "VpcInformation" => VpcInformation, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_connector( + CertificateAuthorityArn, + DirectoryId, + VpcInformation, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/connectors", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CertificateAuthorityArn" => CertificateAuthorityArn, + "DirectoryId" => DirectoryId, + "VpcInformation" => VpcInformation, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_directory_registration(directory_id) + create_directory_registration(directory_id, params::Dict{String,<:Any}) + +Creates a directory registration that authorizes communication between Amazon Web Services +Private CA and an Active Directory + +# Arguments +- `directory_id`: The identifier of the Active Directory. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Idempotency token. +- `"Tags"`: Metadata assigned to a directory registration consisting of a key-value pair. 
+""" +function create_directory_registration( + DirectoryId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "POST", + "/directoryRegistrations", + Dict{String,Any}("DirectoryId" => DirectoryId, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_directory_registration( + DirectoryId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/directoryRegistrations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DirectoryId" => DirectoryId, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_service_principal_name(connector_arn, directory_registration_arn) + create_service_principal_name(connector_arn, directory_registration_arn, params::Dict{String,<:Any}) + +Creates a service principal name (SPN) for the service account in Active Directory. +Kerberos authentication uses SPNs to associate a service instance with a service sign-in +account. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateConnector. +- `directory_registration_arn`: The Amazon Resource Name (ARN) that was returned when you + called CreateDirectoryRegistration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Idempotency token. +""" +function create_service_principal_name( + ConnectorArn, + DirectoryRegistrationArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames/$(ConnectorArn)", + Dict{String,Any}("ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_service_principal_name( + ConnectorArn, + DirectoryRegistrationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames/$(ConnectorArn)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_template(connector_arn, definition, name) + create_template(connector_arn, definition, name, params::Dict{String,<:Any}) + +Creates an Active Directory compatible certificate template. The connectors issues +certificates using these templates based on the requester’s Active Directory group +membership. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateConnector. +- `definition`: Template configuration to define the information included in certificates. + Define certificate validity and renewal periods, certificate request handling and + enrollment options, key usage extensions, application policies, and cryptography settings. +- `name`: Name of the template. The template name must be unique. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Idempotency token. +- `"Tags"`: Metadata assigned to a template consisting of a key-value pair. 
+""" +function create_template( + ConnectorArn, Definition, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "POST", + "/templates", + Dict{String,Any}( + "ConnectorArn" => ConnectorArn, + "Definition" => Definition, + "Name" => Name, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_template( + ConnectorArn, + Definition, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/templates", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConnectorArn" => ConnectorArn, + "Definition" => Definition, + "Name" => Name, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_template_group_access_control_entry(access_rights, group_display_name, group_security_identifier, template_arn) + create_template_group_access_control_entry(access_rights, group_display_name, group_security_identifier, template_arn, params::Dict{String,<:Any}) + +Create a group access control entry. Allow or deny Active Directory groups from enrolling +and/or autoenrolling with the template based on the group security identifiers (SIDs). + +# Arguments +- `access_rights`: Allow or deny permissions for an Active Directory group to enroll or + autoenroll certificates for a template. +- `group_display_name`: Name of the Active Directory group. This name does not need to + match the group name in Active Directory. +- `group_security_identifier`: Security identifier (SID) of the group object from Active + Directory. The SID starts with \"S-\". +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Idempotency token. +""" +function create_template_group_access_control_entry( + AccessRights, + GroupDisplayName, + GroupSecurityIdentifier, + TemplateArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/templates/$(TemplateArn)/accessControlEntries", + Dict{String,Any}( + "AccessRights" => AccessRights, + "GroupDisplayName" => GroupDisplayName, + "GroupSecurityIdentifier" => GroupSecurityIdentifier, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_template_group_access_control_entry( + AccessRights, + GroupDisplayName, + GroupSecurityIdentifier, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/templates/$(TemplateArn)/accessControlEntries", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AccessRights" => AccessRights, + "GroupDisplayName" => GroupDisplayName, + "GroupSecurityIdentifier" => GroupSecurityIdentifier, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_connector(connector_arn) + delete_connector(connector_arn, params::Dict{String,<:Any}) + +Deletes a connector for Active Directory. You must provide the Amazon Resource Name (ARN) +of the connector that you want to delete. You can find the ARN by calling the +https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_ListConnectors action. 
+Deleting a connector does not deregister your directory with Amazon Web Services Private +CA. You can deregister your directory by calling the +https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_DeleteDirectoryRegistra +tion action. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateConnector. + +""" +function delete_connector(ConnectorArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "DELETE", + "/connectors/$(ConnectorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_connector( + ConnectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "DELETE", + "/connectors/$(ConnectorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_directory_registration(directory_registration_arn) + delete_directory_registration(directory_registration_arn, params::Dict{String,<:Any}) + +Deletes a directory registration. Deleting a directory registration deauthorizes Amazon Web +Services Private CA with the directory. + +# Arguments +- `directory_registration_arn`: The Amazon Resource Name (ARN) that was returned when you + called CreateDirectoryRegistration. + +""" +function delete_directory_registration( + DirectoryRegistrationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "DELETE", + "/directoryRegistrations/$(DirectoryRegistrationArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_directory_registration( + DirectoryRegistrationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "DELETE", + "/directoryRegistrations/$(DirectoryRegistrationArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_service_principal_name(connector_arn, directory_registration_arn) + delete_service_principal_name(connector_arn, directory_registration_arn, params::Dict{String,<:Any}) + +Deletes the service principal name (SPN) used by a connector to authenticate with your +Active Directory. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateConnector. +- `directory_registration_arn`: The Amazon Resource Name (ARN) that was returned when you + called CreateDirectoryRegistration. + +""" +function delete_service_principal_name( + ConnectorArn, + DirectoryRegistrationArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "DELETE", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames/$(ConnectorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_service_principal_name( + ConnectorArn, + DirectoryRegistrationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "DELETE", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames/$(ConnectorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_template(template_arn) + delete_template(template_arn, params::Dict{String,<:Any}) + +Deletes a template. Certificates issued using the template are still valid until they are +revoked or expired. 
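Because deleting a connector does not deregister the directory, teardown has to undo each of the create operations explicitly; a hedged ordering is sketched below. The `@service` symbol is an assumption and the ARNs are placeholders.

# Hedged sketch: tearing down a Connector for AD setup in roughly the reverse order of creation.
using AWS
@service Pca_Connector_Ad   # assumed module name

template_arn     = "arn:aws:pca-connector-ad:us-east-1:111122223333:connector/EXAMPLE/template/EXAMPLE"      # placeholder
connector_arn    = "arn:aws:pca-connector-ad:us-east-1:111122223333:connector/EXAMPLE"                       # placeholder
registration_arn = "arn:aws:pca-connector-ad:us-east-1:111122223333:directory-registration/EXAMPLE"          # placeholder

Pca_Connector_Ad.delete_template(template_arn)                    # previously issued certificates remain valid
Pca_Connector_Ad.delete_service_principal_name(connector_arn, registration_arn)
Pca_Connector_Ad.delete_connector(connector_arn)                  # does not deregister the directory
Pca_Connector_Ad.delete_directory_registration(registration_arn)  # deauthorizes Private CA with the directory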
+ +# Arguments +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. + +""" +function delete_template(TemplateArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "DELETE", + "/templates/$(TemplateArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_template( + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "DELETE", + "/templates/$(TemplateArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_template_group_access_control_entry(group_security_identifier, template_arn) + delete_template_group_access_control_entry(group_security_identifier, template_arn, params::Dict{String,<:Any}) + +Deletes a group access control entry. + +# Arguments +- `group_security_identifier`: Security identifier (SID) of the group object from Active + Directory. The SID starts with \"S-\". +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. + +""" +function delete_template_group_access_control_entry( + GroupSecurityIdentifier, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "DELETE", + "/templates/$(TemplateArn)/accessControlEntries/$(GroupSecurityIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_template_group_access_control_entry( + GroupSecurityIdentifier, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "DELETE", + "/templates/$(TemplateArn)/accessControlEntries/$(GroupSecurityIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_connector(connector_arn) + get_connector(connector_arn, params::Dict{String,<:Any}) + +Lists information about your connector. You specify the connector on input by its ARN +(Amazon Resource Name). + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateConnector. + +""" +function get_connector(ConnectorArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "GET", + "/connectors/$(ConnectorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_connector( + ConnectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/connectors/$(ConnectorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_directory_registration(directory_registration_arn) + get_directory_registration(directory_registration_arn, params::Dict{String,<:Any}) + +A structure that contains information about your directory registration. + +# Arguments +- `directory_registration_arn`: The Amazon Resource Name (ARN) that was returned when you + called CreateDirectoryRegistration. 
+ +""" +function get_directory_registration( + DirectoryRegistrationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "GET", + "/directoryRegistrations/$(DirectoryRegistrationArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_directory_registration( + DirectoryRegistrationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/directoryRegistrations/$(DirectoryRegistrationArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_service_principal_name(connector_arn, directory_registration_arn) + get_service_principal_name(connector_arn, directory_registration_arn, params::Dict{String,<:Any}) + +Lists the service principal name that the connector uses to authenticate with Active +Directory. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateConnector. +- `directory_registration_arn`: The Amazon Resource Name (ARN) that was returned when you + called CreateDirectoryRegistration. + +""" +function get_service_principal_name( + ConnectorArn, + DirectoryRegistrationArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames/$(ConnectorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_service_principal_name( + ConnectorArn, + DirectoryRegistrationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames/$(ConnectorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_template(template_arn) + get_template(template_arn, params::Dict{String,<:Any}) + +Retrieves a certificate template that the connector uses to issue certificates from a +private CA. + +# Arguments +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. + +""" +function get_template(TemplateArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "GET", + "/templates/$(TemplateArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_template( + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/templates/$(TemplateArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_template_group_access_control_entry(group_security_identifier, template_arn) + get_template_group_access_control_entry(group_security_identifier, template_arn, params::Dict{String,<:Any}) + +Retrieves the group access control entries for a template. + +# Arguments +- `group_security_identifier`: Security identifier (SID) of the group object from Active + Directory. The SID starts with \"S-\". +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. 
+ +""" +function get_template_group_access_control_entry( + GroupSecurityIdentifier, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "GET", + "/templates/$(TemplateArn)/accessControlEntries/$(GroupSecurityIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_template_group_access_control_entry( + GroupSecurityIdentifier, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/templates/$(TemplateArn)/accessControlEntries/$(GroupSecurityIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_connectors() + list_connectors(params::Dict{String,<:Any}) + +Lists the connectors that you created by using the +https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_CreateConnector action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Use this parameter when paginating results to specify the maximum number + of items to return in the response on each page. If additional items exist beyond the + number you specify, the NextToken element is sent in the response. Use this NextToken value + in a subsequent request to retrieve additional items. +- `"NextToken"`: Use this parameter when paginating results in a subsequent request after + you receive a response with truncated results. Set it to the value of the NextToken + parameter from the response you just received. +""" +function list_connectors(; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "GET", "/connectors"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_connectors( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "GET", "/connectors", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_directory_registrations() + list_directory_registrations(params::Dict{String,<:Any}) + +Lists the directory registrations that you created by using the +https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_CreateDirectoryRegistra +tion action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Use this parameter when paginating results to specify the maximum number + of items to return in the response on each page. If additional items exist beyond the + number you specify, the NextToken element is sent in the response. Use this NextToken value + in a subsequent request to retrieve additional items. +- `"NextToken"`: Use this parameter when paginating results in a subsequent request after + you receive a response with truncated results. Set it to the value of the NextToken + parameter from the response you just received. 
+""" +function list_directory_registrations(; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "GET", + "/directoryRegistrations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_directory_registrations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "GET", + "/directoryRegistrations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_service_principal_names(directory_registration_arn) + list_service_principal_names(directory_registration_arn, params::Dict{String,<:Any}) + +Lists the service principal names that the connector uses to authenticate with Active +Directory. + +# Arguments +- `directory_registration_arn`: The Amazon Resource Name (ARN) that was returned when you + called CreateDirectoryRegistration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Use this parameter when paginating results to specify the maximum number + of items to return in the response on each page. If additional items exist beyond the + number you specify, the NextToken element is sent in the response. Use this NextToken value + in a subsequent request to retrieve additional items. +- `"NextToken"`: Use this parameter when paginating results in a subsequent request after + you receive a response with truncated results. Set it to the value of the NextToken + parameter from the response you just received. +""" +function list_service_principal_names( + DirectoryRegistrationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "GET", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_service_principal_names( + DirectoryRegistrationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/directoryRegistrations/$(DirectoryRegistrationArn)/servicePrincipalNames", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags, if any, that are associated with your resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) that was returned when you created the + resource. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "GET", + "/tags/$(ResourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/tags/$(ResourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_template_group_access_control_entries(template_arn) + list_template_group_access_control_entries(template_arn, params::Dict{String,<:Any}) + +Lists group access control entries you created. + +# Arguments +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"MaxResults"`: Use this parameter when paginating results to specify the maximum number + of items to return in the response on each page. If additional items exist beyond the + number you specify, the NextToken element is sent in the response. Use this NextToken value + in a subsequent request to retrieve additional items. +- `"NextToken"`: Use this parameter when paginating results in a subsequent request after + you receive a response with truncated results. Set it to the value of the NextToken + parameter from the response you just received. +""" +function list_template_group_access_control_entries( + TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "GET", + "/templates/$(TemplateArn)/accessControlEntries"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_template_group_access_control_entries( + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/templates/$(TemplateArn)/accessControlEntries", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_templates(connector_arn) + list_templates(connector_arn, params::Dict{String,<:Any}) + +Lists the templates, if any, that are associated with a connector. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateConnector. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Use this parameter when paginating results to specify the maximum number + of items to return in the response on each page. If additional items exist beyond the + number you specify, the NextToken element is sent in the response. Use this NextToken value + in a subsequent request to retrieve additional items. +- `"NextToken"`: Use this parameter when paginating results in a subsequent request after + you receive a response with truncated results. Set it to the value of the NextToken + parameter from the response you just received. +""" +function list_templates(ConnectorArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "GET", + "/templates", + Dict{String,Any}("ConnectorArn" => ConnectorArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_templates( + ConnectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "GET", + "/templates", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectorArn" => ConnectorArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds one or more tags to your resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) that was returned when you created the + resource. +- `tags`: Metadata assigned to a directory registration consisting of a key-value pair. 
+ +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "POST", + "/tags/$(ResourceArn)", + Dict{String,Any}("Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "POST", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Tags" => Tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes one or more tags from your resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) that was returned when you created the + resource. +- `tag_keys`: Specifies a list of tag keys that you want to remove from the specified + resources. + +""" +function untag_resource( + ResourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_template(template_arn) + update_template(template_arn, params::Dict{String,<:Any}) + +Update template configuration to define the information included in certificates. + +# Arguments +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Definition"`: Template configuration to define the information included in + certificates. Define certificate validity and renewal periods, certificate request handling + and enrollment options, key usage extensions, application policies, and cryptography + settings. +- `"ReenrollAllCertificateHolders"`: This setting allows the major version of a template to + be increased automatically. All members of Active Directory groups that are allowed to + enroll with a template will receive a new certificate issued using that template. +""" +function update_template(TemplateArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_ad( + "PATCH", + "/templates/$(TemplateArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_template( + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "PATCH", + "/templates/$(TemplateArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_template_group_access_control_entry(group_security_identifier, template_arn) + update_template_group_access_control_entry(group_security_identifier, template_arn, params::Dict{String,<:Any}) + +Update a group access control entry you created using +CreateTemplateGroupAccessControlEntry. + +# Arguments +- `group_security_identifier`: Security identifier (SID) of the group object from Active + Directory. 
The SID starts with \"S-\". +- `template_arn`: The Amazon Resource Name (ARN) that was returned when you called + CreateTemplate. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessRights"`: Allow or deny permissions for an Active Directory group to enroll or + autoenroll certificates for a template. +- `"GroupDisplayName"`: Name of the Active Directory group. This name does not need to + match the group name in Active Directory. +""" +function update_template_group_access_control_entry( + GroupSecurityIdentifier, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_ad( + "PATCH", + "/templates/$(TemplateArn)/accessControlEntries/$(GroupSecurityIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_template_group_access_control_entry( + GroupSecurityIdentifier, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_ad( + "PATCH", + "/templates/$(TemplateArn)/accessControlEntries/$(GroupSecurityIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/pca_connector_scep.jl b/src/services/pca_connector_scep.jl new file mode 100644 index 0000000000..cec1ad104b --- /dev/null +++ b/src/services/pca_connector_scep.jl @@ -0,0 +1,491 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: pca_connector_scep +using AWS.Compat +using AWS.UUIDs + +""" + create_challenge(connector_arn) + create_challenge(connector_arn, params::Dict{String,<:Any}) + +For general-purpose connectors. Creates a challenge password for the specified connector. +The SCEP protocol uses a challenge password to authenticate a request before issuing a +certificate from a certificate authority (CA). Your SCEP clients include the challenge +password as part of their certificate request to Connector for SCEP. To retrieve the +connector Amazon Resource Names (ARNs) for the connectors in your account, call +ListConnectors. To create additional challenge passwords for the connector, call +CreateChallenge again. We recommend frequently rotating your challenge passwords. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) of the connector that you want to create + a challenge for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Custom string that can be used to distinguish between calls to the + CreateChallenge action. Client tokens for CreateChallenge time out after five minutes. + Therefore, if you call CreateChallenge multiple times with the same client token within + five minutes, Connector for SCEP recognizes that you are requesting only one challenge and + will only respond with one. If you change the client token for each call, Connector for + SCEP recognizes that you are requesting multiple challenge passwords. +- `"Tags"`: The key-value pairs to associate with the resource. 
+""" +function create_challenge(ConnectorArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_scep( + "POST", + "/challenges", + Dict{String,Any}("ConnectorArn" => ConnectorArn, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_challenge( + ConnectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "POST", + "/challenges", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConnectorArn" => ConnectorArn, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_connector(certificate_authority_arn) + create_connector(certificate_authority_arn, params::Dict{String,<:Any}) + +Creates a SCEP connector. A SCEP connector links Amazon Web Services Private Certificate +Authority to your SCEP-compatible devices and mobile device management (MDM) systems. +Before you create a connector, you must complete a set of prerequisites, including creation +of a private certificate authority (CA) to use with this connector. For more information, +see Connector for SCEP prerequisites. + +# Arguments +- `certificate_authority_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services + Private Certificate Authority certificate authority to use with this connector. Due to + security vulnerabilities present in the SCEP protocol, we recommend using a private CA + that's dedicated for use with the connector. To retrieve the private CAs associated with + your account, you can call ListCertificateAuthorities using the Amazon Web Services Private + CA API. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Custom string that can be used to distinguish between calls to the + CreateChallenge action. Client tokens for CreateChallenge time out after five minutes. + Therefore, if you call CreateChallenge multiple times with the same client token within + five minutes, Connector for SCEP recognizes that you are requesting only one challenge and + will only respond with one. If you change the client token for each call, Connector for + SCEP recognizes that you are requesting multiple challenge passwords. +- `"MobileDeviceManagement"`: If you don't supply a value, by default Connector for SCEP + creates a connector for general-purpose use. A general-purpose connector is designed to + work with clients or endpoints that support the SCEP protocol, except Connector for SCEP + for Microsoft Intune. With connectors for general-purpose use, you manage SCEP challenge + passwords using Connector for SCEP. For information about considerations and limitations + with using Connector for SCEP, see Considerations and Limitations. If you provide an + IntuneConfiguration, Connector for SCEP creates a connector for use with Microsoft Intune, + and you manage the challenge passwords using Microsoft Intune. For more information, see + Using Connector for SCEP for Microsoft Intune. +- `"Tags"`: The key-value pairs to associate with the resource. 
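+
+# Examples
+A minimal sketch, assuming a private CA already exists; the CA ARN and tag values below are
+placeholders, not values taken from this file:
+
+```julia
+# Hypothetical Amazon Web Services Private CA certificate authority ARN.
+ca_arn = "arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/example"
+
+# Creates a general-purpose connector (the default when MobileDeviceManagement is omitted).
+resp = create_connector(ca_arn)
+
+# Optional parameters such as Tags go through the params dictionary.
+resp = create_connector(ca_arn, Dict{String,Any}("Tags" => Dict("Env" => "prod")))
+```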
+""" +function create_connector( + CertificateAuthorityArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_scep( + "POST", + "/connectors", + Dict{String,Any}( + "CertificateAuthorityArn" => CertificateAuthorityArn, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_connector( + CertificateAuthorityArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "POST", + "/connectors", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CertificateAuthorityArn" => CertificateAuthorityArn, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_challenge(challenge_arn) + delete_challenge(challenge_arn, params::Dict{String,<:Any}) + +Deletes the specified Challenge. + +# Arguments +- `challenge_arn`: The Amazon Resource Name (ARN) of the challenge password to delete. + +""" +function delete_challenge(ChallengeArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_scep( + "DELETE", + "/challenges/$(ChallengeArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_challenge( + ChallengeArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "DELETE", + "/challenges/$(ChallengeArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_connector(connector_arn) + delete_connector(connector_arn, params::Dict{String,<:Any}) + +Deletes the specified Connector. This operation also deletes any challenges associated with +the connector. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) of the connector to delete. + +""" +function delete_connector(ConnectorArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_scep( + "DELETE", + "/connectors/$(ConnectorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_connector( + ConnectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "DELETE", + "/connectors/$(ConnectorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_challenge_metadata(challenge_arn) + get_challenge_metadata(challenge_arn, params::Dict{String,<:Any}) + +Retrieves the metadata for the specified Challenge. + +# Arguments +- `challenge_arn`: The Amazon Resource Name (ARN) of the challenge. + +""" +function get_challenge_metadata( + ChallengeArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_scep( + "GET", + "/challengeMetadata/$(ChallengeArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_challenge_metadata( + ChallengeArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "GET", + "/challengeMetadata/$(ChallengeArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_challenge_password(challenge_arn) + get_challenge_password(challenge_arn, params::Dict{String,<:Any}) + +Retrieves the challenge password for the specified Challenge. + +# Arguments +- `challenge_arn`: The Amazon Resource Name (ARN) of the challenge. 
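+
+# Examples
+A minimal sketch; the challenge ARN below is a placeholder for the ARN returned when the
+challenge was created:
+
+```julia
+challenge_arn = "arn:aws:pca-connector-scep:us-east-1:111122223333:challenge/example"
+
+resp = get_challenge_password(challenge_arn)
+```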
+ +""" +function get_challenge_password( + ChallengeArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_scep( + "GET", + "/challengePasswords/$(ChallengeArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_challenge_password( + ChallengeArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "GET", + "/challengePasswords/$(ChallengeArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_connector(connector_arn) + get_connector(connector_arn, params::Dict{String,<:Any}) + +Retrieves details about the specified Connector. Calling this action returns important +details about the connector, such as the public SCEP URL where your clients can request +certificates. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) of the connector. + +""" +function get_connector(ConnectorArn; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_scep( + "GET", + "/connectors/$(ConnectorArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_connector( + ConnectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "GET", + "/connectors/$(ConnectorArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_challenge_metadata(connector_arn) + list_challenge_metadata(connector_arn, params::Dict{String,<:Any}) + +Retrieves the challenge metadata for the specified ARN. + +# Arguments +- `connector_arn`: The Amazon Resource Name (ARN) of the connector. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of objects that you want Connector for SCEP to return + for this request. If more objects are available, in the response, Connector for SCEP + provides a NextToken value that you can use in a subsequent call to get the next batch of + objects. +- `"NextToken"`: When you request a list of objects with a MaxResults setting, if the + number of objects that are still available for retrieval exceeds the maximum you requested, + Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of + objects, use the token returned from the prior request in your next request. +""" +function list_challenge_metadata( + ConnectorArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_scep( + "GET", + "/challengeMetadata", + Dict{String,Any}("ConnectorArn" => ConnectorArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_challenge_metadata( + ConnectorArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "GET", + "/challengeMetadata", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectorArn" => ConnectorArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_connectors() + list_connectors(params::Dict{String,<:Any}) + +Lists the connectors belonging to your Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of objects that you want Connector for SCEP to return + for this request. 
If more objects are available, in the response, Connector for SCEP + provides a NextToken value that you can use in a subsequent call to get the next batch of + objects. +- `"NextToken"`: When you request a list of objects with a MaxResults setting, if the + number of objects that are still available for retrieval exceeds the maximum you requested, + Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of + objects, use the token returned from the prior request in your next request. +""" +function list_connectors(; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_scep( + "GET", "/connectors"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_connectors( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_scep( + "GET", "/connectors", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Retrieves the tags associated with the specified resource. Tags are key-value pairs that +you can use to categorize and manage your resources, for purposes like billing. For +example, you might set the tag key to \"customer\" and the value to the customer name or +ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 +tags for a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_scep( + "GET", + "/tags/$(ResourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "GET", + "/tags/$(ResourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds one or more tags to your resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tags`: The key-value pairs to associate with the resource. + +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return pca_connector_scep( + "POST", + "/tags/$(ResourceArn)", + Dict{String,Any}("Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "POST", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Tags" => Tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes one or more tags from your resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tag_keys`: Specifies a list of tag keys that you want to remove from the specified + resources. 
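+
+# Examples
+A minimal sketch; the resource ARN below is a placeholder and the tag keys are illustrative
+only:
+
+```julia
+resource_arn = "arn:aws:pca-connector-scep:us-east-1:111122223333:connector/example"
+
+# Remove the "Env" and "Team" tags from the resource.
+resp = untag_resource(resource_arn, ["Env", "Team"])
+```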
+ +""" +function untag_resource( + ResourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return pca_connector_scep( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pca_connector_scep( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/personalize.jl b/src/services/personalize.jl index 44fa7f4830..ed834528be 100644 --- a/src/services/personalize.jl +++ b/src/services/personalize.jl @@ -8,9 +8,18 @@ using AWS.UUIDs create_batch_inference_job(job_input, job_name, job_output, role_arn, solution_version_arn) create_batch_inference_job(job_input, job_name, job_output, role_arn, solution_version_arn, params::Dict{String,<:Any}) -Creates a batch inference job. The operation can handle up to 50 million records and the -input file must be in JSON format. For more information, see Creating a batch inference -job. +Generates batch recommendations based on a list of items or users stored in Amazon S3 and +exports the recommendations to an Amazon S3 bucket. To generate batch recommendations, +specify the ARN of a solution version and an Amazon S3 URI for the input and output data. +For user personalization, popular items, and personalized ranking solutions, the batch +inference job generates a list of recommended items for each user ID in the input file. For +related items solutions, the job generates a list of recommended items for each item ID in +the input file. For more information, see Creating a batch inference job . If you use the +Similar-Items recipe, Amazon Personalize can add descriptive themes to batch +recommendations. To generate themes, set the job's mode to THEME_GENERATION and specify the +name of the field that contains item names in the input data. For more information about +generating themes, see Batch recommendations with themes from Content Generator . You +can't get batch recommendations with the Trending-Now or Next-Best-Action recipes. # Arguments - `job_input`: The Amazon S3 path that leads to the input file to base your recommendations @@ -25,10 +34,17 @@ job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"batchInferenceJobConfig"`: The configuration details of a batch inference job. +- `"batchInferenceJobMode"`: The mode of the batch inference job. To generate descriptive + themes for groups of similar items, set the job mode to THEME_GENERATION. If you don't want + to generate themes, use the default BATCH_INFERENCE. When you get batch recommendations + with themes, you will incur additional costs. For more information, see Amazon Personalize + pricing. - `"filterArn"`: The ARN of the filter to apply to the batch inference job. For more information on using filters, see Filtering batch recommendations. - `"numResults"`: The number of recommendations to retrieve. - `"tags"`: A list of tags to apply to the batch inference job. +- `"themeGenerationConfig"`: For theme generation jobs, specify the name of the column in + your Items dataset that contains each item's name. 
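+
+# Examples
+A minimal sketch of a theme-generation job. The S3 paths, role ARN, and solution version ARN
+are placeholders, and the nested key names inside the job input, job output, and
+themeGenerationConfig values are assumptions about the Amazon Personalize request shapes;
+verify them against the service documentation before use:
+
+```julia
+resp = create_batch_inference_job(
+    Dict("s3DataSource" => Dict("path" => "s3://amzn-s3-demo-bucket/items.json")),  # assumed shape
+    "related-items-with-themes",
+    Dict("s3DataDestination" => Dict("path" => "s3://amzn-s3-demo-bucket/output/")),  # assumed shape
+    "arn:aws:iam::111122223333:role/PersonalizeS3Access",
+    "arn:aws:personalize:us-east-1:111122223333:solution/my-solution/example-version",
+    Dict{String,Any}(
+        "batchInferenceJobMode" => "THEME_GENERATION",
+        # Assumed structure: names the Items dataset column that holds each item's name.
+        "themeGenerationConfig" =>
+            Dict("fieldsForThemeGeneration" => Dict("itemName" => "ITEM_NAME")),
+    ),
+)
+```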
""" function create_batch_inference_job( jobInput, @@ -159,30 +175,42 @@ end create_campaign(name, solution_version_arn) create_campaign(name, solution_version_arn, params::Dict{String,<:Any}) -Creates a campaign that deploys a solution version. When a client calls the -GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request. - Minimum Provisioned TPS and Auto-Scaling A high minProvisionedTPS will increase your -bill. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage -using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary. A -transaction is a single GetRecommendations or GetPersonalizedRanking call. Transactions per -second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum -provisioned TPS (minProvisionedTPS) specifies the baseline throughput provisioned by Amazon -Personalize, and thus, the minimum billing charge. If your TPS increases beyond -minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but -never below minProvisionedTPS. There's a short time delay while the capacity is increased -that might cause loss of transactions. The actual TPS used is calculated as the average -requests/second within a 5-minute window. You pay for maximum of either the minimum -provisioned TPS or the actual TPS. We recommend starting with a low minProvisionedTPS, -track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS -as necessary. Status A campaign can be in one of the following states: CREATE PENDING -> CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING > DELETE -IN_PROGRESS To get the campaign status, call DescribeCampaign. Wait until the status of -the campaign is ACTIVE before asking the campaign for recommendations. Related APIs -ListCampaigns DescribeCampaign UpdateCampaign DeleteCampaign + You incur campaign costs while it is active. To avoid unnecessary costs, make sure to +delete the campaign when you are finished. For information about campaign costs, see Amazon +Personalize pricing. Creates a campaign that deploys a solution version. When a client +calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in +the request. Minimum Provisioned TPS and Auto-Scaling A high minProvisionedTPS will +increase your cost. We recommend starting with 1 for minProvisionedTPS (the default). Track +your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as +necessary. When you create an Amazon Personalize campaign, you can specify the minimum +provisioned transactions per second (minProvisionedTPS) for the campaign. This is the +baseline transaction throughput for the campaign provisioned by Amazon Personalize. It sets +the minimum billing charge for the campaign while it is active. A transaction is a single +GetRecommendations or GetPersonalizedRanking request. The default minProvisionedTPS is 1. +If your TPS increases beyond the minProvisionedTPS, Amazon Personalize auto-scales the +provisioned capacity up and down, but never below minProvisionedTPS. There's a short time +delay while the capacity is increased that might cause loss of transactions. When your +traffic reduces, capacity returns to the minProvisionedTPS. You are charged for the the +minimum provisioned TPS or, if your requests exceed the minProvisionedTPS, the actual TPS. +The actual TPS is the total number of recommendation requests you make. 
We recommend +starting with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, +and then increase the minProvisionedTPS as necessary. For more information about campaign +costs, see Amazon Personalize pricing. Status A campaign can be in one of the following +states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE +PENDING > DELETE IN_PROGRESS To get the campaign status, call DescribeCampaign. Wait +until the status of the campaign is ACTIVE before asking the campaign for recommendations. + Related APIs ListCampaigns DescribeCampaign UpdateCampaign DeleteCampaign + # Arguments - `name`: A name for the new campaign. The campaign name must be unique within your account. -- `solution_version_arn`: The Amazon Resource Name (ARN) of the solution version to deploy. +- `solution_version_arn`: The Amazon Resource Name (ARN) of the trained model to deploy + with the campaign. To specify the latest solution version of your solution, specify the ARN + of your solution in SolutionArn/LATEST format. You must use this format if you set + syncWithLatestSolutionVersion to True in the CampaignConfig. To deploy a model that isn't + the latest solution version of your solution, specify the ARN of the solution version. + For more information about automatic campaign updates, see Enabling automatic campaign + updates. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -226,24 +254,110 @@ function create_campaign( ) end +""" + create_data_deletion_job(data_source, dataset_group_arn, job_name, role_arn) + create_data_deletion_job(data_source, dataset_group_arn, job_name, role_arn, params::Dict{String,<:Any}) + +Creates a batch job that deletes all references to specific users from an Amazon +Personalize dataset group in batches. You specify the users to delete in a CSV file of +userIds in an Amazon S3 bucket. After a job completes, Amazon Personalize no longer trains +on the users’ data and no longer considers the users when generating user segments. For +more information about creating a data deletion job, see Deleting users. Your input file +must be a CSV file with a single USER_ID column that lists the users IDs. For more +information about preparing the CSV file, see Preparing your data deletion file and +uploading it to Amazon S3. To give Amazon Personalize permission to access your input CSV +file of userIds, you must specify an IAM service role that has permission to read from the +data source. This role needs GetObject and ListBucket permissions for the bucket and its +content. These permissions are the same as importing data. For information on granting +access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 +Resources. After you create a job, it can take up to a day to delete all references to +the users from datasets and models. Until the job completes, Amazon Personalize continues +to use the data when training. And if you use a User Segmentation recipe, the users might +appear in user segments. Status A data deletion job can have one of the following +statuses: PENDING > IN_PROGRESS > COMPLETED -or- FAILED To get the status of the +data deletion job, call DescribeDataDeletionJob API operation and specify the Amazon +Resource Name (ARN) of the job. If the status is FAILED, the response includes a +failureReason key, which describes why the job failed. 
Related APIs +ListDataDeletionJobs DescribeDataDeletionJob + +# Arguments +- `data_source`: The Amazon S3 bucket that contains the list of userIds of the users to + delete. +- `dataset_group_arn`: The Amazon Resource Name (ARN) of the dataset group that has the + datasets you want to delete records from. +- `job_name`: The name for the data deletion job. +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role that has permissions to read + from the Amazon S3 data source. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: A list of tags to apply to the data deletion job. +""" +function create_data_deletion_job( + dataSource, + datasetGroupArn, + jobName, + roleArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize( + "CreateDataDeletionJob", + Dict{String,Any}( + "dataSource" => dataSource, + "datasetGroupArn" => datasetGroupArn, + "jobName" => jobName, + "roleArn" => roleArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_deletion_job( + dataSource, + datasetGroupArn, + jobName, + roleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize( + "CreateDataDeletionJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "dataSource" => dataSource, + "datasetGroupArn" => datasetGroupArn, + "jobName" => jobName, + "roleArn" => roleArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_dataset(dataset_group_arn, dataset_type, name, schema_arn) create_dataset(dataset_group_arn, dataset_type, name, schema_arn, params::Dict{String,<:Any}) Creates an empty dataset and adds it to the specified dataset group. Use -CreateDatasetImportJob to import your training data to a dataset. There are three types of -datasets: Interactions Items Users Each dataset type has an associated schema with -required field types. Only the Interactions dataset is required in order to train a model -(also referred to as creating a solution). A dataset can be in one of the following states: - CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING -> DELETE IN_PROGRESS To get the status of the dataset, call DescribeDataset. Related -APIs CreateDatasetGroup ListDatasets DescribeDataset DeleteDataset +CreateDatasetImportJob to import your training data to a dataset. There are 5 types of +datasets: Item interactions Items Users Action interactions Actions Each +dataset type has an associated schema with required field types. Only the Item interactions +dataset is required in order to train a model (also referred to as creating a solution). A +dataset can be in one of the following states: CREATE PENDING > CREATE IN_PROGRESS +> ACTIVE -or- CREATE FAILED DELETE PENDING > DELETE IN_PROGRESS To get the status +of the dataset, call DescribeDataset. Related APIs CreateDatasetGroup ListDatasets + DescribeDataset DeleteDataset # Arguments - `dataset_group_arn`: The Amazon Resource Name (ARN) of the dataset group to add the dataset to. - `dataset_type`: The type of dataset. One of the following (case insensitive) values: - Interactions Items Users + Interactions Items Users Actions Action_Interactions - `name`: The name for the dataset. - `schema_arn`: The ARN of the schema to associate with the dataset. The schema defines the dataset fields. @@ -381,22 +495,22 @@ end Creates an empty dataset group. 
A dataset group is a container for Amazon Personalize resources. A dataset group can contain at most three datasets, one for each type of -dataset: Interactions Items Users A dataset group can be a Domain dataset group, -where you specify a domain and use pre-configured resources like recommenders, or a Custom -dataset group, where you use custom resources, such as a solution with a solution version, -that you deploy with a campaign. If you start with a Domain dataset group, you can still -add custom resources such as solutions and solution versions trained with recipes for -custom use cases and deployed with campaigns. A dataset group can be in one of the -following states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED -DELETE PENDING To get the status of the dataset group, call DescribeDatasetGroup. If the -status shows as CREATE FAILED, the response includes a failureReason key, which describes -why the creation failed. You must wait until the status of the dataset group is ACTIVE -before adding a dataset to the group. You can specify an Key Management Service (KMS) key -to encrypt the datasets in the group. If you specify a KMS key, you must also include an -Identity and Access Management (IAM) role that has permission to access the key. APIs that -require a dataset group ARN in the request CreateDataset CreateEventTracker -CreateSolution Related APIs ListDatasetGroups DescribeDatasetGroup -DeleteDatasetGroup +dataset: Item interactions Items Users Actions Action interactions A dataset +group can be a Domain dataset group, where you specify a domain and use pre-configured +resources like recommenders, or a Custom dataset group, where you use custom resources, +such as a solution with a solution version, that you deploy with a campaign. If you start +with a Domain dataset group, you can still add custom resources such as solutions and +solution versions trained with recipes for custom use cases and deployed with campaigns. A +dataset group can be in one of the following states: CREATE PENDING > CREATE +IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING To get the status of the +dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the +response includes a failureReason key, which describes why the creation failed. You must +wait until the status of the dataset group is ACTIVE before adding a dataset to the group. +You can specify an Key Management Service (KMS) key to encrypt the datasets in the group. +If you specify a KMS key, you must also include an Identity and Access Management (IAM) +role that has permission to access the key. APIs that require a dataset group ARN in the +request CreateDataset CreateEventTracker CreateSolution Related APIs +ListDatasetGroups DescribeDatasetGroup DeleteDatasetGroup # Arguments - `name`: The name for the new dataset group. @@ -442,10 +556,13 @@ Amazon Personalize dataset. To allow Amazon Personalize to import the training d must specify an IAM service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it internally. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon -S3 Resources. By default, a dataset import job replaces any existing data in the dataset -that you imported in bulk. To add new records without replacing existing data, specify -INCREMENTAL for the import mode in the CreateDatasetImportJob operation. 
Status A -dataset import job can be in one of the following states: CREATE PENDING > CREATE +S3 Resources. If you already created a recommender or deployed a custom solution version +with a campaign, how new bulk records influence recommendations depends on the domain use +case or recipe that you use. For more information, see How new data influences real-time +recommendations. By default, a dataset import job replaces any existing data in the +dataset that you imported in bulk. To add new records without replacing existing data, +specify INCREMENTAL for the import mode in the CreateDatasetImportJob operation. Status +A dataset import job can be in one of the following states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as @@ -528,12 +645,12 @@ using the PutEvents API. Only one event tracker can be associated with a datase You will get an error if you call CreateEventTracker using the same dataset group as an existing event tracker. When you create an event tracker, the response includes a tracking ID, which you pass as a parameter when you use the PutEvents operation. Amazon Personalize -then appends the event data to the Interactions dataset of the dataset group you specify in -your event tracker. The event tracker can be in one of the following states: CREATE -PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING > DELETE -IN_PROGRESS To get the status of the event tracker, call DescribeEventTracker. The event -tracker must be in the ACTIVE state before using the tracking ID. Related APIs -ListEventTrackers DescribeEventTracker DeleteEventTracker +then appends the event data to the Item interactions dataset of the dataset group you +specify in your event tracker. The event tracker can be in one of the following states: +CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING > +DELETE IN_PROGRESS To get the status of the event tracker, call DescribeEventTracker. +The event tracker must be in the ACTIVE state before using the tracking ID. Related APIs + ListEventTrackers DescribeEventTracker DeleteEventTracker # Arguments - `dataset_group_arn`: The Amazon Resource Name (ARN) of the dataset group that receives @@ -834,23 +951,34 @@ end create_solution(dataset_group_arn, name) create_solution(dataset_group_arn, name, params::Dict{String,<:Any}) -Creates the configuration for training a model. A trained model is known as a solution -version. After the configuration is created, you train the model (create a solution -version) by calling the CreateSolutionVersion operation. Every time you call -CreateSolutionVersion, a new version of the solution is created. After creating a solution -version, you check its accuracy by calling GetSolutionMetrics. When you are satisfied with -the version, you deploy it using CreateCampaign. The campaign provides recommendations to a -client through the GetRecommendations API. To train a model, Amazon Personalize requires -training data and a recipe. The training data comes from the dataset group that you provide -in the request. A recipe specifies the training algorithm and a feature transformation. You -can specify one of the predefined recipes provided by Amazon Personalize. 
Amazon -Personalize doesn't support configuring the hpoObjective for solution hyperparameter -optimization at this time. Status A solution can be in one of the following states: -CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING > -DELETE IN_PROGRESS To get the status of the solution, call DescribeSolution. Wait until -the status shows as ACTIVE before calling CreateSolutionVersion. Related APIs -ListSolutions CreateSolutionVersion DescribeSolution DeleteSolution -ListSolutionVersions DescribeSolutionVersion + After you create a solution, you can’t change its configuration. By default, all new +solutions use automatic training. With automatic training, you incur training costs while +your solution is active. You can't stop automatic training for a solution. To avoid +unnecessary costs, make sure to delete the solution when you are finished. For information +about training costs, see Amazon Personalize pricing. Creates the configuration for +training a model (creating a solution version). This configuration includes the recipe to +use for model training and optional training configuration, such as columns to use in +training and feature transformation parameters. For more information about configuring a +solution, see Creating and configuring a solution. By default, new solutions use +automatic training to create solution versions every 7 days. You can change the training +frequency. Automatic solution version creation starts one hour after the solution is +ACTIVE. If you manually create a solution version within the hour, the solution skips the +first automatic training. For more information, see Configuring automatic training. To +turn off automatic training, set performAutoTraining to false. If you turn off automatic +training, you must manually create a solution version by calling the CreateSolutionVersion +operation. After training starts, you can get the solution version's Amazon Resource Name +(ARN) with the ListSolutionVersions API operation. To get its status, use the +DescribeSolutionVersion. After training completes you can evaluate model accuracy by +calling GetSolutionMetrics. When you are satisfied with the solution version, you deploy it +using CreateCampaign. The campaign provides recommendations to a client through the +GetRecommendations API. Amazon Personalize doesn't support configuring the hpoObjective +for solution hyperparameter optimization at this time. Status A solution can be in one +of the following states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE +FAILED DELETE PENDING > DELETE IN_PROGRESS To get the status of the solution, call +DescribeSolution. If you use manual training, the status must be ACTIVE before you call +CreateSolutionVersion. Related APIs ListSolutions CreateSolutionVersion +DescribeSolution DeleteSolution ListSolutionVersions DescribeSolutionVersion + # Arguments - `dataset_group_arn`: The Amazon Resource Name (ARN) of the dataset group that provides @@ -865,17 +993,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys interactions for training with equal weight regardless of type. - `"performAutoML"`: We don't recommend enabling automated machine learning. Instead, match your use case to the available Amazon Personalize recipes. For more information, see - Determining your use case. Whether to perform automated machine learning (AutoML). The - default is false. For this case, you must specify recipeArn. 
When set to true, Amazon - Personalize analyzes your training data and selects the optimal USER_PERSONALIZATION recipe - and hyperparameters. In this case, you must omit recipeArn. Amazon Personalize determines - the optimal recipe by running tests with different values for the hyperparameters. AutoML + Choosing a recipe. Whether to perform automated machine learning (AutoML). The default is + false. For this case, you must specify recipeArn. When set to true, Amazon Personalize + analyzes your training data and selects the optimal USER_PERSONALIZATION recipe and + hyperparameters. In this case, you must omit recipeArn. Amazon Personalize determines the + optimal recipe by running tests with different values for the hyperparameters. AutoML lengthens the training process as compared to selecting a specific recipe. +- `"performAutoTraining"`: Whether the solution uses automatic training to create new + solution versions (trained models). The default is True and the solution automatically + creates new solution versions every 7 days. You can change the training frequency by + specifying a schedulingExpression in the AutoTrainingConfig as part of solution + configuration. For more information about automatic training, see Configuring automatic + training. Automatic solution version creation starts one hour after the solution is + ACTIVE. If you manually create a solution version within the hour, the solution skips the + first automatic training. After training starts, you can get the solution version's + Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, + use the DescribeSolutionVersion. - `"performHPO"`: Whether to perform hyperparameter optimization (HPO) on the specified or selected recipe. The default is false. When performing AutoML, this parameter is always true and you should not set it to false. -- `"recipeArn"`: The ARN of the recipe to use for model training. Only specified when - performAutoML is false. +- `"recipeArn"`: The Amazon Resource Name (ARN) of the recipe to use for model training. + This is required when performAutoML is false. For information about different Amazon + Personalize recipes and their ARNs, see Choosing a recipe. - `"solutionConfig"`: The configuration to use with the solution. When performAutoML is set to true, Amazon Personalize only evaluates the autoMLConfig section of the solution configuration. Amazon Personalize doesn't support configuring the hpoObjective at this @@ -936,13 +1075,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"name"`: The name of the solution version. - `"tags"`: A list of tags to apply to the solution version. - `"trainingMode"`: The scope of training to be performed when creating the solution - version. The FULL option trains the solution version based on the entirety of the input - solution's training data, while the UPDATE option processes only the data that has changed - in comparison to the input solution. Choose UPDATE when you want to incrementally update - your solution version instead of creating an entirely new one. The UPDATE option can only - be used when you already have an active solution version created from the input solution - using the FULL option and the input solution was trained with the User-Personalization - recipe or the HRNN-Coldstart recipe. + version. The default is FULL. This creates a completely new model based on the entirety of + the training data from the datasets in your dataset group. 
If you use + User-Personalization, you can specify a training mode of UPDATE. This updates the model to + consider new items for recommendations. It is not a full retraining. You should still + complete a full retraining weekly. If you specify UPDATE, Amazon Personalize will stop + automatic updates for the solution version. To resume updates, create a new solution with + training mode set to FULL and deploy it in a campaign. For more information about automatic + updates, see Automatic updates. The UPDATE option can only be used when you already have + an active solution version created from the input solution using the FULL option and the + input solution was trained with the User-Personalization recipe or the legacy + HRNN-Coldstart recipe. """ function create_solution_version( solutionArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1083,8 +1226,8 @@ end delete_event_tracker(event_tracker_arn) delete_event_tracker(event_tracker_arn, params::Dict{String,<:Any}) -Deletes the event tracker. Does not delete the event-interactions dataset from the -associated dataset group. For more information on event trackers, see CreateEventTracker. +Deletes the event tracker. Does not delete the dataset from the dataset group. For more +information on event trackers, see CreateEventTracker. # Arguments - `event_tracker_arn`: The Amazon Resource Name (ARN) of the event tracker to delete. @@ -1446,6 +1589,43 @@ function describe_campaign( ) end +""" + describe_data_deletion_job(data_deletion_job_arn) + describe_data_deletion_job(data_deletion_job_arn, params::Dict{String,<:Any}) + +Describes the data deletion job created by CreateDataDeletionJob, including the job status. + +# Arguments +- `data_deletion_job_arn`: The Amazon Resource Name (ARN) of the data deletion job. + +""" +function describe_data_deletion_job( + dataDeletionJobArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return personalize( + "DescribeDataDeletionJob", + Dict{String,Any}("dataDeletionJobArn" => dataDeletionJobArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_data_deletion_job( + dataDeletionJobArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize( + "DescribeDataDeletionJob", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("dataDeletionJobArn" => dataDeletionJobArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_dataset(dataset_arn) describe_dataset(dataset_arn, params::Dict{String,<:Any}) @@ -2063,6 +2243,40 @@ function list_campaigns( ) end +""" + list_data_deletion_jobs() + list_data_deletion_jobs(params::Dict{String,<:Any}) + +Returns a list of data deletion jobs for a dataset group ordered by creation time, with the +most recent first. When a dataset group is not specified, all the data deletion jobs +associated with the account are listed. The response provides the properties for each job, +including the Amazon Resource Name (ARN). For more information on data deletion jobs, see +Deleting users. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"datasetGroupArn"`: The Amazon Resource Name (ARN) of the dataset group to list data + deletion jobs for. +- `"maxResults"`: The maximum number of data deletion jobs to return. +- `"nextToken"`: A token returned from the previous call to ListDataDeletionJobs for + getting the next set of jobs (if they exist). 
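+
+# Examples
+A minimal paging sketch; the dataset group ARN is a placeholder, and the response keys used
+below ("dataDeletionJobs" and "nextToken") are assumptions about the parsed response shape:
+
+```julia
+params = Dict{String,Any}(
+    "datasetGroupArn" => "arn:aws:personalize:us-east-1:111122223333:dataset-group/example",
+    "maxResults" => 50,
+)
+jobs = Any[]
+while true
+    resp = list_data_deletion_jobs(params)
+    append!(jobs, get(resp, "dataDeletionJobs", []))  # assumed response key
+    token = get(resp, "nextToken", nothing)           # assumed response key
+    token === nothing && break
+    params["nextToken"] = token
+end
+```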
+""" +function list_data_deletion_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return personalize( + "ListDataDeletionJobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_data_deletion_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return personalize( + "ListDataDeletionJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_dataset_export_jobs() list_dataset_export_jobs(params::Dict{String,<:Any}) @@ -2171,8 +2385,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"datasetGroupArn"`: The Amazon Resource Name (ARN) of the dataset group that contains the datasets to list. - `"maxResults"`: The maximum number of datasets to return. -- `"nextToken"`: A token returned from the previous call to ListDatasetImportJobs for - getting the next set of dataset import jobs (if they exist). +- `"nextToken"`: A token returned from the previous call to ListDatasets for getting the + next set of dataset import jobs (if they exist). """ function list_datasets(; aws_config::AbstractAWSConfig=global_aws_config()) return personalize( @@ -2428,7 +2642,7 @@ end list_solutions() list_solutions(params::Dict{String,<:Any}) -Returns a list of solutions that use the given dataset group. When a dataset group is not +Returns a list of solutions in a given dataset group. When a dataset group is not specified, all the solutions associated with the account are listed. The response provides the properties for each solution, including the Amazon Resource Name (ARN). For more information on solutions, see CreateSolution. @@ -2460,7 +2674,7 @@ end Get a list of tags attached to a resource. # Arguments -- `resource_arn`: The resource's Amazon Resource Name. +- `resource_arn`: The resource's Amazon Resource Name (ARN). """ function list_tags_for_resource( @@ -2610,7 +2824,7 @@ Add a list of tags to a resource. # Arguments - `resource_arn`: The resource's Amazon Resource Name (ARN). - `tags`: Tags to apply to the resource. For more information see Tagging Amazon - Personalize recources. + Personalize resources. """ function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2645,11 +2859,12 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Remove tags that are attached to a resource. +Removes the specified tags that are attached to a resource. For more information, see +Removing tags from Amazon Personalize resources. # Arguments - `resource_arn`: The resource's Amazon Resource Name (ARN). -- `tag_keys`: Keys to remove from the resource's tags. +- `tag_keys`: The keys of the tags to be removed. """ function untag_resource( @@ -2686,13 +2901,19 @@ end update_campaign(campaign_arn) update_campaign(campaign_arn, params::Dict{String,<:Any}) -Updates a campaign by either deploying a new solution or changing the value of the -campaign's minProvisionedTPS parameter. To update a campaign, the campaign status must be + Updates a campaign to deploy a retrained solution version with an existing campaign, +change your campaign's minProvisionedTPS, or modify your campaign's configuration. For +example, you can set enableMetadataWithRecommendations to true for an existing campaign. 
+To update a campaign to start automatically using the latest solution version, specify the +following: For the SolutionVersionArn parameter, specify the Amazon Resource Name (ARN) +of your solution in SolutionArn/LATEST format. In the campaignConfig, set +syncWithLatestSolutionVersion to true. To update a campaign, the campaign status must be ACTIVE or CREATE FAILED. Check the campaign status using the DescribeCampaign operation. You can still get recommendations from a campaign while an update is in progress. The campaign will use the previous solution version and campaign configuration to generate recommendations until the latest campaign update status is Active. For more information -on campaigns, see CreateCampaign. +about updating a campaign, including code samples, see Updating a campaign. For more +information about campaigns, see Creating a campaign. # Arguments - `campaign_arn`: The Amazon Resource Name (ARN) of the campaign. @@ -2705,7 +2926,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys will increase your bill. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary. -- `"solutionVersionArn"`: The ARN of a new solution version to deploy. +- `"solutionVersionArn"`: The Amazon Resource Name (ARN) of a new model to deploy. To + specify the latest solution version of your solution, specify the ARN of your solution in + SolutionArn/LATEST format. You must use this format if you set + syncWithLatestSolutionVersion to True in the CampaignConfig. To deploy a model that isn't + the latest solution version of your solution, specify the ARN of the solution version. + For more information about automatic campaign updates, see Enabling automatic campaign + updates. """ function update_campaign(campaignArn; aws_config::AbstractAWSConfig=global_aws_config()) return personalize( @@ -2730,6 +2957,48 @@ function update_campaign( ) end +""" + update_dataset(dataset_arn, schema_arn) + update_dataset(dataset_arn, schema_arn, params::Dict{String,<:Any}) + +Update a dataset to replace its schema with a new or existing one. For more information, +see Replacing a dataset's schema. + +# Arguments +- `dataset_arn`: The Amazon Resource Name (ARN) of the dataset that you want to update. +- `schema_arn`: The Amazon Resource Name (ARN) of the new schema you want use. 
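+
+# Examples
+A minimal sketch; both ARNs below are placeholders for resources that must already exist
+(the dataset and the new schema):
+
+```julia
+dataset_arn = "arn:aws:personalize:us-east-1:111122223333:dataset/example/ITEMS"
+schema_arn = "arn:aws:personalize:us-east-1:111122223333:schema/items-schema-v2"
+
+resp = update_dataset(dataset_arn, schema_arn)
+```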
+ +""" +function update_dataset( + datasetArn, schemaArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return personalize( + "UpdateDataset", + Dict{String,Any}("datasetArn" => datasetArn, "schemaArn" => schemaArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_dataset( + datasetArn, + schemaArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize( + "UpdateDataset", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("datasetArn" => datasetArn, "schemaArn" => schemaArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_metric_attribution() update_metric_attribution(params::Dict{String,<:Any}) diff --git a/src/services/personalize_events.jl b/src/services/personalize_events.jl index 442eb99ffe..fc20ef7761 100644 --- a/src/services/personalize_events.jl +++ b/src/services/personalize_events.jl @@ -4,18 +4,115 @@ using AWS.AWSServices: personalize_events using AWS.Compat using AWS.UUIDs +""" + put_action_interactions(action_interactions, tracking_id) + put_action_interactions(action_interactions, tracking_id, params::Dict{String,<:Any}) + +Records action interaction event data. An action interaction event is an interaction +between a user and an action. For example, a user taking an action, such a enrolling in a +membership program or downloading your app. For more information about recording action +interactions, see Recording action interaction events. For more information about actions +in an Actions dataset, see Actions dataset. + +# Arguments +- `action_interactions`: A list of action interaction events from the session. +- `tracking_id`: The ID of your action interaction event tracker. When you create an Action + interactions dataset, Amazon Personalize creates an action interaction event tracker for + you. For more information, see Action interaction event tracker ID. + +""" +function put_action_interactions( + actionInteractions, trackingId; aws_config::AbstractAWSConfig=global_aws_config() +) + return personalize_events( + "POST", + "/action-interactions", + Dict{String,Any}( + "actionInteractions" => actionInteractions, "trackingId" => trackingId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_action_interactions( + actionInteractions, + trackingId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize_events( + "POST", + "/action-interactions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "actionInteractions" => actionInteractions, "trackingId" => trackingId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_actions(actions, dataset_arn) + put_actions(actions, dataset_arn, params::Dict{String,<:Any}) + +Adds one or more actions to an Actions dataset. For more information see Importing actions +individually. + +# Arguments +- `actions`: A list of action data. +- `dataset_arn`: The Amazon Resource Name (ARN) of the Actions dataset you are adding the + action or actions to. 
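+
+# Examples
+A minimal sketch; the dataset ARN is a placeholder, and the "actionId" field name inside
+each action record is an assumption to be matched against your Actions schema:
+
+```julia
+actions = [
+    Dict("actionId" => "enroll-membership"),  # assumed field name
+    Dict("actionId" => "download-app"),
+]
+dataset_arn = "arn:aws:personalize:us-east-1:111122223333:dataset/example/ACTIONS"
+
+resp = put_actions(actions, dataset_arn)
+```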
+ +""" +function put_actions(actions, datasetArn; aws_config::AbstractAWSConfig=global_aws_config()) + return personalize_events( + "POST", + "/actions", + Dict{String,Any}("actions" => actions, "datasetArn" => datasetArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_actions( + actions, + datasetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize_events( + "POST", + "/actions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("actions" => actions, "datasetArn" => datasetArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_events(event_list, session_id, tracking_id) put_events(event_list, session_id, tracking_id, params::Dict{String,<:Any}) -Records user interaction event data. For more information see Recording Events. +Records item interaction event data. For more information see Recording item interaction +events. # Arguments - `event_list`: A list of event data from the session. - `session_id`: The session ID associated with the user's visit. Your application generates the sessionId when a user first visits your website or uses your application. Amazon Personalize uses the sessionId to associate events with the user before they log in. For - more information, see Recording Events. + more information, see Recording item interaction events. - `tracking_id`: The tracking ID for the event. The ID is generated by a call to the CreateEventTracker API. @@ -66,8 +163,8 @@ end put_items(dataset_arn, items) put_items(dataset_arn, items, params::Dict{String,<:Any}) -Adds one or more items to an Items dataset. For more information see Importing Items -Incrementally. +Adds one or more items to an Items dataset. For more information see Importing items +individually. # Arguments - `dataset_arn`: The Amazon Resource Name (ARN) of the Items dataset you are adding the @@ -109,8 +206,8 @@ end put_users(dataset_arn, users) put_users(dataset_arn, users, params::Dict{String,<:Any}) -Adds one or more users to a Users dataset. For more information see Importing Users -Incrementally. +Adds one or more users to a Users dataset. For more information see Importing users +individually. # Arguments - `dataset_arn`: The Amazon Resource Name (ARN) of the Users dataset you are adding the diff --git a/src/services/personalize_runtime.jl b/src/services/personalize_runtime.jl index 34187f1264..dfc5cb1f71 100644 --- a/src/services/personalize_runtime.jl +++ b/src/services/personalize_runtime.jl @@ -4,6 +4,56 @@ using AWS.AWSServices: personalize_runtime using AWS.Compat using AWS.UUIDs +""" + get_action_recommendations() + get_action_recommendations(params::Dict{String,<:Any}) + +Returns a list of recommended actions in sorted in descending order by prediction score. +Use the GetActionRecommendations API if you have a custom campaign that deploys a solution +version trained with a PERSONALIZED_ACTIONS recipe. For more information about +PERSONALIZED_ACTIONS recipes, see PERSONALIZED_ACTIONS recipes. For more information about +getting action recommendations, see Getting action recommendations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"campaignArn"`: The Amazon Resource Name (ARN) of the campaign to use for getting action + recommendations. This campaign must deploy a solution version trained with a + PERSONALIZED_ACTIONS recipe. 
+- `"filterArn"`: The ARN of the filter to apply to the returned recommendations. For more + information, see Filtering Recommendations. When using this parameter, be sure the filter + resource is ACTIVE. +- `"filterValues"`: The values to use when filtering recommendations. For each placeholder + parameter in your filter expression, provide the parameter name (in matching case) as a key + and the filter value(s) as the corresponding value. Separate multiple values for one + parameter with a comma. For filter expressions that use an INCLUDE element to include + actions, you must provide values for all parameters that are defined in the expression. For + filters with expressions that use an EXCLUDE element to exclude actions, you can omit the + filter-values. In this case, Amazon Personalize doesn't use that portion of the expression + to filter recommendations. For more information, see Filtering recommendations and user + segments. +- `"numResults"`: The number of results to return. The default is 5. The maximum is 100. +- `"userId"`: The user ID of the user to provide action recommendations for. +""" +function get_action_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return personalize_runtime( + "POST", + "/action-recommendations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_action_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return personalize_runtime( + "POST", + "/action-recommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_personalized_ranking(campaign_arn, input_list, user_id) get_personalized_ranking(campaign_arn, input_list, user_id, params::Dict{String,<:Any}) @@ -16,7 +66,8 @@ campaign must have been created using a recipe of type PERSONALIZED_RANKING. - `campaign_arn`: The Amazon Resource Name (ARN) of the campaign to use for generating the personalized ranking. - `input_list`: A list of items (by itemId) to rank. If an item was not included in the - training dataset, the item is appended to the end of the reranked list. The maximum is 500. + training dataset, the item is appended to the end of the reranked list. If you are + including metadata in recommendations, the maximum is 50. Otherwise, the maximum is 500. - `user_id`: The user for which you want the campaign to provide a personalized ranking. # Optional Parameters @@ -35,6 +86,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys filters with expressions that use an EXCLUDE element to exclude items, you can omit the filter-values.In this case, Amazon Personalize doesn't use that portion of the expression to filter recommendations. For more information, see Filtering Recommendations. +- `"metadataColumns"`: If you enabled metadata in recommendations when you created or + updated the campaign, specify metadata columns from your Items dataset to include in the + personalized ranking. The map key is ITEMS and the value is a list of column names from + your Items dataset. The maximum number of columns you can provide is 10. For information + about enabling metadata for a campaign, see Enabling metadata in recommendations for a + campaign. """ function get_personalized_ranking( campaignArn, inputList, userId; aws_config::AbstractAWSConfig=global_aws_config() @@ -109,7 +166,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys segments. 
- `"itemId"`: The item ID to provide recommendations for. Required for RELATED_ITEMS recipe type. -- `"numResults"`: The number of results to return. The default is 25. The maximum is 500. +- `"metadataColumns"`: If you enabled metadata in recommendations when you created or + updated the campaign or recommender, specify the metadata columns from your Items dataset + to include in item recommendations. The map key is ITEMS and the value is a list of column + names from your Items dataset. The maximum number of columns you can provide is 10. For + information about enabling metadata for a campaign, see Enabling metadata in + recommendations for a campaign. For information about enabling metadata for a recommender, + see Enabling metadata in recommendations for a recommender. +- `"numResults"`: The number of results to return. The default is 25. If you are including + metadata in recommendations, the maximum is 50. Otherwise, the maximum is 500. - `"promotions"`: The promotions to apply to the recommendation request. A promotion defines additional business rules that apply to a configurable subset of recommended items. - `"recommenderArn"`: The Amazon Resource Name (ARN) of the recommender to use to get diff --git a/src/services/pi.jl b/src/services/pi.jl index bb5890b14c..8d1ac9c6e8 100644 --- a/src/services/pi.jl +++ b/src/services/pi.jl @@ -4,6 +4,131 @@ using AWS.AWSServices: pi using AWS.Compat using AWS.UUIDs +""" + create_performance_analysis_report(end_time, identifier, service_type, start_time) + create_performance_analysis_report(end_time, identifier, service_type, start_time, params::Dict{String,<:Any}) + +Creates a new performance analysis report for a specific time period for the DB instance. + +# Arguments +- `end_time`: The end time defined for the analysis report. +- `identifier`: An immutable, Amazon Web Services Region-unique identifier for a data + source. Performance Insights gathers metrics from this data source. To use an Amazon RDS + instance as a data source, you specify its DbiResourceId value. For example, specify + db-ADECBTYHKTSAUMUZQYPDS2GW4A. +- `service_type`: The Amazon Web Services service for which Performance Insights will + return metrics. Valid value is RDS. +- `start_time`: The start time defined for the analysis report. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The metadata assigned to the analysis report consisting of a key-value pair. 
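+
+# Example
+An illustrative call, assuming a configured `global_aws_config()`; the epoch-second
+timestamps are placeholders spanning one hour, and the identifier is the example
+DbiResourceId quoted above:
+```julia
+create_performance_analysis_report(
+    1717250400,                       # EndTime (epoch seconds, illustrative)
+    "db-ADECBTYHKTSAUMUZQYPDS2GW4A",  # Identifier (example value from the notes above)
+    "RDS",                            # ServiceType
+    1717246800,                       # StartTime (one hour before EndTime)
+)
+```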
+""" +function create_performance_analysis_report( + EndTime, + Identifier, + ServiceType, + StartTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "CreatePerformanceAnalysisReport", + Dict{String,Any}( + "EndTime" => EndTime, + "Identifier" => Identifier, + "ServiceType" => ServiceType, + "StartTime" => StartTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_performance_analysis_report( + EndTime, + Identifier, + ServiceType, + StartTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "CreatePerformanceAnalysisReport", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EndTime" => EndTime, + "Identifier" => Identifier, + "ServiceType" => ServiceType, + "StartTime" => StartTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_performance_analysis_report(analysis_report_id, identifier, service_type) + delete_performance_analysis_report(analysis_report_id, identifier, service_type, params::Dict{String,<:Any}) + +Deletes a performance analysis report. + +# Arguments +- `analysis_report_id`: The unique identifier of the analysis report for deletion. +- `identifier`: An immutable identifier for a data source that is unique for an Amazon Web + Services Region. Performance Insights gathers metrics from this data source. In the + console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the + identifier is returned as DbiResourceId. To use a DB instance as a data source, specify its + DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X. +- `service_type`: The Amazon Web Services service for which Performance Insights will + return metrics. Valid value is RDS. + +""" +function delete_performance_analysis_report( + AnalysisReportId, + Identifier, + ServiceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "DeletePerformanceAnalysisReport", + Dict{String,Any}( + "AnalysisReportId" => AnalysisReportId, + "Identifier" => Identifier, + "ServiceType" => ServiceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_performance_analysis_report( + AnalysisReportId, + Identifier, + ServiceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "DeletePerformanceAnalysisReport", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AnalysisReportId" => AnalysisReportId, + "Identifier" => Identifier, + "ServiceType" => ServiceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_dimension_keys(end_time, group_by, identifier, metric, service_type, start_time) describe_dimension_keys(end_time, group_by, identifier, metric, service_type, start_time, params::Dict{String,<:Any}) @@ -199,6 +324,74 @@ function get_dimension_key_details( ) end +""" + get_performance_analysis_report(analysis_report_id, identifier, service_type) + get_performance_analysis_report(analysis_report_id, identifier, service_type, params::Dict{String,<:Any}) + +Retrieves the report including the report ID, status, time details, and the insights with +recommendations. The report status can be RUNNING, SUCCEEDED, or FAILED. The insights +include the description and recommendation fields. + +# Arguments +- `analysis_report_id`: A unique identifier of the created analysis report. 
For example, + report-12345678901234567 +- `identifier`: An immutable identifier for a data source that is unique for an Amazon Web + Services Region. Performance Insights gathers metrics from this data source. In the + console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the + identifier is returned as DbiResourceId. To use a DB instance as a data source, specify its + DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X. +- `service_type`: The Amazon Web Services service for which Performance Insights will + return metrics. Valid value is RDS. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AcceptLanguage"`: The text language in the report. The default language is EN_US + (English). +- `"TextFormat"`: Indicates the text format in the report. The options are PLAIN_TEXT or + MARKDOWN. The default value is plain text. +""" +function get_performance_analysis_report( + AnalysisReportId, + Identifier, + ServiceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "GetPerformanceAnalysisReport", + Dict{String,Any}( + "AnalysisReportId" => AnalysisReportId, + "Identifier" => Identifier, + "ServiceType" => ServiceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_performance_analysis_report( + AnalysisReportId, + Identifier, + ServiceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "GetPerformanceAnalysisReport", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AnalysisReportId" => AnalysisReportId, + "Identifier" => Identifier, + "ServiceType" => ServiceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_resource_metadata(identifier, service_type) get_resource_metadata(identifier, service_type, params::Dict{String,<:Any}) @@ -250,9 +443,10 @@ end get_resource_metrics(end_time, identifier, metric_queries, service_type, start_time, params::Dict{String,<:Any}) Retrieve Performance Insights metrics for a set of data sources over a time period. You can -provide specific dimension groups and dimensions, and provide aggregation and filtering -criteria for each group. Each response element returns a maximum of 500 bytes. For larger -elements, such as SQL statements, only the first 500 bytes are returned. +provide specific dimension groups and dimensions, and provide filtering criteria for each +group. You must specify an aggregate function for each metric. Each response element +returns a maximum of 500 bytes. For larger elements, such as SQL statements, only the first +500 bytes are returned. # Arguments - `end_time`: The date and time specifying the end of the requested time series query @@ -265,7 +459,10 @@ elements, such as SQL statements, only the first 500 bytes are returned. identifier is returned as DbiResourceId. To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X. - `metric_queries`: An array of one or more queries to perform. Each query must specify a - Performance Insights metric, and can optionally specify aggregation and filtering criteria. + Performance Insights metric and specify an aggregate function, and you can provide + filtering criteria. You must append the aggregate function to the metric. For example, to + find the average for the metric db.load you must use db.load.avg. 
Valid values for + aggregate functions include .avg, .min, .max, and .sum. - `service_type`: The Amazon Web Services service for which Performance Insights returns metrics. Valid values are as follows: RDS DOCDB - `start_time`: The date and time specifying the beginning of the requested time series @@ -360,6 +557,11 @@ DB instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AuthorizedActions"`: The actions to discover the dimensions you are authorized to + access. If you specify multiple actions, then the response will contain the dimensions + common for all the actions. When you don't specify this request parameter or provide an + empty list, the response contains all the available dimensions for the target database + engine whether or not you are authorized to access them. - `"MaxResults"`: The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved. @@ -469,3 +671,210 @@ function list_available_resource_metrics( feature_set=SERVICE_FEATURE_SET, ) end + +""" + list_performance_analysis_reports(identifier, service_type) + list_performance_analysis_reports(identifier, service_type, params::Dict{String,<:Any}) + +Lists all the analysis reports created for the DB instance. The reports are sorted based on +the start time of each report. + +# Arguments +- `identifier`: An immutable identifier for a data source that is unique for an Amazon Web + Services Region. Performance Insights gathers metrics from this data source. In the + console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the + identifier is returned as DbiResourceId. To use a DB instance as a data source, specify its + DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X. +- `service_type`: The Amazon Web Services service for which Performance Insights returns + metrics. Valid value is RDS. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ListTags"`: Specifies whether or not to include the list of tags in the response. +- `"MaxResults"`: The maximum number of items to return in the response. If more items + exist than the specified MaxResults value, a pagination token is included in the response + so that the remaining results can be retrieved. +- `"NextToken"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the token, up to the + value specified by MaxResults. 
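+
+# Example
+A sketch of listing reports with tags included, using the example DbiResourceId quoted
+above and hypothetical paging values:
+```julia
+list_performance_analysis_reports(
+    "db-ABCDEFGHIJKLMNOPQRSTU1VW2X",               # Identifier (example value from above)
+    "RDS",
+    Dict("ListTags" => true, "MaxResults" => 10),  # optional parameters (illustrative)
+)
+```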
+""" +function list_performance_analysis_reports( + Identifier, ServiceType; aws_config::AbstractAWSConfig=global_aws_config() +) + return pi( + "ListPerformanceAnalysisReports", + Dict{String,Any}("Identifier" => Identifier, "ServiceType" => ServiceType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_performance_analysis_reports( + Identifier, + ServiceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "ListPerformanceAnalysisReports", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Identifier" => Identifier, "ServiceType" => ServiceType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn, service_type) + list_tags_for_resource(resource_arn, service_type, params::Dict{String,<:Any}) + +Retrieves all the metadata tags associated with Amazon RDS Performance Insights resource. + +# Arguments +- `resource_arn`: Lists all the tags for the Amazon RDS Performance Insights resource. This + value is an Amazon Resource Name (ARN). For information about creating an ARN, see + Constructing an RDS Amazon Resource Name (ARN). +- `service_type`: List the tags for the Amazon Web Services service for which Performance + Insights returns metrics. Valid value is RDS. + +""" +function list_tags_for_resource( + ResourceARN, ServiceType; aws_config::AbstractAWSConfig=global_aws_config() +) + return pi( + "ListTagsForResource", + Dict{String,Any}("ResourceARN" => ResourceARN, "ServiceType" => ServiceType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceARN, + ServiceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "ListTagsForResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceARN" => ResourceARN, "ServiceType" => ServiceType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, service_type, tags) + tag_resource(resource_arn, service_type, tags, params::Dict{String,<:Any}) + +Adds metadata tags to the Amazon RDS Performance Insights resource. + +# Arguments +- `resource_arn`: The Amazon RDS Performance Insights resource that the tags are added to. + This value is an Amazon Resource Name (ARN). For information about creating an ARN, see + Constructing an RDS Amazon Resource Name (ARN). +- `service_type`: The Amazon Web Services service for which Performance Insights returns + metrics. Valid value is RDS. +- `tags`: The metadata assigned to an Amazon RDS resource consisting of a key-value pair. 
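+
+# Example
+An illustrative call; the ARN is a placeholder and the tag shape (Key/Value pairs) is an
+assumption, not taken from this patch:
+```julia
+tag_resource(
+    "arn:aws:pi:us-east-1:111122223333:perf-reports/RDS/db-example/report-example",  # placeholder ARN
+    "RDS",
+    [Dict("Key" => "Environment", "Value" => "test")],  # assumed Key/Value tag shape
+)
+```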
+ +""" +function tag_resource( + ResourceARN, ServiceType, Tags; aws_config::AbstractAWSConfig=global_aws_config() +) + return pi( + "TagResource", + Dict{String,Any}( + "ResourceARN" => ResourceARN, "ServiceType" => ServiceType, "Tags" => Tags + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceARN, + ServiceType, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceARN" => ResourceARN, + "ServiceType" => ServiceType, + "Tags" => Tags, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, service_type, tag_keys) + untag_resource(resource_arn, service_type, tag_keys, params::Dict{String,<:Any}) + +Deletes the metadata tags from the Amazon RDS Performance Insights resource. + +# Arguments +- `resource_arn`: The Amazon RDS Performance Insights resource that the tags are added to. + This value is an Amazon Resource Name (ARN). For information about creating an ARN, see + Constructing an RDS Amazon Resource Name (ARN). +- `service_type`: List the tags for the Amazon Web Services service for which Performance + Insights returns metrics. Valid value is RDS. +- `tag_keys`: The metadata assigned to an Amazon RDS Performance Insights resource + consisting of a key-value pair. + +""" +function untag_resource( + ResourceARN, ServiceType, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return pi( + "UntagResource", + Dict{String,Any}( + "ResourceARN" => ResourceARN, "ServiceType" => ServiceType, "TagKeys" => TagKeys + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceARN, + ServiceType, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pi( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceARN" => ResourceARN, + "ServiceType" => ServiceType, + "TagKeys" => TagKeys, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/pinpoint.jl b/src/services/pinpoint.jl index 25c9b61bae..bf16f5e4a9 100644 --- a/src/services/pinpoint.jl +++ b/src/services/pinpoint.jl @@ -917,7 +917,8 @@ Deletes an endpoint from an application. # Arguments - `application-id`: The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console. -- `endpoint-id`: The unique identifier for the endpoint. +- `endpoint-id`: The case insensitive unique identifier for the endpoint. The identifier + can't contain , { or }. """ function delete_endpoint( @@ -2179,7 +2180,8 @@ application. # Arguments - `application-id`: The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console. -- `endpoint-id`: The unique identifier for the endpoint. +- `endpoint-id`: The case insensitive unique identifier for the endpoint. The identifier + can't contain , { or }. """ function get_endpoint( @@ -3696,8 +3698,9 @@ end remove_attributes(update_attributes_request, application-id, attribute-type) remove_attributes(update_attributes_request, application-id, attribute-type, params::Dict{String,<:Any}) -Removes one or more attributes, of the same attribute type, from all the endpoints that are -associated with an application. 
+Removes one or more custom attributes, of the same attribute type, from the application. +Existing endpoints still have the attributes but Amazon Pinpoint will stop capturing new or +changed values for these attributes. # Arguments - `update_attributes_request`: @@ -4458,7 +4461,8 @@ attribute, Amazon Pinpoint replaces (overwrites) any existing values with the ne - `endpoint_request`: - `application-id`: The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console. -- `endpoint-id`: The unique identifier for the endpoint. +- `endpoint-id`: The case insensitive unique identifier for the endpoint. The identifier + can't contain , { or }. """ function update_endpoint( diff --git a/src/services/pinpoint_sms_voice_v2.jl b/src/services/pinpoint_sms_voice_v2.jl index 599783ca21..f9c6893414 100644 --- a/src/services/pinpoint_sms_voice_v2.jl +++ b/src/services/pinpoint_sms_voice_v2.jl @@ -9,9 +9,9 @@ using AWS.UUIDs associate_origination_identity(iso_country_code, origination_identity, pool_id, params::Dict{String,<:Any}) Associates the specified origination identity with a pool. If the origination identity is a -phone number and is already associated with another pool, an Error is returned. A sender ID +phone number and is already associated with another pool, an error is returned. A sender ID can be associated with multiple pools. If the origination identity configuration doesn't -match the pool's configuration, an Error is returned. +match the pool's configuration, an error is returned. # Arguments - `iso_country_code`: The new two-character code, in ISO 3166-1 alpha-2 format, for the @@ -73,6 +73,58 @@ function associate_origination_identity( ) end +""" + associate_protect_configuration(configuration_set_name, protect_configuration_id) + associate_protect_configuration(configuration_set_name, protect_configuration_id, params::Dict{String,<:Any}) + +Associate a protect configuration with a configuration set. This replaces the configuration +sets current protect configuration. A configuration set can only be associated with one +protect configuration at a time. A protect configuration can be associated with multiple +configuration sets. + +# Arguments +- `configuration_set_name`: The name of the ConfigurationSet. +- `protect_configuration_id`: The unique identifier for the protect configuration. + +""" +function associate_protect_configuration( + ConfigurationSetName, + ProtectConfigurationId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "AssociateProtectConfiguration", + Dict{String,Any}( + "ConfigurationSetName" => ConfigurationSetName, + "ProtectConfigurationId" => ProtectConfigurationId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_protect_configuration( + ConfigurationSetName, + ProtectConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "AssociateProtectConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationSetName" => ConfigurationSetName, + "ProtectConfigurationId" => ProtectConfigurationId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_configuration_set(configuration_set_name) create_configuration_set(configuration_set_name, params::Dict{String,<:Any}) @@ -145,7 +197,8 @@ single destination, such as a CloudWatch or Kinesis Data Firehose destination. 
found using the DescribeConfigurationSets action. - `event_destination_name`: The name that identifies the event destination. - `matching_event_types`: An array of event types that determine which events to log. If - \"ALL\" is used, then Amazon Pinpoint logs every event type. + \"ALL\" is used, then Amazon Pinpoint logs every event type. The TEXT_SENT event type is + not supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -153,7 +206,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. If you don't specify a client token, a randomly generated token is used for the request to ensure idempotency. - `"CloudWatchLogsDestination"`: An object that contains information about an event - destination for logging to Amazon CloudWatch logs. + destination for logging to Amazon CloudWatch Logs. - `"KinesisFirehoseDestination"`: An object that contains information about an event destination for logging to Amazon Kinesis Data Firehose. - `"SnsDestination"`: An object that contains information about an event destination for @@ -207,7 +260,7 @@ end create_opt_out_list(opt_out_list_name) create_opt_out_list(opt_out_list_name, params::Dict{String,<:Any}) -Creates a new opt-out list. If the opt-out list name already exists, an Error is returned. +Creates a new opt-out list. If the opt-out list name already exists, an error is returned. An opt-out list is a list of phone numbers that are opted out, meaning you can't send SMS or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any @@ -267,7 +320,7 @@ Web Services account. The new pool inherits its configuration from the specified origination identity. This includes keywords, message type, opt-out list, two-way configuration, and self-managed opt-out configuration. Deletion protection isn't inherited from the origination identity and defaults to false. If the origination identity is a phone -number and is already associated with another pool, an Error is returned. A sender ID can +number and is already associated with another pool, an error is returned. A sender ID can be associated with multiple pools. # Arguments @@ -334,6 +387,296 @@ function create_pool( ) end +""" + create_protect_configuration() + create_protect_configuration(params::Dict{String,<:Any}) + +Create a new protect configuration. By default all country rule sets for each capability +are set to ALLOW. Update the country rule sets using +UpdateProtectConfigurationCountryRuleSet. A protect configurations name is stored as a Tag +with the key set to Name and value as the name of the protect configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If you don't specify a client token, a randomly generated token + is used for the request to ensure idempotency. +- `"DeletionProtectionEnabled"`: When set to true deletion protection is enabled. By + default this is set to false. +- `"Tags"`: An array of key and value pair tags that are associated with the resource. 
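+
+# Example
+A sketch of creating a protect configuration with deletion protection enabled and a Name
+tag; the tag value is a placeholder:
+```julia
+create_protect_configuration(Dict(
+    "DeletionProtectionEnabled" => true,
+    "Tags" => [Dict("Key" => "Name", "Value" => "example-protect-config")],  # placeholder name
+))
+```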
+""" +function create_protect_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return pinpoint_sms_voice_v2( + "CreateProtectConfiguration", + Dict{String,Any}("ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_protect_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "CreateProtectConfiguration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_registration(registration_type) + create_registration(registration_type, params::Dict{String,<:Any}) + +Creates a new registration based on the RegistrationType field. + +# Arguments +- `registration_type`: The type of registration form to create. The list of + RegistrationTypes can be found using the DescribeRegistrationTypeDefinitions action. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If you don't specify a client token, a randomly generated token + is used for the request to ensure idempotency. +- `"Tags"`: An array of tags (key and value pairs) to associate with the registration. +""" +function create_registration( + RegistrationType; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "CreateRegistration", + Dict{String,Any}( + "RegistrationType" => RegistrationType, "ClientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_registration( + RegistrationType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "CreateRegistration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RegistrationType" => RegistrationType, "ClientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_registration_association(registration_id, resource_id) + create_registration_association(registration_id, resource_id, params::Dict{String,<:Any}) + +Associate the registration with an origination identity such as a phone number or sender ID. + +# Arguments +- `registration_id`: The unique identifier for the registration. +- `resource_id`: The unique identifier for the origination identity. For example this could + be a PhoneNumberId or SenderId. 
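+
+# Example
+An illustrative call; both identifiers are placeholders, not values from this patch:
+```julia
+# Hypothetical registration ID and phone-number resource ID (illustrative only).
+create_registration_association("registration-id-example", "phone-number-id-example")
+```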
+ +""" +function create_registration_association( + RegistrationId, ResourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "CreateRegistrationAssociation", + Dict{String,Any}("RegistrationId" => RegistrationId, "ResourceId" => ResourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_registration_association( + RegistrationId, + ResourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "CreateRegistrationAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "RegistrationId" => RegistrationId, "ResourceId" => ResourceId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_registration_attachment() + create_registration_attachment(params::Dict{String,<:Any}) + +Create a new registration attachment to use for uploading a file or a URL to a file. The +maximum file size is 1MiB and valid file extensions are PDF, JPEG and PNG. For example, +many sender ID registrations require a signed “letter of authorization” (LOA) to be +submitted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttachmentBody"`: The registration file to upload. The maximum file size is 1MiB and + valid file extensions are PDF, JPEG and PNG. +- `"AttachmentUrl"`: A URL to the required registration file. For example, you can provide + the S3 object URL. +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If you don't specify a client token, a randomly generated token + is used for the request to ensure idempotency. +- `"Tags"`: An array of tags (key and value pairs) to associate with the registration + attachment. +""" +function create_registration_attachment(; aws_config::AbstractAWSConfig=global_aws_config()) + return pinpoint_sms_voice_v2( + "CreateRegistrationAttachment", + Dict{String,Any}("ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_registration_attachment( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "CreateRegistrationAttachment", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_registration_version(registration_id) + create_registration_version(registration_id, params::Dict{String,<:Any}) + +Create a new version of the registration and increase the VersionNumber. The previous +version of the registration becomes read-only. + +# Arguments +- `registration_id`: The unique identifier for the registration. 
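+
+# Example
+A minimal sketch with a placeholder registration identifier:
+```julia
+create_registration_version("registration-id-example")  # hypothetical ID
+```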
+ +""" +function create_registration_version( + RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "CreateRegistrationVersion", + Dict{String,Any}("RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_registration_version( + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "CreateRegistrationVersion", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RegistrationId" => RegistrationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_verified_destination_number(destination_phone_number) + create_verified_destination_number(destination_phone_number, params::Dict{String,<:Any}) + +You can only send messages to verified destination numbers when your account is in the +sandbox. You can add up to 10 verified destination numbers. + +# Arguments +- `destination_phone_number`: The verified destination phone number, in E.164 format. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If you don't specify a client token, a randomly generated token + is used for the request to ensure idempotency. +- `"Tags"`: An array of tags (key and value pairs) to associate with the destination number. +""" +function create_verified_destination_number( + DestinationPhoneNumber; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "CreateVerifiedDestinationNumber", + Dict{String,Any}( + "DestinationPhoneNumber" => DestinationPhoneNumber, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_verified_destination_number( + DestinationPhoneNumber, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "CreateVerifiedDestinationNumber", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DestinationPhoneNumber" => DestinationPhoneNumber, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_account_default_protect_configuration() + delete_account_default_protect_configuration(params::Dict{String,<:Any}) + +Removes the current account default protect configuration. 
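+
+# Example
+The operation takes no required arguments, so under the default configuration a call is
+simply:
+```julia
+delete_account_default_protect_configuration()
+```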
+ +""" +function delete_account_default_protect_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteAccountDefaultProtectConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_account_default_protect_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteAccountDefaultProtectConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_configuration_set(configuration_set_name) delete_configuration_set(configuration_set_name, params::Dict{String,<:Any}) @@ -572,13 +915,43 @@ function delete_keyword( ) end +""" + delete_media_message_spend_limit_override() + delete_media_message_spend_limit_override(params::Dict{String,<:Any}) + +Deletes an account-level monthly spending limit override for sending multimedia messages +(MMS). Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, +which is controlled by Amazon Web Services. For more information on spend limits (quotas) +see Quotas for Server Migration Service in the Server Migration Service User Guide. + +""" +function delete_media_message_spend_limit_override(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteMediaMessageSpendLimitOverride"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_media_message_spend_limit_override( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteMediaMessageSpendLimitOverride", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_opt_out_list(opt_out_list_name) delete_opt_out_list(opt_out_list_name, params::Dict{String,<:Any}) Deletes an existing opt-out list. All opted out phone numbers in the opt-out list are deleted. If the specified opt-out list name doesn't exist or is in-use by an origination -phone number or pool, an Error is returned. +phone number or pool, an error is returned. # Arguments - `opt_out_list_name`: The OptOutListName or OptOutListArn of the OptOutList to delete. You @@ -616,7 +989,7 @@ end Deletes an existing opted out destination phone number from the specified opt-out list. Each destination phone number can only be deleted once every 30 days. If the specified -destination phone number doesn't exist or if the opt-out list doesn't exist, an Error is +destination phone number doesn't exist or if the opt-out list doesn't exist, an error is returned. # Arguments @@ -663,7 +1036,7 @@ end delete_pool(pool_id, params::Dict{String,<:Any}) Deletes an existing pool. Deleting a pool disassociates all origination identities from -that pool. If the pool status isn't active or if deletion protection is enabled, an Error +that pool. If the pool status isn't active or if deletion protection is enabled, an error is returned. A pool is a collection of phone numbers and SenderIds. A pool can include one or more phone numbers and SenderIds that are associated with your Amazon Web Services account. @@ -693,8 +1066,167 @@ function delete_pool( end """ - delete_text_message_spend_limit_override() - delete_text_message_spend_limit_override(params::Dict{String,<:Any}) + delete_protect_configuration(protect_configuration_id) + delete_protect_configuration(protect_configuration_id, params::Dict{String,<:Any}) + +Permanently delete the protect configuration. 
The protect configuration must have deletion +protection disabled and must not be associated as the account default protect configuration +or associated with a configuration set. + +# Arguments +- `protect_configuration_id`: The unique identifier for the protect configuration. + +""" +function delete_protect_configuration( + ProtectConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteProtectConfiguration", + Dict{String,Any}("ProtectConfigurationId" => ProtectConfigurationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_protect_configuration( + ProtectConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DeleteProtectConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ProtectConfigurationId" => ProtectConfigurationId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_registration(registration_id) + delete_registration(registration_id, params::Dict{String,<:Any}) + +Permanently delete an existing registration from your account. + +# Arguments +- `registration_id`: The unique identifier for the registration. + +""" +function delete_registration( + RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteRegistration", + Dict{String,Any}("RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_registration( + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DeleteRegistration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RegistrationId" => RegistrationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_registration_attachment(registration_attachment_id) + delete_registration_attachment(registration_attachment_id, params::Dict{String,<:Any}) + +Permanently delete the specified registration attachment. + +# Arguments +- `registration_attachment_id`: The unique identifier for the registration attachment. + +""" +function delete_registration_attachment( + RegistrationAttachmentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteRegistrationAttachment", + Dict{String,Any}("RegistrationAttachmentId" => RegistrationAttachmentId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_registration_attachment( + RegistrationAttachmentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DeleteRegistrationAttachment", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("RegistrationAttachmentId" => RegistrationAttachmentId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_registration_field_value(field_path, registration_id) + delete_registration_field_value(field_path, registration_id, params::Dict{String,<:Any}) + +Delete the value in a registration form field. + +# Arguments +- `field_path`: The path to the registration form field. You can use + DescribeRegistrationFieldDefinitions for a list of FieldPaths. +- `registration_id`: The unique identifier for the registration. 
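+
+# Example
+An illustrative call; both the field path and the registration ID are hypothetical (use
+DescribeRegistrationFieldDefinitions to look up real paths):
+```julia
+delete_registration_field_value("companyInfo.companyName", "registration-id-example")  # placeholders
+```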
+ +""" +function delete_registration_field_value( + FieldPath, RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteRegistrationFieldValue", + Dict{String,Any}("FieldPath" => FieldPath, "RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_registration_field_value( + FieldPath, + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DeleteRegistrationFieldValue", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "FieldPath" => FieldPath, "RegistrationId" => RegistrationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_text_message_spend_limit_override() + delete_text_message_spend_limit_override(params::Dict{String,<:Any}) Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is @@ -722,6 +1254,48 @@ function delete_text_message_spend_limit_override( ) end +""" + delete_verified_destination_number(verified_destination_number_id) + delete_verified_destination_number(verified_destination_number_id, params::Dict{String,<:Any}) + +Delete a verified destination phone number. + +# Arguments +- `verified_destination_number_id`: The unique identifier for the verified destination + phone number. + +""" +function delete_verified_destination_number( + VerifiedDestinationNumberId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DeleteVerifiedDestinationNumber", + Dict{String,Any}("VerifiedDestinationNumberId" => VerifiedDestinationNumberId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_verified_destination_number( + VerifiedDestinationNumberId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DeleteVerifiedDestinationNumber", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "VerifiedDestinationNumberId" => VerifiedDestinationNumberId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_voice_message_spend_limit_override() delete_voice_message_spend_limit_override(params::Dict{String,<:Any}) @@ -863,7 +1437,7 @@ A keyword is a word that you can search for on a particular phone number or pool also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message. -If you specify a keyword that isn't valid, an Error is returned. +If you specify a keyword that isn't valid, an error is returned. # Arguments - `origination_identity`: The origination identity to use such as a PhoneNumberId, @@ -916,7 +1490,7 @@ Describes the specified opt-out list or all opt-out lists in your account. If yo opt-out list names, the output includes information for only the specified opt-out lists. Opt-out lists include only those that meet the filter criteria. If you don't specify opt-out list names or filters, the output includes information for all opt-out lists. If -you specify an opt-out list name that isn't valid, an Error is returned. +you specify an opt-out list name that isn't valid, an error is returned. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -952,7 +1526,7 @@ only the specified opted out numbers. If you specify filters, the output include information for only those opted out numbers that meet the filter criteria. If you don't specify opted out numbers or filters, the output includes information for all opted out destination numbers in your opt-out list. If you specify an opted out number that isn't -valid, an Error is returned. +valid, an error is returned. # Arguments - `opt_out_list_name`: The OptOutListName or OptOutListArn of the OptOutList. You can use @@ -1000,7 +1574,7 @@ If you specify phone number IDs, the output includes information for only the sp phone numbers. If you specify filters, the output includes information for only those phone numbers that meet the filter criteria. If you don't specify phone number IDs or filters, the output includes information for all phone numbers. If you specify a phone number ID -that isn't valid, an Error is returned. +that isn't valid, an error is returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1035,7 +1609,7 @@ Retrieves the specified pools or all pools associated with your Amazon Web Servi account. If you specify pool IDs, the output includes information for only the specified pools. If you specify filters, the output includes information for only those pools that meet the filter criteria. If you don't specify pool IDs or filters, the output includes -information for all pools. If you specify a pool ID that isn't valid, an Error is returned. +information for all pools. If you specify a pool ID that isn't valid, an error is returned. A pool is a collection of phone numbers and SenderIds. A pool can include one or more phone numbers and SenderIds that are associated with your Amazon Web Services account. @@ -1062,63 +1636,70 @@ function describe_pools( end """ - describe_sender_ids() - describe_sender_ids(params::Dict{String,<:Any}) + describe_protect_configurations() + describe_protect_configurations(params::Dict{String,<:Any}) -Describes the specified SenderIds or all SenderIds associated with your Amazon Web Services -account. If you specify SenderIds, the output includes information for only the specified -SenderIds. If you specify filters, the output includes information for only those SenderIds -that meet the filter criteria. If you don't specify SenderIds or filters, the output -includes information for all SenderIds. f you specify a sender ID that isn't valid, an -Error is returned. +Retrieves the protect configurations that match any of filters. If a filter isn’t +provided then all protect configurations are returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: An array of SenderIdFilter objects to filter the results. +- `"Filters"`: An array of ProtectConfigurationFilter objects to filter the results. - `"MaxResults"`: The maximum number of results to return per each request. - `"NextToken"`: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. -- `"SenderIds"`: An array of SenderIdAndCountry objects to search for. +- `"ProtectConfigurationIds"`: An array of protect configuration identifiers to search for. 
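+
+# Example
+A sketch of filtering by identifier; the ID shown is a placeholder:
+```julia
+describe_protect_configurations(
+    Dict("ProtectConfigurationIds" => ["protect-config-id-example"])  # hypothetical ID
+)
+```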
""" -function describe_sender_ids(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_protect_configurations(; + aws_config::AbstractAWSConfig=global_aws_config() +) return pinpoint_sms_voice_v2( - "DescribeSenderIds"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DescribeProtectConfigurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function describe_sender_ids( +function describe_protect_configurations( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return pinpoint_sms_voice_v2( - "DescribeSenderIds", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DescribeProtectConfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end """ - describe_spend_limits() - describe_spend_limits(params::Dict{String,<:Any}) + describe_registration_attachments() + describe_registration_attachments(params::Dict{String,<:Any}) -Describes the current Amazon Pinpoint monthly spend limits for sending voice and text -messages. When you establish an Amazon Web Services account, the account has initial -monthly spend limit in a given Region. For more information on increasing your monthly -spend limit, see Requesting increases to your monthly SMS spending quota for Amazon -Pinpoint in the Amazon Pinpoint User Guide. +Retrieves the specified registration attachments or all registration attachments associated +with your Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of RegistrationAttachmentFilter objects to filter the results. - `"MaxResults"`: The maximum number of results to return per each request. - `"NextToken"`: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. +- `"RegistrationAttachmentIds"`: The unique identifier of registration attachments to find. + This is an array of RegistrationAttachmentId. """ -function describe_spend_limits(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_registration_attachments(; + aws_config::AbstractAWSConfig=global_aws_config() +) return pinpoint_sms_voice_v2( - "DescribeSpendLimits"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DescribeRegistrationAttachments"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function describe_spend_limits( +function describe_registration_attachments( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return pinpoint_sms_voice_v2( - "DescribeSpendLimits", + "DescribeRegistrationAttachments", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1126,65 +1707,45 @@ function describe_spend_limits( end """ - disassociate_origination_identity(iso_country_code, origination_identity, pool_id) - disassociate_origination_identity(iso_country_code, origination_identity, pool_id, params::Dict{String,<:Any}) + describe_registration_field_definitions(registration_type) + describe_registration_field_definitions(registration_type, params::Dict{String,<:Any}) -Removes the specified origination identity from an existing pool. If the origination -identity isn't associated with the specified pool, an Error is returned. +Retrieves the specified registration type field definitions. You can use +DescribeRegistrationFieldDefinitions to view the requirements for creating, filling out, +and submitting each registration type. 
# Arguments -- `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country - or region. -- `origination_identity`: The origination identity to use such as a PhoneNumberId, - PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers find the values - for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values for - SenderId and SenderIdArn. -- `pool_id`: The unique identifier for the pool to disassociate with the origination - identity. This value can be either the PoolId or PoolArn. +- `registration_type`: The type of registration form. The list of RegistrationTypes can be + found using the DescribeRegistrationTypeDefinitions action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency - of the request. If you don't specify a client token, a randomly generated token is used for - the request to ensure idempotency. +- `"FieldPaths"`: An array of paths to the registration form field. +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. +- `"SectionPath"`: The path to the section of the registration. """ -function disassociate_origination_identity( - IsoCountryCode, - OriginationIdentity, - PoolId; - aws_config::AbstractAWSConfig=global_aws_config(), +function describe_registration_field_definitions( + RegistrationType; aws_config::AbstractAWSConfig=global_aws_config() ) return pinpoint_sms_voice_v2( - "DisassociateOriginationIdentity", - Dict{String,Any}( - "IsoCountryCode" => IsoCountryCode, - "OriginationIdentity" => OriginationIdentity, - "PoolId" => PoolId, - "ClientToken" => string(uuid4()), - ); + "DescribeRegistrationFieldDefinitions", + Dict{String,Any}("RegistrationType" => RegistrationType); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function disassociate_origination_identity( - IsoCountryCode, - OriginationIdentity, - PoolId, +function describe_registration_field_definitions( + RegistrationType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return pinpoint_sms_voice_v2( - "DisassociateOriginationIdentity", + "DescribeRegistrationFieldDefinitions", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "IsoCountryCode" => IsoCountryCode, - "OriginationIdentity" => OriginationIdentity, - "PoolId" => PoolId, - "ClientToken" => string(uuid4()), - ), - params, + _merge, Dict{String,Any}("RegistrationType" => RegistrationType), params ), ); aws_config=aws_config, @@ -1193,74 +1754,88 @@ function disassociate_origination_identity( end """ - list_pool_origination_identities(pool_id) - list_pool_origination_identities(pool_id, params::Dict{String,<:Any}) + describe_registration_field_values(registration_id) + describe_registration_field_values(registration_id, params::Dict{String,<:Any}) -Lists all associated origination identities in your pool. If you specify filters, the -output includes information for only those origination identities that meet the filter -criteria. +Retrieves the specified registration field values. # Arguments -- `pool_id`: The unique identifier for the pool. This value can be either the PoolId or - PoolArn. +- `registration_id`: The unique identifier for the registration. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: An array of PoolOriginationIdentitiesFilter objects to filter the results.. +- `"FieldPaths"`: An array of paths to the registration form field. - `"MaxResults"`: The maximum number of results to return per each request. - `"NextToken"`: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. +- `"SectionPath"`: The path to the section of the registration. +- `"VersionNumber"`: The version number of the registration. """ -function list_pool_origination_identities( - PoolId; aws_config::AbstractAWSConfig=global_aws_config() +function describe_registration_field_values( + RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() ) return pinpoint_sms_voice_v2( - "ListPoolOriginationIdentities", - Dict{String,Any}("PoolId" => PoolId); + "DescribeRegistrationFieldValues", + Dict{String,Any}("RegistrationId" => RegistrationId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_pool_origination_identities( - PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function describe_registration_field_values( + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return pinpoint_sms_voice_v2( - "ListPoolOriginationIdentities", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + "DescribeRegistrationFieldValues", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RegistrationId" => RegistrationId), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - list_tags_for_resource(resource_arn) - list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + describe_registration_section_definitions(registration_type) + describe_registration_section_definitions(registration_type, params::Dict{String,<:Any}) -List all tags associated with a resource. +Retrieves the specified registration section definitions. You can use +DescribeRegistrationSectionDefinitions to view the requirements for creating, filling out, +and submitting each registration type. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource to query for. +- `registration_type`: The type of registration form. The list of RegistrationTypes can be + found using the DescribeRegistrationTypeDefinitions action. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. +- `"SectionPaths"`: An array of paths for the registration form section. 
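+
+# Example
+A minimal call sketch (illustrative only; the registration type is a placeholder and the
+optional `MaxResults` value is arbitrary):
+
+    describe_registration_section_definitions("EXAMPLE_REGISTRATION_TYPE")
+    describe_registration_section_definitions(
+        "EXAMPLE_REGISTRATION_TYPE", Dict("MaxResults" => 10)
+    )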
""" -function list_tags_for_resource( - ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +function describe_registration_section_definitions( + RegistrationType; aws_config::AbstractAWSConfig=global_aws_config() ) return pinpoint_sms_voice_v2( - "ListTagsForResource", - Dict{String,Any}("ResourceArn" => ResourceArn); + "DescribeRegistrationSectionDefinitions", + Dict{String,Any}("RegistrationType" => RegistrationType); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_tags_for_resource( - ResourceArn, +function describe_registration_section_definitions( + RegistrationType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return pinpoint_sms_voice_v2( - "ListTagsForResource", + "DescribeRegistrationSectionDefinitions", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + mergewith( + _merge, Dict{String,Any}("RegistrationType" => RegistrationType), params + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1268,21 +1843,546 @@ function list_tags_for_resource( end """ - put_keyword(keyword, keyword_message, origination_identity) - put_keyword(keyword, keyword_message, origination_identity, params::Dict{String,<:Any}) + describe_registration_type_definitions() + describe_registration_type_definitions(params::Dict{String,<:Any}) -Creates or updates a keyword configuration on an origination phone number or pool. A +Retrieves the specified registration type definitions. You can use +DescribeRegistrationTypeDefinitions to view the requirements for creating, filling out, and +submitting each registration type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of RegistrationFilter objects to filter the results. +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. +- `"RegistrationTypes"`: The type of registration form. The list of RegistrationTypes can + be found using the DescribeRegistrationTypeDefinitions action. +""" +function describe_registration_type_definitions(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DescribeRegistrationTypeDefinitions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_registration_type_definitions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DescribeRegistrationTypeDefinitions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_registration_versions(registration_id) + describe_registration_versions(registration_id, params::Dict{String,<:Any}) + +Retrieves the specified registration version. + +# Arguments +- `registration_id`: The unique identifier for the registration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of RegistrationVersionFilter objects to filter the results. +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. +- `"VersionNumbers"`: An array of registration version numbers. 
+""" +function describe_registration_versions( + RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DescribeRegistrationVersions", + Dict{String,Any}("RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_registration_versions( + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DescribeRegistrationVersions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RegistrationId" => RegistrationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_registrations() + describe_registrations(params::Dict{String,<:Any}) + +Retrieves the specified registrations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of RegistrationFilter objects to filter the results. +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. +- `"RegistrationIds"`: An array of unique identifiers for each registration. +""" +function describe_registrations(; aws_config::AbstractAWSConfig=global_aws_config()) + return pinpoint_sms_voice_v2( + "DescribeRegistrations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_registrations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DescribeRegistrations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_sender_ids() + describe_sender_ids(params::Dict{String,<:Any}) + +Describes the specified SenderIds or all SenderIds associated with your Amazon Web Services +account. If you specify SenderIds, the output includes information for only the specified +SenderIds. If you specify filters, the output includes information for only those SenderIds +that meet the filter criteria. If you don't specify SenderIds or filters, the output +includes information for all SenderIds. f you specify a sender ID that isn't valid, an +error is returned. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of SenderIdFilter objects to filter the results. +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. +- `"SenderIds"`: An array of SenderIdAndCountry objects to search for. +""" +function describe_sender_ids(; aws_config::AbstractAWSConfig=global_aws_config()) + return pinpoint_sms_voice_v2( + "DescribeSenderIds"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_sender_ids( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DescribeSenderIds", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + describe_spend_limits() + describe_spend_limits(params::Dict{String,<:Any}) + +Describes the current Amazon Pinpoint monthly spend limits for sending voice and text +messages. 
When you establish an Amazon Web Services account, the account has an initial
+monthly spend limit in a given Region. For more information on increasing your monthly
+spend limit, see Requesting increases to your monthly SMS spending quota for Amazon
+Pinpoint in the Amazon Pinpoint User Guide.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"MaxResults"`: The maximum number of results to return per each request.
+- `"NextToken"`: The token to be used for the next set of paginated results. You don't need
+  to supply a value for this field in the initial request.
+"""
+function describe_spend_limits(; aws_config::AbstractAWSConfig=global_aws_config())
+    return pinpoint_sms_voice_v2(
+        "DescribeSpendLimits"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function describe_spend_limits(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return pinpoint_sms_voice_v2(
+        "DescribeSpendLimits",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    describe_verified_destination_numbers()
+    describe_verified_destination_numbers(params::Dict{String,<:Any})
+
+Retrieves the specified verified destination numbers.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"DestinationPhoneNumbers"`: An array of verified destination phone numbers, in E.164
+  format.
+- `"Filters"`: An array of VerifiedDestinationNumberFilter objects to filter the results.
+- `"MaxResults"`: The maximum number of results to return per each request.
+- `"NextToken"`: The token to be used for the next set of paginated results. You don't need
+  to supply a value for this field in the initial request.
+- `"VerifiedDestinationNumberIds"`: An array of VerifiedDestinationNumberIds to retrieve.
+"""
+function describe_verified_destination_numbers(;
+    aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return pinpoint_sms_voice_v2(
+        "DescribeVerifiedDestinationNumbers";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function describe_verified_destination_numbers(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return pinpoint_sms_voice_v2(
+        "DescribeVerifiedDestinationNumbers",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    disassociate_origination_identity(iso_country_code, origination_identity, pool_id)
+    disassociate_origination_identity(iso_country_code, origination_identity, pool_id, params::Dict{String,<:Any})
+
+Removes the specified origination identity from an existing pool. If the origination
+identity isn't associated with the specified pool, an error is returned.
+
+# Arguments
+- `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country
+  or region.
+- `origination_identity`: The origination identity to use such as a PhoneNumberId,
+  PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the
+  values for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values
+  for SenderId and SenderIdArn.
+- `pool_id`: The unique identifier for the pool to disassociate with the origination
+  identity. This value can be either the PoolId or PoolArn.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`.
Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency + of the request. If you don't specify a client token, a randomly generated token is used for + the request to ensure idempotency. +""" +function disassociate_origination_identity( + IsoCountryCode, + OriginationIdentity, + PoolId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DisassociateOriginationIdentity", + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, + "OriginationIdentity" => OriginationIdentity, + "PoolId" => PoolId, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_origination_identity( + IsoCountryCode, + OriginationIdentity, + PoolId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DisassociateOriginationIdentity", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, + "OriginationIdentity" => OriginationIdentity, + "PoolId" => PoolId, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_protect_configuration(configuration_set_name, protect_configuration_id) + disassociate_protect_configuration(configuration_set_name, protect_configuration_id, params::Dict{String,<:Any}) + +Disassociate a protect configuration from a configuration set. + +# Arguments +- `configuration_set_name`: The name of the ConfigurationSet. +- `protect_configuration_id`: The unique identifier for the protect configuration. + +""" +function disassociate_protect_configuration( + ConfigurationSetName, + ProtectConfigurationId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DisassociateProtectConfiguration", + Dict{String,Any}( + "ConfigurationSetName" => ConfigurationSetName, + "ProtectConfigurationId" => ProtectConfigurationId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_protect_configuration( + ConfigurationSetName, + ProtectConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DisassociateProtectConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationSetName" => ConfigurationSetName, + "ProtectConfigurationId" => ProtectConfigurationId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + discard_registration_version(registration_id) + discard_registration_version(registration_id, params::Dict{String,<:Any}) + +Discard the current version of the registration. + +# Arguments +- `registration_id`: The unique identifier for the registration. 
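+
+# Example
+A minimal call sketch (illustrative only; the registration identifier is a placeholder):
+
+    discard_registration_version("registration-id")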
+ +""" +function discard_registration_version( + RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "DiscardRegistrationVersion", + Dict{String,Any}("RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function discard_registration_version( + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "DiscardRegistrationVersion", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RegistrationId" => RegistrationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_protect_configuration_country_rule_set(number_capability, protect_configuration_id) + get_protect_configuration_country_rule_set(number_capability, protect_configuration_id, params::Dict{String,<:Any}) + +Retrieve the CountryRuleSet for the specified NumberCapability from a protect configuration. + +# Arguments +- `number_capability`: The capability type to return the CountryRuleSet for. Valid values + are SMS, VOICE, or MMS. +- `protect_configuration_id`: The unique identifier for the protect configuration. + +""" +function get_protect_configuration_country_rule_set( + NumberCapability, + ProtectConfigurationId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "GetProtectConfigurationCountryRuleSet", + Dict{String,Any}( + "NumberCapability" => NumberCapability, + "ProtectConfigurationId" => ProtectConfigurationId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_protect_configuration_country_rule_set( + NumberCapability, + ProtectConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "GetProtectConfigurationCountryRuleSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "NumberCapability" => NumberCapability, + "ProtectConfigurationId" => ProtectConfigurationId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_pool_origination_identities(pool_id) + list_pool_origination_identities(pool_id, params::Dict{String,<:Any}) + +Lists all associated origination identities in your pool. If you specify filters, the +output includes information for only those origination identities that meet the filter +criteria. + +# Arguments +- `pool_id`: The unique identifier for the pool. This value can be either the PoolId or + PoolArn. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of PoolOriginationIdentitiesFilter objects to filter the results.. +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. 
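+
+# Example
+A minimal call sketch (illustrative only; the pool identifier is a placeholder):
+
+    list_pool_origination_identities("pool-id")
+    list_pool_origination_identities("pool-id", Dict("MaxResults" => 20))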
+""" +function list_pool_origination_identities( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "ListPoolOriginationIdentities", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_pool_origination_identities( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "ListPoolOriginationIdentities", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_registration_associations(registration_id) + list_registration_associations(registration_id, params::Dict{String,<:Any}) + +Retreive all of the origination identies that are associated with a registration. + +# Arguments +- `registration_id`: The unique identifier for the registration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of RegistrationAssociationFilter to apply to the results that are + returned. +- `"MaxResults"`: The maximum number of results to return per each request. +- `"NextToken"`: The token to be used for the next set of paginated results. You don't need + to supply a value for this field in the initial request. +""" +function list_registration_associations( + RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "ListRegistrationAssociations", + Dict{String,Any}("RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_registration_associations( + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "ListRegistrationAssociations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RegistrationId" => RegistrationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +List all tags associated with a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to query for. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "ListTagsForResource", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_keyword(keyword, keyword_message, origination_identity) + put_keyword(keyword, keyword_message, origination_identity, params::Dict{String,<:Any}) + +Creates or updates a keyword configuration on an origination phone number or pool. A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. 
When your number receives a message that begins with a keyword, Amazon Pinpoint responds with a customizable message. If you -specify a keyword that isn't valid, an Error is returned. +specify a keyword that isn't valid, an error is returned. # Arguments - `keyword`: The new keyword to add. -- `keyword_message`: The message associated with the keyword. AUTOMATIC_RESPONSE: A - message is sent to the recipient. OPT_OUT: Keeps the recipient from receiving future - messages. OPT_IN: The recipient wants to receive future messages. +- `keyword_message`: The message associated with the keyword. - `origination_identity`: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers get the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values @@ -1291,6 +2391,8 @@ specify a keyword that isn't valid, an Error is returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"KeywordAction"`: The action to perform for the new keyword when it is received. + AUTOMATIC_RESPONSE: A message is sent to the recipient. OPT_OUT: Keeps the recipient from + receiving future messages. OPT_IN: The recipient wants to receive future messages. """ function put_keyword( Keyword, @@ -1339,7 +2441,7 @@ end put_opted_out_number(opt_out_list_name, opted_out_number, params::Dict{String,<:Any}) Creates an opted out destination phone number in the opt-out list. If the destination phone -number isn't valid or if the specified opt-out list doesn't exist, an Error is returned. +number isn't valid or if the specified opt-out list doesn't exist, an error is returned. # Arguments - `opt_out_list_name`: The OptOutListName or OptOutListArn to add the phone number to. @@ -1380,13 +2482,62 @@ function put_opted_out_number( ) end +""" + put_registration_field_value(field_path, registration_id) + put_registration_field_value(field_path, registration_id, params::Dict{String,<:Any}) + +Creates or updates a field value for a registration. + +# Arguments +- `field_path`: The path to the registration form field. You can use + DescribeRegistrationFieldDefinitions for a list of FieldPaths. +- `registration_id`: The unique identifier for the registration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"RegistrationAttachmentId"`: The unique identifier for the registration attachment. +- `"SelectChoices"`: An array of values for the form field. +- `"TextValue"`: The text data for a free form field. 
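+
+# Example
+A minimal call sketch (illustrative only; the field path, registration identifier, and text
+value are placeholders rather than values defined by this service):
+
+    put_registration_field_value(
+        "example.fieldPath", "registration-id", Dict("TextValue" => "Example value")
+    )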
+""" +function put_registration_field_value( + FieldPath, RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "PutRegistrationFieldValue", + Dict{String,Any}("FieldPath" => FieldPath, "RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_registration_field_value( + FieldPath, + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "PutRegistrationFieldValue", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "FieldPath" => FieldPath, "RegistrationId" => RegistrationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ release_phone_number(phone_number_id) release_phone_number(phone_number_id, params::Dict{String,<:Any}) Releases an existing origination phone number in your account. Once released, a phone number is no longer available for sending messages. If the origination phone number has -deletion protection enabled or is associated with a pool, an Error is returned. +deletion protection enabled or is associated with a pool, an error is returned. # Arguments - `phone_number_id`: The PhoneNumberId or PhoneNumberArn of the phone number to release. @@ -1418,6 +2569,50 @@ function release_phone_number( ) end +""" + release_sender_id(iso_country_code, sender_id) + release_sender_id(iso_country_code, sender_id, params::Dict{String,<:Any}) + +Releases an existing sender ID in your account. + +# Arguments +- `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country + or region. +- `sender_id`: The sender ID to release. + +""" +function release_sender_id( + IsoCountryCode, SenderId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "ReleaseSenderId", + Dict{String,Any}("IsoCountryCode" => IsoCountryCode, "SenderId" => SenderId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function release_sender_id( + IsoCountryCode, + SenderId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "ReleaseSenderId", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, "SenderId" => SenderId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ request_phone_number(iso_country_code, message_type, number_capabilities, number_type) request_phone_number(iso_country_code, message_type, number_capabilities, number_type, params::Dict{String,<:Any}) @@ -1443,7 +2638,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DeletionProtectionEnabled"`: By default this is set to false. When set to true the phone number can't be deleted. - `"OptOutListName"`: The name of the OptOutList to associate with the phone number. You - can use the OutOutListName or OptPutListArn. + can use the OptOutListName or OptOutListArn. - `"PoolId"`: The pool to associated with the phone number. You can use the PoolId or PoolArn. - `"RegistrationId"`: Use this field to attach your phone number for an external @@ -1451,45 +2646,239 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Tags"`: An array of tags (key and value pairs) associate with the requested phone number. 
""" -function request_phone_number( - IsoCountryCode, - MessageType, - NumberCapabilities, - NumberType; +function request_phone_number( + IsoCountryCode, + MessageType, + NumberCapabilities, + NumberType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "RequestPhoneNumber", + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, + "MessageType" => MessageType, + "NumberCapabilities" => NumberCapabilities, + "NumberType" => NumberType, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function request_phone_number( + IsoCountryCode, + MessageType, + NumberCapabilities, + NumberType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "RequestPhoneNumber", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, + "MessageType" => MessageType, + "NumberCapabilities" => NumberCapabilities, + "NumberType" => NumberType, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + request_sender_id(iso_country_code, sender_id) + request_sender_id(iso_country_code, sender_id, params::Dict{String,<:Any}) + +Request a new sender ID that doesn't require registration. + +# Arguments +- `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country + or region. +- `sender_id`: The sender ID string to request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If you don't specify a client token, a randomly generated token + is used for the request to ensure idempotency. +- `"DeletionProtectionEnabled"`: By default this is set to false. When set to true the + sender ID can't be deleted. +- `"MessageTypes"`: The type of message. Valid values are TRANSACTIONAL for messages that + are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or + time-sensitive. +- `"Tags"`: An array of tags (key and value pairs) to associate with the sender ID. +""" +function request_sender_id( + IsoCountryCode, SenderId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "RequestSenderId", + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, + "SenderId" => SenderId, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function request_sender_id( + IsoCountryCode, + SenderId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "RequestSenderId", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, + "SenderId" => SenderId, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + send_destination_number_verification_code(verification_channel, verified_destination_number_id) + send_destination_number_verification_code(verification_channel, verified_destination_number_id, params::Dict{String,<:Any}) + +Before you can send test messages to a verified destination phone number you need to opt-in +the verified destination phone number. 
Creates a new text message with a verification code +and send it to a verified destination phone number. Once you have the verification code use +VerifyDestinationNumber to opt-in the verified destination phone number to receive messages. + +# Arguments +- `verification_channel`: Choose to send the verification code as an SMS or voice message. +- `verified_destination_number_id`: The unique identifier for the verified destination + phone number. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ConfigurationSetName"`: The name of the configuration set to use. This can be either + the ConfigurationSetName or ConfigurationSetArn. +- `"Context"`: You can specify custom data in this field. If you do, that data is logged to + the event destination. +- `"DestinationCountryParameters"`: This field is used for any country-specific + registration requirements. Currently, this setting is only used when you send messages to + recipients in India using a sender ID. For more information see Special requirements for + sending SMS messages to recipients in India. +- `"LanguageCode"`: Choose the language to use for the message. +- `"OriginationIdentity"`: The origination identity of the message. This can be either the + PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. +""" +function send_destination_number_verification_code( + VerificationChannel, + VerifiedDestinationNumberId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "SendDestinationNumberVerificationCode", + Dict{String,Any}( + "VerificationChannel" => VerificationChannel, + "VerifiedDestinationNumberId" => VerifiedDestinationNumberId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_destination_number_verification_code( + VerificationChannel, + VerifiedDestinationNumberId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "SendDestinationNumberVerificationCode", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "VerificationChannel" => VerificationChannel, + "VerifiedDestinationNumberId" => VerifiedDestinationNumberId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + send_media_message(destination_phone_number, origination_identity) + send_media_message(destination_phone_number, origination_identity, params::Dict{String,<:Any}) + +Creates a new multimedia message (MMS) and sends it to a recipient's phone number. + +# Arguments +- `destination_phone_number`: The destination phone number in E.164 format. +- `origination_identity`: The origination identity of the message. This can be either the + PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ConfigurationSetName"`: The name of the configuration set to use. This can be either + the ConfigurationSetName or ConfigurationSetArn. +- `"Context"`: You can specify custom data in this field. If you do, that data is logged to + the event destination. +- `"DryRun"`: When set to true, the message is checked and validated, but isn't sent to the + end recipient. +- `"MaxPrice"`: The maximum amount that you want to spend, in US dollars, per each MMS + message. +- `"MediaUrls"`: An array of URLs to each media file to send. 
The media files have to be + stored in a publicly available S3 bucket. Supported media file formats are listed in MMS + file types, size and character limits. For more information on creating an S3 bucket and + managing objects, see Creating a bucket and Uploading objects in the S3 user guide. +- `"MessageBody"`: The text body of the message. +- `"ProtectConfigurationId"`: The unique identifier of the protect configuration to use. +- `"TimeToLive"`: How long the text message is valid for. By default this is 72 hours. +""" +function send_media_message( + DestinationPhoneNumber, + OriginationIdentity; aws_config::AbstractAWSConfig=global_aws_config(), ) return pinpoint_sms_voice_v2( - "RequestPhoneNumber", + "SendMediaMessage", Dict{String,Any}( - "IsoCountryCode" => IsoCountryCode, - "MessageType" => MessageType, - "NumberCapabilities" => NumberCapabilities, - "NumberType" => NumberType, - "ClientToken" => string(uuid4()), + "DestinationPhoneNumber" => DestinationPhoneNumber, + "OriginationIdentity" => OriginationIdentity, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function request_phone_number( - IsoCountryCode, - MessageType, - NumberCapabilities, - NumberType, +function send_media_message( + DestinationPhoneNumber, + OriginationIdentity, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return pinpoint_sms_voice_v2( - "RequestPhoneNumber", + "SendMediaMessage", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "IsoCountryCode" => IsoCountryCode, - "MessageType" => MessageType, - "NumberCapabilities" => NumberCapabilities, - "NumberType" => NumberType, - "ClientToken" => string(uuid4()), + "DestinationPhoneNumber" => DestinationPhoneNumber, + "OriginationIdentity" => OriginationIdentity, ), params, ), @@ -1529,11 +2918,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MaxPrice"`: The maximum amount that you want to spend, in US dollars, per each text message part. A text message can contain multiple parts. - `"MessageBody"`: The body of the text message. -- `"MessageType"`: The type of message. Valid values are TRANSACTIONAL for messages that - are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or - time-sensitive. +- `"MessageType"`: The type of message. Valid values are for messages that are critical or + time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. - `"OriginationIdentity"`: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. +- `"ProtectConfigurationId"`: The unique identifier for the protect configuration. - `"TimeToLive"`: How long the text message is valid for. By default this is 72 hours. """ function send_text_message( @@ -1569,7 +2958,7 @@ end send_voice_message(destination_phone_number, origination_identity) send_voice_message(destination_phone_number, origination_identity, params::Dict{String,<:Any}) -Allows you to send a request that sends a text message through Amazon Pinpoint. This +Allows you to send a request that sends a voice message through Amazon Pinpoint. This operation uses Amazon Polly to convert a text script into a voice message. # Arguments @@ -1591,6 +2980,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys synthesis markup language (SSML). TEXT: This is the default value. When used the maximum character limit is 3000. 
SSML: When used the maximum character limit is 6000 including SSML tagging. +- `"ProtectConfigurationId"`: The unique identifier for the protect configuration. - `"TimeToLive"`: How long the voice message is valid for. By default this is 72 hours. - `"VoiceId"`: The voice for the Amazon Polly service to use. By default this is set to \"MATTHEW\". @@ -1633,6 +3023,47 @@ function send_voice_message( ) end +""" + set_account_default_protect_configuration(protect_configuration_id) + set_account_default_protect_configuration(protect_configuration_id, params::Dict{String,<:Any}) + +Set a protect configuration as your account default. You can only have one account default +protect configuration at a time. The current account default protect configuration is +replaced with the provided protect configuration. + +# Arguments +- `protect_configuration_id`: The unique identifier for the protect configuration. + +""" +function set_account_default_protect_configuration( + ProtectConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "SetAccountDefaultProtectConfiguration", + Dict{String,Any}("ProtectConfigurationId" => ProtectConfigurationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function set_account_default_protect_configuration( + ProtectConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "SetAccountDefaultProtectConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ProtectConfigurationId" => ProtectConfigurationId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ set_default_message_type(configuration_set_name, message_type) set_default_message_type(configuration_set_name, message_type, params::Dict{String,<:Any}) @@ -1739,6 +3170,43 @@ function set_default_sender_id( ) end +""" + set_media_message_spend_limit_override(monthly_limit) + set_media_message_spend_limit_override(monthly_limit, params::Dict{String,<:Any}) + +Sets an account level monthly spend limit override for sending MMS messages. The requested +spend limit must be less than or equal to the MaxLimit, which is set by Amazon Web +Services. + +# Arguments +- `monthly_limit`: The new monthly limit to enforce on text messages. + +""" +function set_media_message_spend_limit_override( + MonthlyLimit; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "SetMediaMessageSpendLimitOverride", + Dict{String,Any}("MonthlyLimit" => MonthlyLimit); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function set_media_message_spend_limit_override( + MonthlyLimit, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "SetMediaMessageSpendLimitOverride", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("MonthlyLimit" => MonthlyLimit), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ set_text_message_spend_limit_override(monthly_limit) set_text_message_spend_limit_override(monthly_limit, params::Dict{String,<:Any}) @@ -1813,6 +3281,41 @@ function set_voice_message_spend_limit_override( ) end +""" + submit_registration_version(registration_id) + submit_registration_version(registration_id, params::Dict{String,<:Any}) + +Submit the specified registration for review and approval. 
+ +# Arguments +- `registration_id`: The unique identifier for the registration. + +""" +function submit_registration_version( + RegistrationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "SubmitRegistrationVersion", + Dict{String,Any}("RegistrationId" => RegistrationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function submit_registration_version( + RegistrationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "SubmitRegistrationVersion", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("RegistrationId" => RegistrationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -1921,7 +3424,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Enabled"`: When set to true logging is enabled. - `"KinesisFirehoseDestination"`: An object that contains information about an event destination for logging to Kinesis Data Firehose. -- `"MatchingEventTypes"`: An array of event types that determine which events to log. +- `"MatchingEventTypes"`: An array of event types that determine which events to log. The + TEXT_SENT event type is not supported. - `"SnsDestination"`: An object that contains information about an event destination that sends data to Amazon SNS. """ @@ -1970,7 +3474,7 @@ end Updates the configuration of an existing origination phone number. You can update the opt-out list, enable or disable two-way messaging, change the TwoWayChannelArn, enable or disable self-managed opt-outs, and enable or disable deletion protection. If the -origination phone number is associated with a pool, an Error is returned. +origination phone number is associated with a pool, an error is returned. # Arguments - `phone_number_id`: The unique identifier of the phone number. Valid values for this field @@ -1988,6 +3492,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. +- `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to + post inbound SMS messages. - `"TwoWayEnabled"`: By default this is set to false. When set to true you can receive incoming text messages from your end recipients. """ @@ -2040,6 +3546,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys requests. You're also responsible for tracking and honoring opt-out requests. - `"SharedRoutesEnabled"`: Indicates whether shared routes are enabled for the pool. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. +- `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to + post inbound SMS messages. - `"TwoWayEnabled"`: By default this is set to false. When set to true you can receive incoming text messages from your end recipients. """ @@ -2061,3 +3569,205 @@ function update_pool( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_protect_configuration(protect_configuration_id) + update_protect_configuration(protect_configuration_id, params::Dict{String,<:Any}) + +Update the setting for an existing protect configuration. 
+ +# Arguments +- `protect_configuration_id`: The unique identifier for the protect configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeletionProtectionEnabled"`: When set to true deletion protection is enabled. By + default this is set to false. +""" +function update_protect_configuration( + ProtectConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "UpdateProtectConfiguration", + Dict{String,Any}("ProtectConfigurationId" => ProtectConfigurationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_protect_configuration( + ProtectConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "UpdateProtectConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ProtectConfigurationId" => ProtectConfigurationId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_protect_configuration_country_rule_set(country_rule_set_updates, number_capability, protect_configuration_id) + update_protect_configuration_country_rule_set(country_rule_set_updates, number_capability, protect_configuration_id, params::Dict{String,<:Any}) + +Update a country rule set to ALLOW or BLOCK messages to be sent to the specified +destination counties. You can update one or multiple countries at a time. The updates are +only applied to the specified NumberCapability type. + +# Arguments +- `country_rule_set_updates`: A map of ProtectConfigurationCountryRuleSetInformation + objects that contain the details for the requested NumberCapability. The Key is the + two-letter ISO country code. For a list of supported ISO country codes, see Supported + countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide. +- `number_capability`: The number capability to apply the CountryRuleSetUpdates updates to. +- `protect_configuration_id`: The unique identifier for the protect configuration. + +""" +function update_protect_configuration_country_rule_set( + CountryRuleSetUpdates, + NumberCapability, + ProtectConfigurationId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "UpdateProtectConfigurationCountryRuleSet", + Dict{String,Any}( + "CountryRuleSetUpdates" => CountryRuleSetUpdates, + "NumberCapability" => NumberCapability, + "ProtectConfigurationId" => ProtectConfigurationId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_protect_configuration_country_rule_set( + CountryRuleSetUpdates, + NumberCapability, + ProtectConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "UpdateProtectConfigurationCountryRuleSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CountryRuleSetUpdates" => CountryRuleSetUpdates, + "NumberCapability" => NumberCapability, + "ProtectConfigurationId" => ProtectConfigurationId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_sender_id(iso_country_code, sender_id) + update_sender_id(iso_country_code, sender_id, params::Dict{String,<:Any}) + +Updates the configuration of an existing sender ID. + +# Arguments +- `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country + or region. 
+- `sender_id`: The sender ID to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeletionProtectionEnabled"`: By default this is set to false. When set to true the + sender ID can't be deleted. +""" +function update_sender_id( + IsoCountryCode, SenderId; aws_config::AbstractAWSConfig=global_aws_config() +) + return pinpoint_sms_voice_v2( + "UpdateSenderId", + Dict{String,Any}("IsoCountryCode" => IsoCountryCode, "SenderId" => SenderId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_sender_id( + IsoCountryCode, + SenderId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "UpdateSenderId", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IsoCountryCode" => IsoCountryCode, "SenderId" => SenderId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + verify_destination_number(verification_code, verified_destination_number_id) + verify_destination_number(verification_code, verified_destination_number_id, params::Dict{String,<:Any}) + +Use the verification code that was received by the verified destination phone number to +opt-in the verified destination phone number to receive more messages. + +# Arguments +- `verification_code`: The verification code that was received by the verified destination + phone number. +- `verified_destination_number_id`: The unique identifier for the verififed destination + phone number. + +""" +function verify_destination_number( + VerificationCode, + VerifiedDestinationNumberId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "VerifyDestinationNumber", + Dict{String,Any}( + "VerificationCode" => VerificationCode, + "VerifiedDestinationNumberId" => VerifiedDestinationNumberId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function verify_destination_number( + VerificationCode, + VerifiedDestinationNumberId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pinpoint_sms_voice_v2( + "VerifyDestinationNumber", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "VerificationCode" => VerificationCode, + "VerifiedDestinationNumberId" => VerifiedDestinationNumberId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/pipes.jl b/src/services/pipes.jl index a8e782d130..120f596630 100644 --- a/src/services/pipes.jl +++ b/src/services/pipes.jl @@ -23,9 +23,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DesiredState"`: The state the pipe should be in. - `"Enrichment"`: The ARN of the enrichment resource. - `"EnrichmentParameters"`: The parameters required to set up enrichment on your pipe. +- `"LogConfiguration"`: The logging configuration settings for the pipe. - `"SourceParameters"`: The parameters required to set up a source for your pipe. - `"Tags"`: The list of key-value pairs to associate with the pipe. -- `"TargetParameters"`: The parameters required to set up a target for your pipe. +- `"TargetParameters"`: The parameters required to set up a target for your pipe. For more + information about pipe target parameters, including how to use dynamic path parameters, see + Target parameters in the Amazon EventBridge User Guide. 
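+
+# Example
+A minimal call sketch (illustrative only; the pipe name, role ARN, and source and target
+ARNs are placeholders):
+
+    create_pipe(
+        "example-pipe",
+        "arn:aws:iam::123456789012:role/example-pipe-role",
+        "arn:aws:sqs:us-east-1:123456789012:example-source",
+        "arn:aws:sqs:us-east-1:123456789012:example-target",
+    )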
""" function create_pipe( Name, RoleArn, Source, Target; aws_config::AbstractAWSConfig=global_aws_config() @@ -334,14 +337,15 @@ end update_pipe(name, role_arn) update_pipe(name, role_arn, params::Dict{String,<:Any}) -Update an existing pipe. When you call UpdatePipe, only the fields that are included in the -request are changed, the rest are unchanged. The exception to this is if you modify any -Amazon Web Services-service specific fields in the SourceParameters, EnrichmentParameters, -or TargetParameters objects. The fields in these objects are updated atomically as one and -override existing values. This is by design and means that if you don't specify an optional -field in one of these Parameters objects, that field will be set to its system-default -value after the update. For more information about pipes, see Amazon EventBridge Pipes in -the Amazon EventBridge User Guide. +Update an existing pipe. When you call UpdatePipe, EventBridge only the updates fields you +have specified in the request; the rest remain unchanged. The exception to this is if you +modify any Amazon Web Services-service specific fields in the SourceParameters, +EnrichmentParameters, or TargetParameters objects. For example, DynamoDBStreamParameters or +EventBridgeEventBusParameters. EventBridge updates the fields in these objects atomically +as one and overrides existing values. This is by design, and means that if you don't +specify an optional field in one of these Parameters objects, EventBridge sets that field +to its system-default value during the update. For more information about pipes, see +Amazon EventBridge Pipes in the Amazon EventBridge User Guide. # Arguments - `name`: The name of the pipe. @@ -353,9 +357,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DesiredState"`: The state the pipe should be in. - `"Enrichment"`: The ARN of the enrichment resource. - `"EnrichmentParameters"`: The parameters required to set up enrichment on your pipe. +- `"LogConfiguration"`: The logging configuration settings for the pipe. - `"SourceParameters"`: The parameters required to set up a source for your pipe. - `"Target"`: The ARN of the target resource. -- `"TargetParameters"`: The parameters required to set up a target for your pipe. +- `"TargetParameters"`: The parameters required to set up a target for your pipe. For more + information about pipe target parameters, including how to use dynamic path parameters, see + Target parameters in the Amazon EventBridge User Guide. """ function update_pipe(Name, RoleArn; aws_config::AbstractAWSConfig=global_aws_config()) return pipes( diff --git a/src/services/polly.jl b/src/services/polly.jl index b67655c8a4..a2abc255d7 100644 --- a/src/services/polly.jl +++ b/src/services/polly.jl @@ -57,8 +57,8 @@ voices. This operation requires permissions to perform the polly:DescribeVoices # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Engine"`: Specifies the engine (standard or neural) used by Amazon Polly when - processing input text for speech synthesis. +- `"Engine"`: Specifies the engine (standard, neural, long-form or generative) used by + Amazon Polly when processing input text for speech synthesis. - `"IncludeAdditionalLanguageCodes"`: Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. 
For instance, if you request all languages that use US English (es-US), and there is an Italian voice @@ -276,9 +276,9 @@ synthesis task. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Engine"`: Specifies the engine (standard or neural) for Amazon Polly to use when - processing input text for speech synthesis. Using a voice that is not supported for the - engine selected will result in an error. +- `"Engine"`: Specifies the engine (standard, neural, long-form or generative) for Amazon + Polly to use when processing input text for speech synthesis. Using a voice that is not + supported for the engine selected will result in an error. - `"LanguageCode"`: Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is @@ -292,8 +292,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OutputS3KeyPrefix"`: The Amazon S3 key prefix for the output speech file. - `"SampleRate"`: The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\", and \"24000\". The default value for - standard voices is \"22050\". The default value for neural voices is \"24000\". Valid - values for pcm are \"8000\" and \"16000\" The default value is \"16000\". + standard voices is \"22050\". The default value for neural voices is \"24000\". The default + value for long-form voices is \"24000\". The default value for generative voices is + \"24000\". Valid values for pcm are \"8000\" and \"16000\" The default value is \"16000\". - `"SnsTopicArn"`: ARN for the SNS topic optionally used for providing status notification for a speech synthesis task. - `"SpeechMarkTypes"`: The type of speech marks returned for the input text. @@ -369,15 +370,13 @@ used. For more information, see How it Works. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Engine"`: Specifies the engine (standard or neural) for Amazon Polly to use when - processing input text for speech synthesis. For information on Amazon Polly voices and - which voices are available in standard-only, NTTS-only, and both standard and NTTS formats, - see Available Voices. NTTS-only voices When using NTTS-only voices such as Kevin (en-US), - this parameter is required and must be set to neural. If the engine is not specified, or is - set to standard, this will result in an error. Type: String Valid Values: standard | - neural Required: Yes Standard voices For standard voices, this is not required; the - engine parameter defaults to standard. If the engine is not specified, or is set to - standard and an NTTS-only voice is selected, this will result in an error. +- `"Engine"`: Specifies the engine (standard, neural, long-form, or generative) for Amazon + Polly to use when processing input text for speech synthesis. Provide an engine that is + supported by the voice you select. If you don't provide an engine, the standard engine is + selected by default. If a chosen voice isn't supported by the standard engine, this will + result in an error. For information on Amazon Polly voices and which voices are available + for each engine, see Available Voices. 
Type: String Valid Values: standard | neural | + long-form | generative Required: Yes - `"LanguageCode"`: Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is @@ -390,8 +389,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys same as the language of the voice. For information about storing lexicons, see PutLexicon. - `"SampleRate"`: The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\", and \"24000\". The default value for - standard voices is \"22050\". The default value for neural voices is \"24000\". Valid - values for pcm are \"8000\" and \"16000\" The default value is \"16000\". + standard voices is \"22050\". The default value for neural voices is \"24000\". The default + value for long-form voices is \"24000\". The default value for generative voices is + \"24000\". Valid values for pcm are \"8000\" and \"16000\" The default value is \"16000\". - `"SpeechMarkTypes"`: The type of speech marks returned for the input text. - `"TextType"`: Specifies whether the input text is plain text or SSML. The default value is plain text. For more information, see Using SSML. diff --git a/src/services/pricing.jl b/src/services/pricing.jl index 4b37434cc1..658be4db4b 100644 --- a/src/services/pricing.jl +++ b/src/services/pricing.jl @@ -98,13 +98,13 @@ end Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you -retrieve from the ListPriceLists response. +retrieve from the ListPriceLists response. # Arguments - `file_format`: The format that you want to retrieve your Price List files in. The - FileFormat can be obtained from the ListPriceLists response. + FileFormat can be obtained from the ListPriceLists response. - `price_list_arn`: The unique identifier that maps to where your Price List files are - located. PriceListArn can be obtained from the ListPriceLists response. + located. PriceListArn can be obtained from the ListPriceLists response. """ function get_price_list_file_url( @@ -193,7 +193,7 @@ EffectiveDate. Use without a RegionCode filter to list Price List references fro available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the -GetPriceListFileUrl API. +GetPriceListFileUrl API. # Arguments - `currency_code`: The three alphabetical character ISO-4217 currency code that the Price @@ -202,9 +202,9 @@ GetPriceListFileUrl API. - `service_code`: The service code or the Savings Plan service code for the attributes that you want to retrieve. For example, to get the list of applicable Amazon EC2 price lists, use AmazonEC2. For a full list of service codes containing On-Demand and Reserved Instance - (RI) pricing, use the DescribeServices API. To retrieve the Compute Savings Plan price - lists, use ComputeSavingsPlans. To retrieve Machine Learning Savings Plans price lists, use - MachineLearningSavingsPlans. + (RI) pricing, use the DescribeServices API. 
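A sketch of the ListPriceLists to GetPriceListFileUrl flow; the positional order of CurrencyCode, EffectiveDate, and ServiceCode, the accepted timestamp format, and the PriceLists/PriceListArn/Url response fields are assumptions rather than facts from this patch.

using AWS
@service Pricing

# Find an EC2 price list for one Region, then resolve a downloadable file URL.
lists = Pricing.list_price_lists(
    "USD",                       # CurrencyCode
    "2024-06-01T00:00:00Z",      # EffectiveDate (position and format assumed)
    "AmazonEC2",                 # ServiceCode (position assumed)
    Dict("RegionCode" => "us-east-1"),
)
arn = lists["PriceLists"][1]["PriceListArn"]               # response shape assumed
url = Pricing.get_price_list_file_url("json", arn)["Url"]  # response shape assumed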
To retrieve the Reserved Instance and Compute + Savings Plan price lists, use ComputeSavingsPlans. To retrieve Machine Learning Savings + Plans price lists, use MachineLearningSavingsPlans. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -214,7 +214,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"RegionCode"`: This is used to filter the Price List by Amazon Web Services Region. For example, to get the price list only for the US East (N. Virginia) Region, use us-east-1. If nothing is specified, you retrieve price lists for all applicable Regions. The available - RegionCode list can be retrieved from GetAttributeValues API. + RegionCode list can be retrieved from GetAttributeValues API. """ function list_price_lists( CurrencyCode, diff --git a/src/services/privatenetworks.jl b/src/services/privatenetworks.jl index 6120179103..0179387eef 100644 --- a/src/services/privatenetworks.jl +++ b/src/services/privatenetworks.jl @@ -100,6 +100,14 @@ Activates the specified network site. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"clientToken"`: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency. +- `"commitmentConfiguration"`: Determines the duration and renewal status of the commitment + period for all pending radio units. If you include commitmentConfiguration in the + ActivateNetworkSiteRequest action, you must specify the following: The commitment period + for the radio unit. You can choose a 60-day, 1-year, or 3-year period. Whether you want + your commitment period to automatically renew for one more year after your current + commitment period expires. For pricing, see Amazon Web Services Private 5G Pricing. If + you do not include commitmentConfiguration in the ActivateNetworkSiteRequest action, the + commitment period is set to 60-days. """ function activate_network_site( networkSiteArn, shippingAddress; aws_config::AbstractAWSConfig=global_aws_config() @@ -855,21 +863,38 @@ end start_network_resource_update(network_resource_arn, update_type) start_network_resource_update(network_resource_arn, update_type, params::Dict{String,<:Any}) -Starts an update of the specified network resource. After you submit a request to replace -or return a network resource, the status of the network resource is -CREATING_SHIPPING_LABEL. The shipping label is available when the status of the network -resource is PENDING_RETURN. After the network resource is successfully returned, its status -is DELETED. For more information, see Return a radio unit. +Use this action to do the following tasks: Update the duration and renewal status of the +commitment period for a radio unit. The update goes into effect immediately. Request a +replacement for a network resource. Request that you return a network resource. After +you submit a request to replace or return a network resource, the status of the network +resource changes to CREATING_SHIPPING_LABEL. The shipping label is available when the +status of the network resource is PENDING_RETURN. After the network resource is +successfully returned, its status changes to DELETED. For more information, see Return a +radio unit. # Arguments - `network_resource_arn`: The Amazon Resource Name (ARN) of the network resource. - `update_type`: The update type. REPLACE - Submits a request to replace a defective radio unit. 
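A sketch of activating a network site with an explicit commitment, per the commitmentConfiguration notes above. The ARN and shipping address are hypothetical, and the commitmentLength/automaticRenewal field names are assumptions.

using AWS
@service PrivateNetworks

PrivateNetworks.activate_network_site(
    "arn:aws:private-networks:us-east-1:111122223333:network-site/example",  # hypothetical
    Dict(                                   # shippingAddress (keys are assumptions)
        "name" => "Data Center 1",
        "street1" => "123 Any Street",
        "city" => "Seattle",
        "stateOrProvince" => "WA",
        "postalCode" => "98101",
        "country" => "US",
    ),
    Dict(
        # Opt into a 1-year, auto-renewing commitment; omitting commitmentConfiguration
        # falls back to the default 60-day period described above.
        "commitmentConfiguration" => Dict(
            "commitmentLength" => "ONE_YEAR",   # field names/values are assumptions
            "automaticRenewal" => true,
        ),
    ),
)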
We provide a shipping label that you can use for the return process and we ship - a replacement radio unit to you. RETURN - Submits a request to replace a radio unit that + a replacement radio unit to you. RETURN - Submits a request to return a radio unit that you no longer need. We provide a shipping label that you can use for the return process. + COMMITMENT - Submits a request to change or renew the commitment period. If you choose this + value, then you must set commitmentConfiguration . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"commitmentConfiguration"`: Use this action to extend and automatically renew the + commitment period for the radio unit. You can do the following: Change a 60-day + commitment to a 1-year or 3-year commitment. The change is immediate and the hourly rate + decreases to the rate for the new commitment period. Change a 1-year commitment to a + 3-year commitment. The change is immediate and the hourly rate decreases to the rate for + the 3-year commitment period. Set a 1-year commitment to automatically renew for an + additional 1 year. The hourly rate for the additional year will continue to be the same as + your existing 1-year rate. Set a 3-year commitment to automatically renew for an + additional 1 year. The hourly rate for the additional year will continue to be the same as + your existing 3-year rate. Turn off a previously-enabled automatic renewal on a 1-year or + 3-year commitment. You cannot use the automatic-renewal option for a 60-day commitment. + For pricing, see Amazon Web Services Private 5G Pricing. - `"returnReason"`: The reason for the return. Providing a reason for a return is optional. - `"shippingAddress"`: The shipping address. If you don't provide a shipping address when replacing or returning a network resource, we use the address from the original order for diff --git a/src/services/proton.jl b/src/services/proton.jl index 436d3bbe4f..6fe5474b4c 100644 --- a/src/services/proton.jl +++ b/src/services/proton.jl @@ -1074,6 +1074,35 @@ function delete_component( ) end +""" + delete_deployment(id) + delete_deployment(id, params::Dict{String,<:Any}) + +Delete the deployment. + +# Arguments +- `id`: The ID of the deployment to delete. + +""" +function delete_deployment(id; aws_config::AbstractAWSConfig=global_aws_config()) + return proton( + "DeleteDeployment", + Dict{String,Any}("id" => id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_deployment( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return proton( + "DeleteDeployment", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_environment(name) delete_environment(name, params::Dict{String,<:Any}) @@ -1522,6 +1551,42 @@ function get_component( ) end +""" + get_deployment(id) + get_deployment(id, params::Dict{String,<:Any}) + +Get detailed data for a deployment. + +# Arguments +- `id`: The ID of the deployment that you want to get the detailed data for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"componentName"`: The name of a component that you want to get the detailed data for. +- `"environmentName"`: The name of a environment that you want to get the detailed data for. 
+- `"serviceInstanceName"`: The name of the service instance associated with the given + deployment ID. serviceName must be specified to identify the service instance. +- `"serviceName"`: The name of the service associated with the given deployment ID. +""" +function get_deployment(id; aws_config::AbstractAWSConfig=global_aws_config()) + return proton( + "GetDeployment", + Dict{String,Any}("id" => id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_deployment( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return proton( + "GetDeployment", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_environment(name) get_environment(name, params::Dict{String,<:Any}) @@ -2192,6 +2257,7 @@ components, see Proton components in the Proton User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment whose outputs you want. - `"nextToken"`: A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested. """ @@ -2292,6 +2358,38 @@ function list_components( ) end +""" + list_deployments() + list_deployments(params::Dict{String,<:Any}) + +List deployments. You can filter the result list by environment, service, or a single +service instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"componentName"`: The name of a component for result list filtering. Proton returns + deployments associated with that component. +- `"environmentName"`: The name of an environment for result list filtering. Proton returns + deployments associated with the environment. +- `"maxResults"`: The maximum number of deployments to list. +- `"nextToken"`: A token that indicates the location of the next deployment in the array of + deployment, after the list of deployment that was previously requested. +- `"serviceInstanceName"`: The name of a service instance for result list filtering. Proton + returns the deployments associated with the service instance. +- `"serviceName"`: The name of a service for result list filtering. Proton returns + deployments associated with service instances of the service. +""" +function list_deployments(; aws_config::AbstractAWSConfig=global_aws_config()) + return proton("ListDeployments"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_deployments( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return proton( + "ListDeployments", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_environment_account_connections(requested_by) list_environment_account_connections(requested_by, params::Dict{String,<:Any}) @@ -2348,6 +2446,7 @@ List the infrastructure as code outputs for your environment. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment whose outputs you want. - `"nextToken"`: A token that indicates the location of the next environment output in the array of environment outputs, after the list of environment outputs that was previously requested. @@ -2617,6 +2716,7 @@ Get a list service of instance Infrastructure as Code (IaC) outputs. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment whose outputs you want. - `"nextToken"`: A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested. """ @@ -2754,6 +2854,7 @@ Get a list of service pipeline Infrastructure as Code (IaC) outputs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment you want the outputs for. - `"nextToken"`: A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested. """ diff --git a/src/services/qbusiness.jl b/src/services/qbusiness.jl new file mode 100644 index 0000000000..9fb58ab1af --- /dev/null +++ b/src/services/qbusiness.jl @@ -0,0 +1,2493 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: qbusiness +using AWS.Compat +using AWS.UUIDs + +""" + batch_delete_document(application_id, documents, index_id) + batch_delete_document(application_id, documents, index_id, params::Dict{String,<:Any}) + +Asynchronously deletes one or more documents added using the BatchPutDocument API from an +Amazon Q Business index. You can see the progress of the deletion, and any error messages +related to the process, by using CloudWatch. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application. +- `documents`: Documents deleted from the Amazon Q Business index. +- `index_id`: The identifier of the Amazon Q Business index that contains the documents to + delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataSourceSyncId"`: The identifier of the data source sync during which the documents + were deleted. +""" +function batch_delete_document( + applicationId, documents, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/documents/delete", + Dict{String,Any}("documents" => documents); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_delete_document( + applicationId, + documents, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/documents/delete", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("documents" => documents), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_put_document(application_id, documents, index_id) + batch_put_document(application_id, documents, index_id, params::Dict{String,<:Any}) + +Adds one or more documents to an Amazon Q Business index. You use this API to: ingest +your structured and unstructured documents and documents stored in an Amazon S3 bucket into +an Amazon Q Business index. add custom attributes to documents in an Amazon Q Business +index. attach an access control list to the documents added to an Amazon Q Business +index. You can see the progress of the deletion, and any error messages related to the +process, by using CloudWatch. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application. +- `documents`: One or more documents to add to the index. 
+- `index_id`: The identifier of the Amazon Q Business index to add the documents to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataSourceSyncId"`: The identifier of the data source sync during which the documents + were added. +- `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permission to access your + S3 bucket. +""" +function batch_put_document( + applicationId, documents, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/documents", + Dict{String,Any}("documents" => documents); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_put_document( + applicationId, + documents, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/documents", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("documents" => documents), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + chat_sync(application_id) + chat_sync(application_id, params::Dict{String,<:Any}) + +Starts or continues a non-streaming Amazon Q Business conversation. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application linked to the + Amazon Q Business conversation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"actionExecution"`: A request from an end user to perform an Amazon Q Business plugin + action. +- `"attachments"`: A list of files uploaded directly during chat. You can upload a maximum + of 5 files of upto 10 MB each. +- `"attributeFilter"`: Enables filtering of Amazon Q Business web experience responses + based on document attributes or metadata fields. +- `"authChallengeResponse"`: An authentication verification event response by a third party + authentication server to Amazon Q Business. +- `"chatMode"`: The chat modes available to an Amazon Q Business end user. + RETRIEVAL_MODE - The default chat mode for an Amazon Q Business application. When this mode + is enabled, Amazon Q Business generates responses only from data sources connected to an + Amazon Q Business application. CREATOR_MODE - By selecting this mode, users can choose + to generate responses only from the LLM knowledge, without consulting connected data + sources, for a chat request. PLUGIN_MODE - By selecting this mode, users can choose to + use plugins in chat. For more information, see Admin controls and guardrails, Plugins, + and Conversation settings. +- `"chatModeConfiguration"`: The chat mode configuration for an Amazon Q Business + application. +- `"clientToken"`: A token that you provide to identify a chat request. +- `"conversationId"`: The identifier of the Amazon Q Business conversation. +- `"parentMessageId"`: The identifier of the previous system message in a conversation. +- `"userGroups"`: The groups that a user associated with the chat input belongs to. +- `"userId"`: The identifier of the user attached to the chat input. +- `"userMessage"`: A end user message in a conversation. 
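A minimal sketch of a non-streaming chat call using the chat_sync wrapper added in this file. The application ID and message are hypothetical, and the systemMessage/conversationId response fields are assumptions.

using AWS
@service QBusiness

resp = QBusiness.chat_sync(
    "app-0123456789abcdef",              # applicationId (hypothetical)
    Dict(
        "userMessage" => "Summarize last week's incident reports.",
        "chatMode" => "RETRIEVAL_MODE",  # answer only from connected data sources
    ),
)
answer = resp["systemMessage"]           # response field names assumed
conversation_id = resp["conversationId"]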
+""" +function chat_sync(applicationId; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "POST", + "/applications/$(applicationId)/conversations?sync", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function chat_sync( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/conversations?sync", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_application(display_name) + create_application(display_name, params::Dict{String,<:Any}) + +Creates an Amazon Q Business application. There are new tiers for Amazon Q Business. Not +all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For +information on what's included in Amazon Q Business Lite and what's included in Amazon Q +Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to +assign subscription tiers to users. + +# Arguments +- `display_name`: A name for the Amazon Q Business application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attachmentsConfiguration"`: An option to allow end users to upload files directly + during chat. +- `"clientToken"`: A token that you provide to identify the request to create your Amazon Q + Business application. +- `"description"`: A description for the Amazon Q Business application. +- `"encryptionConfiguration"`: The identifier of the KMS key that is used to encrypt your + data. Amazon Q Business doesn't support asymmetric keys. +- `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center + instance you are either creating for—or connecting to—your Amazon Q Business + application. +- `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in + the web experience. +- `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permissions to access + your Amazon CloudWatch logs and metrics. +- `"tags"`: A list of key-value pairs that identify or categorize your Amazon Q Business + application. You can also use tags to help control access to the application. Tag keys and + values can consist of Unicode letters, digits, white space, and any of the following + symbols: _ . : / = + - @. +""" +function create_application(displayName; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "POST", + "/applications", + Dict{String,Any}("displayName" => displayName, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_application( + displayName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "displayName" => displayName, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_data_source(application_id, configuration, display_name, index_id) + create_data_source(application_id, configuration, display_name, index_id, params::Dict{String,<:Any}) + +Creates a data source connector for an Amazon Q Business application. 
CreateDataSource is +a synchronous operation. The operation returns 200 if the data source was successfully +created. Otherwise, an exception is raised. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application the data source + will be attached to. +- `configuration`: Configuration information to connect to your data source repository. For + configuration templates for your specific data source, see Supported connectors. +- `display_name`: A name for the data source connector. +- `index_id`: The identifier of the index that you want to use with the data source + connector. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token you provide to identify a request to create a data source + connector. Multiple calls to the CreateDataSource API with the same client token will + create only one data source connector. +- `"description"`: A description for the data source connector. +- `"documentEnrichmentConfiguration"`: +- `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permission to access the + data source and required resources. +- `"syncSchedule"`: Sets the frequency for Amazon Q Business to check the documents in your + data source repository and update your index. If you don't set a schedule, Amazon Q + Business won't periodically update the index. Specify a cron- format schedule string or an + empty string to indicate that the index is updated on demand. You can't specify the + Schedule parameter when the Type parameter is set to CUSTOM. If you do, you receive a + ValidationException exception. +- `"tags"`: A list of key-value pairs that identify or categorize the data source + connector. You can also use tags to help control access to the data source connector. Tag + keys and values can consist of Unicode letters, digits, white space, and any of the + following symbols: _ . : / = + - @. +- `"vpcConfiguration"`: Configuration information for an Amazon VPC (Virtual Private Cloud) + to connect to your data source. For more information, see Using Amazon VPC with Amazon Q + Business connectors. +""" +function create_data_source( + applicationId, + configuration, + displayName, + indexId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/datasources", + Dict{String,Any}( + "configuration" => configuration, + "displayName" => displayName, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_source( + applicationId, + configuration, + displayName, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/datasources", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, + "displayName" => displayName, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_index(application_id, display_name) + create_index(application_id, display_name, params::Dict{String,<:Any}) + +Creates an Amazon Q Business index. To determine if index creation has completed, check the +Status field returned from a call to DescribeIndex. The Status field is set to ACTIVE when +the index is ready to use. 
Once the index is active, you can index your documents using the + BatchPutDocument API or the CreateDataSource API. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application using the index. +- `display_name`: A name for the Amazon Q Business index. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"capacityConfiguration"`: The capacity units you want to provision for your index. You + can add and remove capacity to fit your usage needs. +- `"clientToken"`: A token that you provide to identify the request to create an index. + Multiple calls to the CreateIndex API with the same client token will create only one index. +- `"description"`: A description for the Amazon Q Business index. +- `"tags"`: A list of key-value pairs that identify or categorize the index. You can also + use tags to help control access to the index. Tag keys and values can consist of Unicode + letters, digits, white space, and any of the following symbols: _ . : / = + - @. +- `"type"`: The index type that's suitable for your needs. For more information on what's + included in each type of index, see Amazon Q Business tiers. +""" +function create_index( + applicationId, displayName; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices", + Dict{String,Any}("displayName" => displayName, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_index( + applicationId, + displayName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "displayName" => displayName, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_plugin(application_id, auth_configuration, display_name, type) + create_plugin(application_id, auth_configuration, display_name, type, params::Dict{String,<:Any}) + +Creates an Amazon Q Business plugin. + +# Arguments +- `application_id`: The identifier of the application that will contain the plugin. +- `auth_configuration`: +- `display_name`: A the name for your plugin. +- `type`: The type of plugin you want to create. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that you provide to identify the request to create your Amazon Q + Business plugin. +- `"customPluginConfiguration"`: Contains configuration for a custom plugin. +- `"serverUrl"`: The source URL used for plugin configuration. +- `"tags"`: A list of key-value pairs that identify or categorize the data source + connector. You can also use tags to help control access to the data source connector. Tag + keys and values can consist of Unicode letters, digits, white space, and any of the + following symbols: _ . : / = + - @. 
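A sketch of the create-then-poll flow the CreateIndex docstring describes: create the index, wait for it to become ACTIVE, and only then ingest documents. The identifiers are hypothetical and the indexId/status response fields are assumptions.

using AWS
@service QBusiness

application_id = "app-0123456789abcdef"            # hypothetical
idx = QBusiness.create_index(application_id, "docs-index")
index_id = idx["indexId"]                          # response field assumed

# Poll until the index is ACTIVE before calling BatchPutDocument or CreateDataSource.
while QBusiness.get_index(application_id, index_id)["status"] != "ACTIVE"
    sleep(30)
end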
+""" +function create_plugin( + applicationId, + authConfiguration, + displayName, + type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/plugins", + Dict{String,Any}( + "authConfiguration" => authConfiguration, + "displayName" => displayName, + "type" => type, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_plugin( + applicationId, + authConfiguration, + displayName, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/plugins", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "authConfiguration" => authConfiguration, + "displayName" => displayName, + "type" => type, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_retriever(application_id, configuration, display_name, type) + create_retriever(application_id, configuration, display_name, type, params::Dict{String,<:Any}) + +Adds a retriever to your Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of your Amazon Q Business application. +- `configuration`: +- `display_name`: The name of your retriever. +- `type`: The type of retriever you are using. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token that you provide to identify the request to create your Amazon Q + Business application retriever. +- `"roleArn"`: The ARN of an IAM role used by Amazon Q Business to access the basic + authentication credentials stored in a Secrets Manager secret. +- `"tags"`: A list of key-value pairs that identify or categorize the retriever. You can + also use tags to help control access to the retriever. Tag keys and values can consist of + Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @. +""" +function create_retriever( + applicationId, + configuration, + displayName, + type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/retrievers", + Dict{String,Any}( + "configuration" => configuration, + "displayName" => displayName, + "type" => type, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_retriever( + applicationId, + configuration, + displayName, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/retrievers", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, + "displayName" => displayName, + "type" => type, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_user(application_id, user_id) + create_user(application_id, user_id, params::Dict{String,<:Any}) + +Creates a universally unique identifier (UUID) mapped to a list of local user ids within an +application. + +# Arguments +- `application_id`: The identifier of the application for which the user mapping will be + created. +- `user_id`: The user emails attached to a user mapping. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"clientToken"`: A token that you provide to identify the request to create your Amazon Q + Business user mapping. +- `"userAliases"`: The list of user aliases in the mapping. +""" +function create_user( + applicationId, userId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "POST", + "/applications/$(applicationId)/users", + Dict{String,Any}("userId" => userId, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_user( + applicationId, + userId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/users", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("userId" => userId, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_web_experience(application_id) + create_web_experience(application_id, params::Dict{String,<:Any}) + +Creates an Amazon Q Business web experience. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business web experience. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A token you provide to identify a request to create an Amazon Q Business + web experience. +- `"roleArn"`: The Amazon Resource Name (ARN) of the service role attached to your web + experience. +- `"samplePromptsControlMode"`: Determines whether sample prompts are enabled in the web + experience for an end user. +- `"subtitle"`: A subtitle to personalize your Amazon Q Business web experience. +- `"tags"`: A list of key-value pairs that identify or categorize your Amazon Q Business + web experience. You can also use tags to help control access to the web experience. Tag + keys and values can consist of Unicode letters, digits, white space, and any of the + following symbols: _ . : / = + - @. +- `"title"`: The title for your Amazon Q Business web experience. +- `"welcomeMessage"`: The customized welcome message for end users of an Amazon Q Business + web experience. +""" +function create_web_experience( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "POST", + "/applications/$(applicationId)/experiences", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_web_experience( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/experiences", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_application(application_id) + delete_application(application_id, params::Dict{String,<:Any}) + +Deletes an Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application. 
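A sketch of customizing a web experience with the optional parameters above; all identifiers and the samplePromptsControlMode value are hypothetical placeholders.

using AWS
@service QBusiness

QBusiness.create_web_experience(
    "app-0123456789abcdef",                        # applicationId (hypothetical)
    Dict(
        "title" => "Company Assistant",
        "subtitle" => "Ask questions about internal docs",
        "welcomeMessage" => "Hi! How can I help you today?",
        "samplePromptsControlMode" => "ENABLED",   # value assumed
        "roleArn" => "arn:aws:iam::111122223333:role/qbusiness-web-role",  # hypothetical
    ),
)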
+ +""" +function delete_application( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_application( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_chat_controls_configuration(application_id) + delete_chat_controls_configuration(application_id, params::Dict{String,<:Any}) + +Deletes chat controls configured for an existing Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the application the chat controls have been + configured for. + +""" +function delete_chat_controls_configuration( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/chatcontrols"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_chat_controls_configuration( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/chatcontrols", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_conversation(application_id, conversation_id) + delete_conversation(application_id, conversation_id, params::Dict{String,<:Any}) + +Deletes an Amazon Q Business web experience conversation. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application associated with the + conversation. +- `conversation_id`: The identifier of the Amazon Q Business web experience conversation + being deleted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"userId"`: The identifier of the user who is deleting the conversation. +""" +function delete_conversation( + applicationId, conversationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/conversations/$(conversationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_conversation( + applicationId, + conversationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/conversations/$(conversationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_data_source(application_id, data_source_id, index_id) + delete_data_source(application_id, data_source_id, index_id, params::Dict{String,<:Any}) + +Deletes an Amazon Q Business data source connector. While the data source is being deleted, +the Status field returned by a call to the DescribeDataSource API is set to DELETING. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application used with the data + source connector. +- `data_source_id`: The identifier of the data source connector that you want to delete. +- `index_id`: The identifier of the index used with the data source connector. 
+ +""" +function delete_data_source( + applicationId, dataSourceId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_data_source( + applicationId, + dataSourceId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_group(application_id, group_name, index_id) + delete_group(application_id, group_name, index_id, params::Dict{String,<:Any}) + +Deletes a group so that all users and sub groups that belong to the group can no longer +access documents only available to that group. For example, after deleting the group +\"Summer Interns\", all interns who belonged to that group no longer see intern-only +documents in their chat results. If you want to delete, update, or replace users or sub +groups of a group, you need to use the PutGroup operation. For example, if a user in the +group \"Engineering\" leaves the engineering team and another user takes their place, you +provide an updated list of users or sub groups that belong to the \"Engineering\" group +when calling PutGroup. + +# Arguments +- `application_id`: The identifier of the application in which the group mapping belongs. +- `group_name`: The name of the group you want to delete. +- `index_id`: The identifier of the index you want to delete the group from. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataSourceId"`: The identifier of the data source linked to the group A group can be + tied to multiple data sources. You can delete a group from accessing documents in a certain + data source. For example, the groups \"Research\", \"Engineering\", and \"Sales and + Marketing\" are all tied to the company's documents stored in the data sources Confluence + and Salesforce. You want to delete \"Research\" and \"Engineering\" groups from Salesforce, + so that these groups cannot access customer-related documents stored in Salesforce. Only + \"Sales and Marketing\" should access documents in the Salesforce data source. +""" +function delete_group( + applicationId, groupName, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/indices/$(indexId)/groups/$(groupName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_group( + applicationId, + groupName, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/indices/$(indexId)/groups/$(groupName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_index(application_id, index_id) + delete_index(application_id, index_id, params::Dict{String,<:Any}) + +Deletes an Amazon Q Business index. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application the Amazon Q + Business index is linked to. +- `index_id`: The identifier of the Amazon Q Business index. 
+ +""" +function delete_index( + applicationId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/indices/$(indexId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_index( + applicationId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/indices/$(indexId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_plugin(application_id, plugin_id) + delete_plugin(application_id, plugin_id, params::Dict{String,<:Any}) + +Deletes an Amazon Q Business plugin. + +# Arguments +- `application_id`: The identifier the application attached to the Amazon Q Business plugin. +- `plugin_id`: The identifier of the plugin being deleted. + +""" +function delete_plugin( + applicationId, pluginId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/plugins/$(pluginId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_plugin( + applicationId, + pluginId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/plugins/$(pluginId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_retriever(application_id, retriever_id) + delete_retriever(application_id, retriever_id, params::Dict{String,<:Any}) + +Deletes the retriever used by an Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application using the retriever. +- `retriever_id`: The identifier of the retriever being deleted. + +""" +function delete_retriever( + applicationId, retrieverId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/retrievers/$(retrieverId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_retriever( + applicationId, + retrieverId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/retrievers/$(retrieverId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_user(application_id, user_id) + delete_user(application_id, user_id, params::Dict{String,<:Any}) + +Deletes a user by email id. + +# Arguments +- `application_id`: The identifier of the application from which the user is being deleted. +- `user_id`: The user email being deleted. + +""" +function delete_user( + applicationId, userId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/users/$(userId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_user( + applicationId, + userId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/users/$(userId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_web_experience(application_id, web_experience_id) + delete_web_experience(application_id, web_experience_id, params::Dict{String,<:Any}) + +Deletes an Amazon Q Business web experience. 
+ +# Arguments +- `application_id`: The identifier of the Amazon Q Business application linked to the + Amazon Q Business web experience. +- `web_experience_id`: The identifier of the Amazon Q Business web experience being deleted. + +""" +function delete_web_experience( + applicationId, webExperienceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/experiences/$(webExperienceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_web_experience( + applicationId, + webExperienceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/applications/$(applicationId)/experiences/$(webExperienceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_application(application_id) + get_application(application_id, params::Dict{String,<:Any}) + +Gets information about an existing Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application. + +""" +function get_application(applicationId; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "GET", + "/applications/$(applicationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_application( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_chat_controls_configuration(application_id) + get_chat_controls_configuration(application_id, params::Dict{String,<:Any}) + +Gets information about an chat controls configured for an existing Amazon Q Business +application. + +# Arguments +- `application_id`: The identifier of the application for which the chat controls are + configured. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of configured chat controls to return. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of Amazon Q Business chat controls configured. +""" +function get_chat_controls_configuration( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/chatcontrols"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_chat_controls_configuration( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/chatcontrols", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_data_source(application_id, data_source_id, index_id) + get_data_source(application_id, data_source_id, index_id, params::Dict{String,<:Any}) + +Gets information about an existing Amazon Q Business data source connector. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application. +- `data_source_id`: The identifier of the data source connector. +- `index_id`: The identfier of the index used with the data source connector. 
+ +""" +function get_data_source( + applicationId, dataSourceId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_data_source( + applicationId, + dataSourceId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_group(application_id, group_name, index_id) + get_group(application_id, group_name, index_id, params::Dict{String,<:Any}) + +Describes a group by group name. + +# Arguments +- `application_id`: The identifier of the application id the group is attached to. +- `group_name`: The name of the group. +- `index_id`: The identifier of the index the group is attached to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataSourceId"`: The identifier of the data source the group is attached to. +""" +function get_group( + applicationId, groupName, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/groups/$(groupName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_group( + applicationId, + groupName, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/groups/$(groupName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_index(application_id, index_id) + get_index(application_id, index_id, params::Dict{String,<:Any}) + +Gets information about an existing Amazon Q Business index. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application connected to the + index. +- `index_id`: The identifier of the Amazon Q Business index you want information on. + +""" +function get_index( + applicationId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_index( + applicationId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_plugin(application_id, plugin_id) + get_plugin(application_id, plugin_id, params::Dict{String,<:Any}) + +Gets information about an existing Amazon Q Business plugin. + +# Arguments +- `application_id`: The identifier of the application which contains the plugin. +- `plugin_id`: The identifier of the plugin. 
+ +""" +function get_plugin( + applicationId, pluginId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/plugins/$(pluginId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_plugin( + applicationId, + pluginId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/plugins/$(pluginId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_retriever(application_id, retriever_id) + get_retriever(application_id, retriever_id, params::Dict{String,<:Any}) + +Gets information about an existing retriever used by an Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application using the retriever. +- `retriever_id`: The identifier of the retriever. + +""" +function get_retriever( + applicationId, retrieverId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/retrievers/$(retrieverId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_retriever( + applicationId, + retrieverId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/retrievers/$(retrieverId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_user(application_id, user_id) + get_user(application_id, user_id, params::Dict{String,<:Any}) + +Describes the universally unique identifier (UUID) associated with a local user in a data +source. + +# Arguments +- `application_id`: The identifier of the application connected to the user. +- `user_id`: The user email address attached to the user. + +""" +function get_user(applicationId, userId; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "GET", + "/applications/$(applicationId)/users/$(userId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_user( + applicationId, + userId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/users/$(userId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_web_experience(application_id, web_experience_id) + get_web_experience(application_id, web_experience_id, params::Dict{String,<:Any}) + +Gets information about an existing Amazon Q Business web experience. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application linked to the web + experience. +- `web_experience_id`: The identifier of the Amazon Q Business web experience. 
+ +""" +function get_web_experience( + applicationId, webExperienceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/experiences/$(webExperienceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_web_experience( + applicationId, + webExperienceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/experiences/$(webExperienceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_applications() + list_applications(params::Dict{String,<:Any}) + +Lists Amazon Q Business applications. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of Amazon Q Business applications to return. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of Amazon Q Business applications. +""" +function list_applications(; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "GET", "/applications"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_applications( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_conversations(application_id) + list_conversations(application_id, params::Dict{String,<:Any}) + +Lists one or more Amazon Q Business conversations. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of Amazon Q Business conversations to return. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of Amazon Q Business conversations. +- `"userId"`: The identifier of the user involved in the Amazon Q Business web experience + conversation. +""" +function list_conversations( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/conversations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_conversations( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/conversations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_data_source_sync_jobs(application_id, data_source_id, index_id) + list_data_source_sync_jobs(application_id, data_source_id, index_id, params::Dict{String,<:Any}) + +Get information about an Amazon Q Business data source connector synchronization. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application connected to the + data source. +- `data_source_id`: The identifier of the data source connector. +- `index_id`: The identifier of the index used with the Amazon Q Business data source + connector. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"endTime"`: The end time of the data source connector sync. +- `"maxResults"`: The maximum number of synchronization jobs to return in the response. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of responses. +- `"startTime"`: The start time of the data source connector sync. +- `"syncStatus"`: Only returns synchronization jobs with the Status field equal to the + specified status. +""" +function list_data_source_sync_jobs( + applicationId, dataSourceId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)/syncjobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_source_sync_jobs( + applicationId, + dataSourceId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)/syncjobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_data_sources(application_id, index_id) + list_data_sources(application_id, index_id, params::Dict{String,<:Any}) + +Lists the Amazon Q Business data source connectors that you have created. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application linked to the data + source connectors. +- `index_id`: The identifier of the index used with one or more data source connectors. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of data source connectors to return. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of Amazon Q Business data source connectors. +""" +function list_data_sources( + applicationId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/datasources"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_sources( + applicationId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/datasources", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_documents(application_id, index_id) + list_documents(application_id, index_id, params::Dict{String,<:Any}) + +A list of documents attached to an index. + +# Arguments +- `application_id`: The identifier of the application id the documents are attached to. +- `index_id`: The identifier of the index the documents are attached to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataSourceIds"`: The identifier of the data sources the documents are attached to. +- `"maxResults"`: The maximum number of documents to return.
+- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of documents. +""" +function list_documents( + applicationId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/index/$(indexId)/documents"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_documents( + applicationId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/index/$(indexId)/documents", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_groups(application_id, index_id, updated_earlier_than) + list_groups(application_id, index_id, updated_earlier_than, params::Dict{String,<:Any}) + +Provides a list of groups that are mapped to users. + +# Arguments +- `application_id`: The identifier of the application for getting a list of groups mapped + to users. +- `index_id`: The identifier of the index for getting a list of groups mapped to users. +- `updated_earlier_than`: The timestamp identifier used for the latest PUT or DELETE action + for mapping users to their groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataSourceId"`: The identifier of the data source for getting a list of groups mapped + to users. +- `"maxResults"`: The maximum number of returned groups that are mapped to users. +- `"nextToken"`: If the previous response was incomplete (because there is more data to + retrieve), Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of groups that are mapped to users. +""" +function list_groups( + applicationId, + indexId, + updatedEarlierThan; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/groups", + Dict{String,Any}("updatedEarlierThan" => updatedEarlierThan); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_groups( + applicationId, + indexId, + updatedEarlierThan, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices/$(indexId)/groups", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("updatedEarlierThan" => updatedEarlierThan), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_indices(application_id) + list_indices(application_id, params::Dict{String,<:Any}) + +Lists the Amazon Q Business indices you have created. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application connected to the + index. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of indices to return. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of Amazon Q Business indices. 
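+
+# Examples
+A minimal usage sketch showing the optional `maxResults` parameter; the application
+identifier below is a placeholder:
+```julia
+indices = list_indices("my-application-id", Dict("maxResults" => 10))
+```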
+""" +function list_indices(applicationId; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_indices( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/indices", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_messages(application_id, conversation_id) + list_messages(application_id, conversation_id, params::Dict{String,<:Any}) + +Gets a list of messages associated with an Amazon Q Business web experience. + +# Arguments +- `application_id`: The identifier for the Amazon Q Business application. +- `conversation_id`: The identifier of the Amazon Q Business web experience conversation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of messages to return. +- `"nextToken"`: If the number of retrievers returned exceeds maxResults, Amazon Q Business + returns a next token as a pagination token to retrieve the next set of messages. +- `"userId"`: The identifier of the user involved in the Amazon Q Business web experience + conversation. +""" +function list_messages( + applicationId, conversationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/conversations/$(conversationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_messages( + applicationId, + conversationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/conversations/$(conversationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_plugins(application_id) + list_plugins(application_id, params::Dict{String,<:Any}) + +Lists configured Amazon Q Business plugins. + +# Arguments +- `application_id`: The identifier of the application the plugin is attached to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of documents to return. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of plugins. +""" +function list_plugins(applicationId; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "GET", + "/applications/$(applicationId)/plugins"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_plugins( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/plugins", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_retrievers(application_id) + list_retrievers(application_id, params::Dict{String,<:Any}) + +Lists the retriever used by an Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application using the retriever. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: The maximum number of retrievers returned. +- `"nextToken"`: If the number of retrievers returned exceeds maxResults, Amazon Q Business + returns a next token as a pagination token to retrieve the next set of retrievers. +""" +function list_retrievers(applicationId; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "GET", + "/applications/$(applicationId)/retrievers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_retrievers( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/retrievers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Gets a list of tags associated with a specified resource. Amazon Q Business applications +and data sources can have tags associated with them. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Q Business application or + data source to get a list of tags for. + +""" +function list_tags_for_resource( + resourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/v1/tags/$(resourceARN)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/v1/tags/$(resourceARN)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_web_experiences(application_id) + list_web_experiences(application_id, params::Dict{String,<:Any}) + +Lists one or more Amazon Q Business Web Experiences. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application linked to the + listed web experiences. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of Amazon Q Business Web Experiences to return. +- `"nextToken"`: If the maxResults response was incomplete because there is more data to + retrieve, Amazon Q Business returns a pagination token in the response. You can use this + pagination token to retrieve the next set of Amazon Q Business conversations. +""" +function list_web_experiences( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "GET", + "/applications/$(applicationId)/experiences"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_web_experiences( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "GET", + "/applications/$(applicationId)/experiences", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_feedback(application_id, conversation_id, message_id) + put_feedback(application_id, conversation_id, message_id, params::Dict{String,<:Any}) + +Enables your end user to provide feedback on their Amazon Q Business generated chat +responses. + +# Arguments +- `application_id`: The identifier of the application associated with the feedback. +- `conversation_id`: The identifier of the conversation the feedback is attached to. +- `message_id`: The identifier of the chat message that the feedback was given for. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"messageCopiedAt"`: The timestamp for when the feedback was recorded. +- `"messageUsefulness"`: The feedback usefulness value given by the user to the chat + message. +- `"userId"`: The identifier of the user giving the feedback. +""" +function put_feedback( + applicationId, + conversationId, + messageId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/conversations/$(conversationId)/messages/$(messageId)/feedback"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_feedback( + applicationId, + conversationId, + messageId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/conversations/$(conversationId)/messages/$(messageId)/feedback", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_group(application_id, group_members, group_name, index_id, type) + put_group(application_id, group_members, group_name, index_id, type, params::Dict{String,<:Any}) + +Creates or updates a mapping of users—who have access to a document—to groups. You can +also map sub groups to groups. For example, the group \"Company Intellectual Property +Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include +their own list of users or people who work in these teams. Only users who work in research +and engineering, and therefore belong in the intellectual property group, can see +top-secret company documents in their Amazon Q Business chat results. + +# Arguments +- `application_id`: The identifier of the application in which the user and group mapping + belongs. +- `group_members`: +- `group_name`: The list that contains your users or sub groups that belong to the same group. + For example, the group \"Company\" includes the user \"CEO\" and the sub groups + \"Research\", \"Engineering\", and \"Sales and Marketing\". If you have more than 1000 + users and/or sub groups for a single group, you need to provide the path to the S3 file + that lists your users and sub groups for a group. Your sub groups can contain more than + 1000 users, but the list of sub groups that belong to a group (and/or users) must be no + more than 1000. +- `index_id`: The identifier of the index in which you want to map users to their groups. +- `type`: The type of the group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dataSourceId"`: The identifier of the data source for which you want to map users to + their groups. This is useful if a group is tied to multiple data sources, but you only want + the group to access documents of a certain data source. For example, the groups + \"Research\", \"Engineering\", and \"Sales and Marketing\" are all tied to the company's + documents stored in the data sources Confluence and Salesforce. However, the \"Sales and + Marketing\" team only needs access to customer-related documents stored in Salesforce.
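+
+# Examples
+A minimal usage sketch; the identifiers are placeholders and the `groupMembers` payload
+shape is illustrative only (see the Amazon Q Business API reference for the exact member
+structure):
+```julia
+put_group(
+    "my-application-id",
+    Dict("memberUsers" => [Dict("userId" => "user-1234", "type" => "INDEX")]),
+    "engineering",
+    "my-index-id",
+    "INDEX",
+)
+```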
+""" +function put_group( + applicationId, + groupMembers, + groupName, + indexId, + type; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/indices/$(indexId)/groups", + Dict{String,Any}( + "groupMembers" => groupMembers, "groupName" => groupName, "type" => type + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_group( + applicationId, + groupMembers, + groupName, + indexId, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/indices/$(indexId)/groups", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "groupMembers" => groupMembers, "groupName" => groupName, "type" => type + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_data_source_sync_job(application_id, data_source_id, index_id) + start_data_source_sync_job(application_id, data_source_id, index_id, params::Dict{String,<:Any}) + +Starts a data source connector synchronization job. If a synchronization job is already in +progress, Amazon Q Business returns a ConflictException. + +# Arguments +- `application_id`: The identifier of Amazon Q Business application the data source is + connected to. +- `data_source_id`: The identifier of the data source connector. +- `index_id`: The identifier of the index used with the data source connector. + +""" +function start_data_source_sync_job( + applicationId, dataSourceId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)/startsync"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_data_source_sync_job( + applicationId, + dataSourceId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)/startsync", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_data_source_sync_job(application_id, data_source_id, index_id) + stop_data_source_sync_job(application_id, data_source_id, index_id, params::Dict{String,<:Any}) + +Stops an Amazon Q Business data source connector synchronization job already in progress. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application that the data + source is connected to. +- `data_source_id`: The identifier of the data source connector. +- `index_id`: The identifier of the index used with the Amazon Q Business data source + connector. 
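+
+# Examples
+A minimal usage sketch; all three identifiers below are placeholders:
+```julia
+stop_data_source_sync_job("my-application-id", "my-data-source-id", "my-index-id")
+```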
+ +""" +function stop_data_source_sync_job( + applicationId, dataSourceId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)/stopsync"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_data_source_sync_job( + applicationId, + dataSourceId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)/stopsync", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds the specified tag to the specified Amazon Q Business application or data source +resource. If the tag already exists, the existing value is replaced with the new value. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Q Business application or + data source to tag. +- `tags`: A list of tag keys to add to the Amazon Q Business application or data source. If + a tag already exists, the existing value is replaced with the new value. + +""" +function tag_resource(resourceARN, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return qbusiness( + "POST", + "/v1/tags/$(resourceARN)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceARN, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "POST", + "/v1/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes a tag from an Amazon Q Business application or a data source. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Q Business application, or + data source to remove the tag from. +- `tag_keys`: A list of tag keys to remove from the Amazon Q Business application or data + source. If a tag key does not exist on the resource, it is ignored. + +""" +function untag_resource( + resourceARN, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "DELETE", + "/v1/tags/$(resourceARN)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceARN, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "DELETE", + "/v1/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_application(application_id) + update_application(application_id, params::Dict{String,<:Any}) + +Updates an existing Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attachmentsConfiguration"`: An option to allow end users to upload files directly + during chat. +- `"description"`: A description for the Amazon Q Business application. 
+- `"displayName"`: A name for the Amazon Q Business application. +- `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center + instance you are either creating for—or connecting to—your Amazon Q Business + application. +- `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in + the web experience. +- `"roleArn"`: An Amazon Web Services Identity and Access Management (IAM) role that gives + Amazon Q Business permission to access Amazon CloudWatch logs and metrics. +""" +function update_application( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PUT", + "/applications/$(applicationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_application( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_chat_controls_configuration(application_id) + update_chat_controls_configuration(application_id, params::Dict{String,<:Any}) + +Updates a set of chat controls configured for an existing Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of the application for which the chat controls are + configured. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"blockedPhrasesConfigurationUpdate"`: The phrases blocked from chat by your chat control + configuration. +- `"clientToken"`: A token that you provide to identify the request to update an Amazon Q + Business application chat configuration. +- `"creatorModeConfiguration"`: The configuration details for CREATOR_MODE. +- `"responseScope"`: The response scope configured for your application. This determines + whether your application uses its retrieval augmented generation (RAG) system to generate + answers only from your enterprise data, or also uses the large language models (LLM) + knowledge to respond to end user questions in chat. +- `"topicConfigurationsToCreateOrUpdate"`: The configured topic specific chat controls you + want to update. +- `"topicConfigurationsToDelete"`: The configured topic specific chat controls you want to + delete. +""" +function update_chat_controls_configuration( + applicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PATCH", + "/applications/$(applicationId)/chatcontrols", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_chat_controls_configuration( + applicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PATCH", + "/applications/$(applicationId)/chatcontrols", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_data_source(application_id, data_source_id, index_id) + update_data_source(application_id, data_source_id, index_id, params::Dict{String,<:Any}) + +Updates an existing Amazon Q Business data source connector. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application the data source is + attached to. +- `data_source_id`: The identifier of the data source connector.
+- `index_id`: The identifier of the index attached to the data source connector. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"configuration"`: +- `"description"`: The description of the data source connector. +- `"displayName"`: A name of the data source connector. +- `"documentEnrichmentConfiguration"`: +- `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permission to access the + data source and required resources. +- `"syncSchedule"`: The chosen update frequency for your data source. +- `"vpcConfiguration"`: +""" +function update_data_source( + applicationId, dataSourceId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_data_source( + applicationId, + dataSourceId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/indices/$(indexId)/datasources/$(dataSourceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_index(application_id, index_id) + update_index(application_id, index_id, params::Dict{String,<:Any}) + +Updates an Amazon Q Business index. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application connected to the + index. +- `index_id`: The identifier of the Amazon Q Business index. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"capacityConfiguration"`: The storage capacity units you want to provision for your + Amazon Q Business index. You can add and remove capacity to fit your usage needs. +- `"description"`: The description of the Amazon Q Business index. +- `"displayName"`: The name of the Amazon Q Business index. +- `"documentAttributeConfigurations"`: Configuration information for document metadata or + fields. Document metadata are fields or attributes associated with your documents. For + example, the company department name associated with each document. For more information, + see Understanding document attributes. +""" +function update_index( + applicationId, indexId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/indices/$(indexId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_index( + applicationId, + indexId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/indices/$(indexId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_plugin(application_id, plugin_id) + update_plugin(application_id, plugin_id, params::Dict{String,<:Any}) + +Updates an Amazon Q Business plugin. + +# Arguments +- `application_id`: The identifier of the application the plugin is attached to. +- `plugin_id`: The identifier of the plugin. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"authConfiguration"`: The authentication configuration the plugin is using. +- `"customPluginConfiguration"`: The configuration for a custom plugin. +- `"displayName"`: The name of the plugin. 
+- `"serverUrl"`: The source URL used for plugin configuration. +- `"state"`: The status of the plugin. +""" +function update_plugin( + applicationId, pluginId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/plugins/$(pluginId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_plugin( + applicationId, + pluginId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/plugins/$(pluginId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_retriever(application_id, retriever_id) + update_retriever(application_id, retriever_id, params::Dict{String,<:Any}) + +Updates the retriever used for your Amazon Q Business application. + +# Arguments +- `application_id`: The identifier of your Amazon Q Business application. +- `retriever_id`: The identifier of your retriever. + +# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"configuration"`: +- `"displayName"`: The name of your retriever. +- `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permission to access the + retriever and required resources. +""" +function update_retriever( + applicationId, retrieverId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/retrievers/$(retrieverId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_retriever( + applicationId, + retrieverId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/retrievers/$(retrieverId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_user(application_id, user_id) + update_user(application_id, user_id, params::Dict{String,<:Any}) + +Updates information associated with a user id. + +# Arguments +- `application_id`: The identifier of the application the user is attached to. +- `user_id`: The email id attached to the user. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"userAliasesToDelete"`: The user aliases attached to the user id that are to be deleted. +- `"userAliasesToUpdate"`: The user aliases attached to the user id that are to be updated. +""" +function update_user( + applicationId, userId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/users/$(userId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_user( + applicationId, + userId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/users/$(userId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_web_experience(application_id, web_experience_id) + update_web_experience(application_id, web_experience_id, params::Dict{String,<:Any}) + +Updates an Amazon Q Business web experience. + +# Arguments +- `application_id`: The identifier of the Amazon Q Business application attached to the web + experience. +- `web_experience_id`: The identifier of the Amazon Q Business web experience.
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"authenticationConfiguration"`: The authentication configuration of the Amazon Q + Business web experience. +- `"roleArn"`: The Amazon Resource Name (ARN) of the role with permission to access the + Amazon Q Business web experience and required resources. +- `"samplePromptsControlMode"`: Determines whether sample prompts are enabled in the web + experience for an end user. +- `"subtitle"`: The subtitle of the Amazon Q Business web experience. +- `"title"`: The title of the Amazon Q Business web experience. +- `"welcomeMessage"`: A customized welcome message for an end user in an Amazon Q Business + web experience. +""" +function update_web_experience( + applicationId, webExperienceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/experiences/$(webExperienceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_web_experience( + applicationId, + webExperienceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qbusiness( + "PUT", + "/applications/$(applicationId)/experiences/$(webExperienceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/qconnect.jl b/src/services/qconnect.jl new file mode 100644 index 0000000000..29c813e3b2 --- /dev/null +++ b/src/services/qconnect.jl @@ -0,0 +1,1944 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: qconnect +using AWS.Compat +using AWS.UUIDs + +""" + create_assistant(name, type) + create_assistant(name, type, params::Dict{String,<:Any}) + +Creates an Amazon Q in Connect assistant. + +# Arguments +- `name`: The name of the assistant. +- `type`: The type of assistant. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"description"`: The description of the assistant. +- `"serverSideEncryptionConfiguration"`: The configuration information for the customer + managed key used for encryption. The customer managed key must have a policy that allows + kms:CreateGrant, kms:DescribeKey, kms:Decrypt, and kms:GenerateDataKey* permissions to the + IAM identity using the key to invoke Amazon Q in Connect. To use Amazon Q in Connect with + chat, the key policy must also allow kms:Decrypt, kms:GenerateDataKey*, and kms:DescribeKey + permissions to the connect.amazonaws.com service principal. For more information about + setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect + for your instance. +- `"tags"`: The tags used to organize, track, or control access for this resource. 
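+
+# Examples
+A minimal usage sketch, assuming the generated Q in Connect bindings are in scope (for
+example via AWS.jl's `@service` macro); the name is a placeholder and `"AGENT"` is shown
+as an illustrative assistant type:
+```julia
+assistant = create_assistant("my-assistant", "AGENT")
+```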
+""" +function create_assistant(name, type; aws_config::AbstractAWSConfig=global_aws_config()) + return qconnect( + "POST", + "/assistants", + Dict{String,Any}("name" => name, "type" => type, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_assistant( + name, + type, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "type" => type, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_assistant_association(assistant_id, association, association_type) + create_assistant_association(assistant_id, association, association_type, params::Dict{String,<:Any}) + +Creates an association between an Amazon Q in Connect assistant and another resource. +Currently, the only supported association is with a knowledge base. An assistant can have +only a single association. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `association`: The identifier of the associated resource. +- `association_type`: The type of association. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_assistant_association( + assistantId, + association, + associationType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/associations", + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_assistant_association( + assistantId, + association, + associationType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/associations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_content(knowledge_base_id, name, upload_id) + create_content(knowledge_base_id, name, upload_id, params::Dict{String,<:Any}) + +Creates Amazon Q in Connect content. Before calling this API, use StartContentUpload to +upload an asset. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain + the ARN. +- `name`: The name of the content. Each piece of content in a knowledge base must have a + unique name. You can retrieve a piece of content using only its knowledge base and its name + with the SearchContent API. +- `upload_id`: A pointer to the uploaded asset. This value is returned by + StartContentUpload.
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"metadata"`: A key/value map to store attributes without affecting tagging or + recommendations. For example, when synchronizing data between an external system and Amazon + Q in Connect, you can store an external version identifier as metadata to utilize for + determining drift. +- `"overrideLinkOutUri"`: The URI you want to use for the article. If the knowledge base + has a templateUri, setting this argument overrides it for this piece of content. +- `"tags"`: The tags used to organize, track, or control access for this resource. +- `"title"`: The title of the content. If not set, the title is equal to the name. +""" +function create_content( + knowledgeBaseId, name, uploadId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents", + Dict{String,Any}( + "name" => name, "uploadId" => uploadId, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_content( + knowledgeBaseId, + name, + uploadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "uploadId" => uploadId, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_knowledge_base(knowledge_base_type, name) + create_knowledge_base(knowledge_base_type, name, params::Dict{String,<:Any}) + +Creates a knowledge base. When using this API, you cannot reuse Amazon AppIntegrations +DataIntegrations with external knowledge bases such as Salesforce and ServiceNow. If you +do, you'll get an InvalidRequestException error. For example, you're programmatically +managing your external knowledge base, and you want to add or remove one of the fields that +is being ingested from Salesforce. Do the following: Call DeleteKnowledgeBase. Call +DeleteDataIntegration. Call CreateDataIntegration to recreate the DataIntegration or +create a different one. Call CreateKnowledgeBase. + +# Arguments +- `knowledge_base_type`: The type of knowledge base. Only CUSTOM knowledge bases allow you + to upload your own content. EXTERNAL knowledge bases support integrations with third-party + systems whose content is synchronized automatically. +- `name`: The name of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"description"`: The description. +- `"renderingConfiguration"`: Information about how to render the content. +- `"serverSideEncryptionConfiguration"`: The configuration information for the customer + managed key used for encryption.
This KMS key must have a policy that allows + kms:CreateGrant, kms:DescribeKey, kms:Decrypt, and kms:GenerateDataKey* permissions to the + IAM identity using the key to invoke Amazon Q in Connect. For more information about + setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect + for your instance. +- `"sourceConfiguration"`: The source of the knowledge base content. Only set this argument + for EXTERNAL knowledge bases. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_knowledge_base( + knowledgeBaseType, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases", + Dict{String,Any}( + "knowledgeBaseType" => knowledgeBaseType, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_knowledge_base( + knowledgeBaseType, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "knowledgeBaseType" => knowledgeBaseType, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_quick_response(content, knowledge_base_id, name) + create_quick_response(content, knowledge_base_id, name, params::Dict{String,<:Any}) + +Creates an Amazon Q in Connect quick response. + +# Arguments +- `content`: The content of the quick response. +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. +- `name`: The name of the quick response. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"channels"`: The Amazon Connect channels this quick response applies to. +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"contentType"`: The media type of the quick response content. Use + application/x.quickresponse;format=plain for a quick response written in plain text. Use + application/x.quickresponse;format=markdown for a quick response written in richtext. +- `"description"`: The description of the quick response. +- `"groupingConfiguration"`: The configuration information of the user groups that the + quick response is accessible to. +- `"isActive"`: Whether the quick response is active. +- `"language"`: The language code value for the language in which the quick response is + written. The supported language codes include de_DE, en_US, es_ES, fr_FR, id_ID, it_IT, + ja_JP, ko_KR, pt_BR, zh_CN, zh_TW +- `"shortcutKey"`: The shortcut key of the quick response. The value should be unique + across the knowledge base. +- `"tags"`: The tags used to organize, track, or control access for this resource. 
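+
+# Examples
+A minimal usage sketch; the identifiers are placeholders and the `content` payload shape
+is illustrative only:
+```julia
+create_quick_response(
+    Dict("content" => "Thank you for contacting us."),
+    "my-knowledge-base-id",
+    "greeting-response",
+)
+```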
+""" +function create_quick_response( + content, knowledgeBaseId, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses", + Dict{String,Any}( + "content" => content, "name" => name, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_quick_response( + content, + knowledgeBaseId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "content" => content, "name" => name, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_session(assistant_id, name) + create_session(assistant_id, name, params::Dict{String,<:Any}) + +Creates a session. A session is a contextual container used for generating recommendations. +Amazon Connect creates a new Amazon Q in Connect session for each contact on which Amazon Q +in Connect is enabled. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `name`: The name of the session. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"description"`: The description. +- `"tagFilter"`: An object that can be used to specify Tag conditions. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_session( + assistantId, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/assistants/$(assistantId)/sessions", + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_session( + assistantId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/sessions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_assistant(assistant_id) + delete_assistant(assistant_id, params::Dict{String,<:Any}) + +Deletes an assistant. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. 
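+
+# Examples
+A minimal usage sketch; the assistant identifier below is a placeholder:
+```julia
+delete_assistant("my-assistant-id")
+```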
+ +""" +function delete_assistant(assistantId; aws_config::AbstractAWSConfig=global_aws_config()) + return qconnect( + "DELETE", + "/assistants/$(assistantId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_assistant( + assistantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/assistants/$(assistantId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_assistant_association(assistant_association_id, assistant_id) + delete_assistant_association(assistant_association_id, assistant_id, params::Dict{String,<:Any}) + +Deletes an assistant association. + +# Arguments +- `assistant_association_id`: The identifier of the assistant association. Can be either + the ID or the ARN. URLs cannot contain the ARN. +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. + +""" +function delete_assistant_association( + assistantAssociationId, assistantId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "DELETE", + "/assistants/$(assistantId)/associations/$(assistantAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_assistant_association( + assistantAssociationId, + assistantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/assistants/$(assistantId)/associations/$(assistantAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_content(content_id, knowledge_base_id) + delete_content(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the content. + +# Arguments +- `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot + contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. + +""" +function delete_content( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_content( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_import_job(import_job_id, knowledge_base_id) + delete_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the quick response import job. + +# Arguments +- `import_job_id`: The identifier of the import job to be deleted. +- `knowledge_base_id`: The identifier of the knowledge base. 
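+
+# Examples
+A minimal usage sketch; both identifiers below are placeholders:
+```julia
+delete_import_job("my-import-job-id", "my-knowledge-base-id")
+```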
+ +""" +function delete_import_job( + importJobId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_import_job( + importJobId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_knowledge_base(knowledge_base_id) + delete_knowledge_base(knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the knowledge base. When you use this API to delete an external knowledge base +such as Salesforce or ServiceNow, you must also delete the Amazon AppIntegrations +DataIntegration. This is because you can't reuse the DataIntegration after it's been +associated with an external knowledge base. However, you can delete and recreate it. See +DeleteDataIntegration and CreateDataIntegration in the Amazon AppIntegrations API +Reference. + +# Arguments +- `knowledge_base_id`: The knowledge base to delete content from. Can be either the ID or + the ARN. URLs cannot contain the ARN. + +""" +function delete_knowledge_base( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_knowledge_base( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_quick_response(knowledge_base_id, quick_response_id) + delete_quick_response(knowledge_base_id, quick_response_id, params::Dict{String,<:Any}) + +Deletes a quick response. + +# Arguments +- `knowledge_base_id`: The knowledge base from which the quick response is deleted. The + identifier of the knowledge base. +- `quick_response_id`: The identifier of the quick response to delete. + +""" +function delete_quick_response( + knowledgeBaseId, quickResponseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_quick_response( + knowledgeBaseId, + quickResponseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_assistant(assistant_id) + get_assistant(assistant_id, params::Dict{String,<:Any}) + +Retrieves information about an assistant. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. 
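+
+# Examples
+A minimal usage sketch; the assistant identifier below is a placeholder:
+```julia
+assistant = get_assistant("my-assistant-id")
+```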
+ +""" +function get_assistant(assistantId; aws_config::AbstractAWSConfig=global_aws_config()) + return qconnect( + "GET", + "/assistants/$(assistantId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_assistant( + assistantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/assistants/$(assistantId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_assistant_association(assistant_association_id, assistant_id) + get_assistant_association(assistant_association_id, assistant_id, params::Dict{String,<:Any}) + +Retrieves information about an assistant association. + +# Arguments +- `assistant_association_id`: The identifier of the assistant association. Can be either + the ID or the ARN. URLs cannot contain the ARN. +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. + +""" +function get_assistant_association( + assistantAssociationId, assistantId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/assistants/$(assistantId)/associations/$(assistantAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_assistant_association( + assistantAssociationId, + assistantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/assistants/$(assistantId)/associations/$(assistantAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_content(content_id, knowledge_base_id) + get_content(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Retrieves content, including a pre-signed URL to download the content. + +# Arguments +- `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot + contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain + the ARN. + +""" +function get_content( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_content( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_content_summary(content_id, knowledge_base_id) + get_content_summary(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Retrieves summary information about the content. + +# Arguments +- `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot + contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. 
+ +""" +function get_content_summary( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/summary"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_content_summary( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/summary", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_import_job(import_job_id, knowledge_base_id) + get_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) + +Retrieves the started import job. + +# Arguments +- `import_job_id`: The identifier of the import job to retrieve. +- `knowledge_base_id`: The identifier of the knowledge base that the import job belongs to. + +""" +function get_import_job( + importJobId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_import_job( + importJobId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_knowledge_base(knowledge_base_id) + get_knowledge_base(knowledge_base_id, params::Dict{String,<:Any}) + +Retrieves information about the knowledge base. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. + +""" +function get_knowledge_base( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_knowledge_base( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_quick_response(knowledge_base_id, quick_response_id) + get_quick_response(knowledge_base_id, quick_response_id, params::Dict{String,<:Any}) + +Retrieves the quick response. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should be a + QUICK_RESPONSES type knowledge base. +- `quick_response_id`: The identifier of the quick response. 
+ +""" +function get_quick_response( + knowledgeBaseId, quickResponseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_quick_response( + knowledgeBaseId, + quickResponseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_recommendations(assistant_id, session_id) + get_recommendations(assistant_id, session_id, params::Dict{String,<:Any}) + + This API will be discontinued starting June 1, 2024. To receive generative responses after +March 1, 2024, you will need to create a new Assistant in the Amazon Connect console and +integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your +applications. Retrieves recommendations for the specified session. To avoid retrieving the +same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API +supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the +default behavior and only returns recommendations already available. To perform a manual +query against an assistant, use QueryAssistant. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `session_id`: The identifier of the session. Can be either the ID or the ARN. URLs cannot + contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"waitTimeSeconds"`: The duration (in seconds) for which the call waits for a + recommendation to be made available before returning. If a recommendation is available, the + call returns sooner than WaitTimeSeconds. If no messages are available and the wait time + expires, the call returns successfully with an empty list. +""" +function get_recommendations( + assistantId, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/assistants/$(assistantId)/sessions/$(sessionId)/recommendations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_recommendations( + assistantId, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/assistants/$(assistantId)/sessions/$(sessionId)/recommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_session(assistant_id, session_id) + get_session(assistant_id, session_id, params::Dict{String,<:Any}) + +Retrieves information for a specified session. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `session_id`: The identifier of the session. Can be either the ID or the ARN. URLs cannot + contain the ARN. 
+ +""" +function get_session( + assistantId, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/assistants/$(assistantId)/sessions/$(sessionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_session( + assistantId, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/assistants/$(assistantId)/sessions/$(sessionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_assistant_associations(assistant_id) + list_assistant_associations(assistant_id, params::Dict{String,<:Any}) + +Lists information about assistant associations. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_assistant_associations( + assistantId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/assistants/$(assistantId)/associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_assistant_associations( + assistantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/assistants/$(assistantId)/associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_assistants() + list_assistants(params::Dict{String,<:Any}) + +Lists information about assistants. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_assistants(; aws_config::AbstractAWSConfig=global_aws_config()) + return qconnect( + "GET", "/assistants"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_assistants( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", "/assistants", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_contents(knowledge_base_id) + list_contents(knowledge_base_id, params::Dict{String,<:Any}) + +Lists the content. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain + the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function list_contents(knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config()) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_contents( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_import_jobs(knowledge_base_id) + list_import_jobs(knowledge_base_id, params::Dict{String,<:Any}) + +Lists information about import jobs. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_import_jobs( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_import_jobs( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_knowledge_bases() + list_knowledge_bases(params::Dict{String,<:Any}) + +Lists the knowledge bases. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_knowledge_bases(; aws_config::AbstractAWSConfig=global_aws_config()) + return qconnect( + "GET", "/knowledgeBases"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_knowledge_bases( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_quick_responses(knowledge_base_id) + list_quick_responses(knowledge_base_id, params::Dict{String,<:Any}) + +Lists information about quick response. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function list_quick_responses( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_quick_responses( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags for the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + notify_recommendations_received(assistant_id, recommendation_ids, session_id) + notify_recommendations_received(assistant_id, recommendation_ids, session_id, params::Dict{String,<:Any}) + +Removes the specified recommendations from the specified assistant's queue of newly +available recommendations. You can use this API in conjunction with GetRecommendations and +a waitTimeSeconds input for long-polling behavior and avoiding duplicate recommendations. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `recommendation_ids`: The identifiers of the recommendations. +- `session_id`: The identifier of the session. Can be either the ID or the ARN. URLs cannot + contain the ARN. + +""" +function notify_recommendations_received( + assistantId, + recommendationIds, + sessionId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/sessions/$(sessionId)/recommendations/notify", + Dict{String,Any}("recommendationIds" => recommendationIds); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function notify_recommendations_received( + assistantId, + recommendationIds, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/sessions/$(sessionId)/recommendations/notify", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("recommendationIds" => recommendationIds), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_feedback(assistant_id, content_feedback, target_id, target_type) + put_feedback(assistant_id, content_feedback, target_id, target_type, params::Dict{String,<:Any}) + +Provides feedback against the specified assistant for the specified target. This API only +supports generative targets. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. +- `content_feedback`: Information about the feedback provided. +- `target_id`: The identifier of the feedback target. +- `target_type`: The type of the feedback target. 
+ +""" +function put_feedback( + assistantId, + contentFeedback, + targetId, + targetType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "PUT", + "/assistants/$(assistantId)/feedback", + Dict{String,Any}( + "contentFeedback" => contentFeedback, + "targetId" => targetId, + "targetType" => targetType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_feedback( + assistantId, + contentFeedback, + targetId, + targetType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "PUT", + "/assistants/$(assistantId)/feedback", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "contentFeedback" => contentFeedback, + "targetId" => targetId, + "targetType" => targetType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + query_assistant(assistant_id, query_text) + query_assistant(assistant_id, query_text, params::Dict{String,<:Any}) + + This API will be discontinued starting June 1, 2024. To receive generative responses after +March 1, 2024, you will need to create a new Assistant in the Amazon Connect console and +integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your +applications. Performs a manual search against the specified assistant. To retrieve +recommendations for an assistant, use GetRecommendations. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `query_text`: The text to search for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"queryCondition"`: Information about how to query content. +- `"sessionId"`: The identifier of the Amazon Q in Connect session. Can be either the ID or + the ARN. URLs cannot contain the ARN. +""" +function query_assistant( + assistantId, queryText; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/assistants/$(assistantId)/query", + Dict{String,Any}("queryText" => queryText); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function query_assistant( + assistantId, + queryText, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/query", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("queryText" => queryText), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + remove_knowledge_base_template_uri(knowledge_base_id) + remove_knowledge_base_template_uri(knowledge_base_id, params::Dict{String,<:Any}) + +Removes a URI template from a knowledge base. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. 
+ +""" +function remove_knowledge_base_template_uri( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/templateUri"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function remove_knowledge_base_template_uri( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/templateUri", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_content(knowledge_base_id, search_expression) + search_content(knowledge_base_id, search_expression, params::Dict{String,<:Any}) + +Searches for content in a specified knowledge base. Can be used to get a specific content +resource by its name. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain + the ARN. +- `search_expression`: The search expression to filter results. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function search_content( + knowledgeBaseId, searchExpression; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/search", + Dict{String,Any}("searchExpression" => searchExpression); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_content( + knowledgeBaseId, + searchExpression, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/search", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("searchExpression" => searchExpression), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_quick_responses(knowledge_base_id, search_expression) + search_quick_responses(knowledge_base_id, search_expression, params::Dict{String,<:Any}) + +Searches existing Amazon Q in Connect quick responses in an Amazon Q in Connect knowledge +base. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain + the ARN. +- `search_expression`: The search expression for querying the quick response. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attributes"`: The user-defined Amazon Connect contact attributes to be resolved when + search results are returned. +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function search_quick_responses( + knowledgeBaseId, searchExpression; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/search/quickResponses", + Dict{String,Any}("searchExpression" => searchExpression); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_quick_responses( + knowledgeBaseId, + searchExpression, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/search/quickResponses", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("searchExpression" => searchExpression), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_sessions(assistant_id, search_expression) + search_sessions(assistant_id, search_expression, params::Dict{String,<:Any}) + +Searches for sessions. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `search_expression`: The search expression to filter results. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function search_sessions( + assistantId, searchExpression; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/assistants/$(assistantId)/searchSessions", + Dict{String,Any}("searchExpression" => searchExpression); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_sessions( + assistantId, + searchExpression, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/searchSessions", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("searchExpression" => searchExpression), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_content_upload(content_type, knowledge_base_id) + start_content_upload(content_type, knowledge_base_id, params::Dict{String,<:Any}) + +Get a URL to upload content to a knowledge base. To upload content, first make a PUT +request to the returned URL with your file, making sure to include the required headers. +Then use CreateContent to finalize the content creation process or UpdateContent to modify +an existing resource. You can only upload content to a knowledge base of type CUSTOM. + +# Arguments +- `content_type`: The type of content to upload. +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"presignedUrlTimeToLive"`: The expected expiration time of the generated presigned URL, + specified in minutes. 
+""" +function start_content_upload( + contentType, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/upload", + Dict{String,Any}("contentType" => contentType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_content_upload( + contentType, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/upload", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("contentType" => contentType), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_import_job(import_job_type, knowledge_base_id, upload_id) + start_import_job(import_job_type, knowledge_base_id, upload_id, params::Dict{String,<:Any}) + +Start an asynchronous job to import Amazon Q in Connect resources from an uploaded source +file. Before calling this API, use StartContentUpload to upload an asset that contains the +resource data. For importing Amazon Q in Connect quick responses, you need to upload a +csv file including the quick responses. For information about how to format the csv file +for importing quick responses, see Import quick responses. + +# Arguments +- `import_job_type`: The type of the import job. For importing quick response resource, + set the value to QUICK_RESPONSES. +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. For importing Amazon Q in Connect quick responses, this + should be a QUICK_RESPONSES type knowledge base. +- `upload_id`: A pointer to the uploaded asset. This value is returned by + StartContentUpload. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: The tags used to organize, track, or control access for this resource. +- `"externalSourceConfiguration"`: The configuration information of the external source + that the resource data are imported from. +- `"metadata"`: The metadata fields of the imported Amazon Q in Connect resources. +""" +function start_import_job( + importJobType, + knowledgeBaseId, + uploadId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/importJobs", + Dict{String,Any}( + "importJobType" => importJobType, + "uploadId" => uploadId, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_import_job( + importJobType, + knowledgeBaseId, + uploadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/importJobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "importJobType" => importJobType, + "uploadId" => uploadId, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds the specified tags to the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tags`: The tags used to organize, track, or control access for this resource. 
+ +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return qconnect( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes the specified tags from the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tag_keys`: The tag keys. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_content(content_id, knowledge_base_id) + update_content(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Updates information about the content. + +# Arguments +- `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot + contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"metadata"`: A key/value map to store attributes without affecting tagging or + recommendations. For example, when synchronizing data between an external system and Amazon + Q in Connect, you can store an external version identifier as metadata to utilize for + determining drift. +- `"overrideLinkOutUri"`: The URI for the article. If the knowledge base has a templateUri, + setting this argument overrides it for this piece of content. To remove an existing + overrideLinkOurUri, exclude this argument and set removeOverrideLinkOutUri to true. +- `"removeOverrideLinkOutUri"`: Unset the existing overrideLinkOutUri if it exists. +- `"revisionId"`: The revisionId of the content resource to update, taken from an earlier + call to GetContent, GetContentSummary, SearchContent, or ListContents. If included, this + argument acts as an optimistic lock to ensure content was not modified since it was last + read. If it has been modified, this API throws a PreconditionFailedException. +- `"title"`: The title of the content. +- `"uploadId"`: A pointer to the uploaded asset. This value is returned by + StartContentUpload. 
+""" +function update_content( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_content( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_knowledge_base_template_uri(knowledge_base_id, template_uri) + update_knowledge_base_template_uri(knowledge_base_id, template_uri, params::Dict{String,<:Any}) + +Updates the template URI of a knowledge base. This is only supported for knowledge bases of +type EXTERNAL. Include a single variable in {variable} format; this interpolated by Amazon +Q in Connect using ingested content. For example, if you ingest a Salesforce article, it +has an Id value, and you can set the template URI to +https://myInstanceName.lightning.force.com/lightning/r/Knowledge__kav/*{Id}*/view. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain + the ARN. +- `template_uri`: The template URI to update. + +""" +function update_knowledge_base_template_uri( + knowledgeBaseId, templateUri; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/templateUri", + Dict{String,Any}("templateUri" => templateUri); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_knowledge_base_template_uri( + knowledgeBaseId, + templateUri, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/templateUri", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("templateUri" => templateUri), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_quick_response(knowledge_base_id, quick_response_id) + update_quick_response(knowledge_base_id, quick_response_id, params::Dict{String,<:Any}) + +Updates an existing Amazon Q in Connect quick response. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the + ARN. URLs cannot contain the ARN. +- `quick_response_id`: The identifier of the quick response. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"channels"`: The Amazon Connect contact channels this quick response applies to. The + supported contact channel types include Chat. +- `"content"`: The updated content of the quick response. +- `"contentType"`: The media type of the quick response content. Use + application/x.quickresponse;format=plain for quick response written in plain text. Use + application/x.quickresponse;format=markdown for quick response written in richtext. +- `"description"`: The updated description of the quick response. +- `"groupingConfiguration"`: The updated grouping configuration of the quick response. +- `"isActive"`: Whether the quick response is active. +- `"language"`: The language code value for the language in which the quick response is + written. 
The supported language codes include de_DE, en_US, es_ES, fr_FR, id_ID, it_IT, + ja_JP, ko_KR, pt_BR, zh_CN, zh_TW +- `"name"`: The name of the quick response. +- `"removeDescription"`: Whether to remove the description from the quick response. +- `"removeGroupingConfiguration"`: Whether to remove the grouping configuration of the + quick response. +- `"removeShortcutKey"`: Whether to remove the shortcut key of the quick response. +- `"shortcutKey"`: The shortcut key of the quick response. The value should be unique + across the knowledge base. +""" +function update_quick_response( + knowledgeBaseId, quickResponseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_quick_response( + knowledgeBaseId, + quickResponseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_session(assistant_id, session_id) + update_session(assistant_id, session_id, params::Dict{String,<:Any}) + +Updates a session. A session is a contextual container used for generating recommendations. +Amazon Connect updates the existing Amazon Q in Connect session for each contact on which +Amazon Q in Connect is enabled. + +# Arguments +- `assistant_id`: The identifier of the Amazon Q in Connect assistant. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `session_id`: The identifier of the session. Can be either the ID or the ARN. URLs cannot + contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description. +- `"tagFilter"`: An object that can be used to specify Tag conditions. +""" +function update_session( + assistantId, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "POST", + "/assistants/$(assistantId)/sessions/$(sessionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_session( + assistantId, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/assistants/$(assistantId)/sessions/$(sessionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/quicksight.jl b/src/services/quicksight.jl index 1cc0d8f462..6b911be5f0 100644 --- a/src/services/quicksight.jl +++ b/src/services/quicksight.jl @@ -110,42 +110,37 @@ function create_account_customization( end """ - create_account_subscription(account_name, authentication_method, aws_account_id, edition, notification_email) - create_account_subscription(account_name, authentication_method, aws_account_id, edition, notification_email, params::Dict{String,<:Any}) + create_account_subscription(account_name, authentication_method, aws_account_id, notification_email) + create_account_subscription(account_name, authentication_method, aws_account_id, notification_email, params::Dict{String,<:Any}) Creates an Amazon QuickSight account, or subscribes to Amazon QuickSight Q. The Amazon Web -Services Region for the account is derived from what is configured in the CLI or SDK. 
This -operation isn't supported in the US East (Ohio) Region, South America (Sao Paulo) Region, -or Asia Pacific (Singapore) Region. Before you use this operation, make sure that you can -connect to an existing Amazon Web Services account. If you don't have an Amazon Web -Services account, see Sign up for Amazon Web Services in the Amazon QuickSight User Guide. -The person who signs up for Amazon QuickSight needs to have the correct Identity and Access -Management (IAM) permissions. For more information, see IAM Policy Examples for Amazon -QuickSight in the Amazon QuickSight User Guide. If your IAM policy includes both the -Subscribe and CreateAccountSubscription actions, make sure that both actions are set to -Allow. If either action is set to Deny, the Deny action prevails and your API call fails. -You can't pass an existing IAM role to access other Amazon Web Services services using this -API operation. To pass your existing IAM role to Amazon QuickSight, see Passing IAM roles -to Amazon QuickSight in the Amazon QuickSight User Guide. You can't set default resource -access on the new account from the Amazon QuickSight API. Instead, add default resource -access from the Amazon QuickSight console. For more information about setting default -resource access to Amazon Web Services services, see Setting default resource access to -Amazon Web Services services in the Amazon QuickSight User Guide. +Services Region for the account is derived from what is configured in the CLI or SDK. +Before you use this operation, make sure that you can connect to an existing Amazon Web +Services account. If you don't have an Amazon Web Services account, see Sign up for Amazon +Web Services in the Amazon QuickSight User Guide. The person who signs up for Amazon +QuickSight needs to have the correct Identity and Access Management (IAM) permissions. For +more information, see IAM Policy Examples for Amazon QuickSight in the Amazon QuickSight +User Guide. If your IAM policy includes both the Subscribe and CreateAccountSubscription +actions, make sure that both actions are set to Allow. If either action is set to Deny, the +Deny action prevails and your API call fails. You can't pass an existing IAM role to access +other Amazon Web Services services using this API operation. To pass your existing IAM role +to Amazon QuickSight, see Passing IAM roles to Amazon QuickSight in the Amazon QuickSight +User Guide. You can't set default resource access on the new account from the Amazon +QuickSight API. Instead, add default resource access from the Amazon QuickSight console. +For more information about setting default resource access to Amazon Web Services services, +see Setting default resource access to Amazon Web Services services in the Amazon +QuickSight User Guide. # Arguments - `account_name`: The name of your Amazon QuickSight account. This name is unique over all of Amazon Web Services, and it appears only when users sign in. You can't change AccountName value after the Amazon QuickSight account is created. - `authentication_method`: The method that you want to use to authenticate your Amazon - QuickSight account. Currently, the valid values for this parameter are IAM_AND_QUICKSIGHT, - IAM_ONLY, and ACTIVE_DIRECTORY. If you choose ACTIVE_DIRECTORY, provide an - ActiveDirectoryName and an AdminGroup associated with your Active Directory. + QuickSight account. If you choose ACTIVE_DIRECTORY, provide an ActiveDirectoryName and an + AdminGroup associated with your Active Directory. 
If you choose IAM_IDENTITY_CENTER, + provide an AdminGroup associated with your IAM Identity Center account. - `aws_account_id`: The Amazon Web Services account ID of the account that you're using to create your Amazon QuickSight account. -- `edition`: The edition of Amazon QuickSight that you want your account to have. - Currently, you can choose from ENTERPRISE or ENTERPRISE_AND_Q. If you choose - ENTERPRISE_AND_Q, the following parameters are required: FirstName LastName - EmailAddress ContactNumber - `notification_email`: The email address that you want Amazon QuickSight to send notifications to regarding your Amazon QuickSight account or Amazon QuickSight subscription. @@ -153,31 +148,66 @@ Amazon Web Services services in the Amazon QuickSight User Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ActiveDirectoryName"`: The name of your Active Directory. This field is required if ACTIVE_DIRECTORY is the selected authentication method of the new Amazon QuickSight account. -- `"AdminGroup"`: The admin group associated with your Active Directory. This field is - required if ACTIVE_DIRECTORY is the selected authentication method of the new Amazon - QuickSight account. For more information about using Active Directory in Amazon QuickSight, - see Using Active Directory with Amazon QuickSight Enterprise Edition in the Amazon - QuickSight User Guide. -- `"AuthorGroup"`: The author group associated with your Active Directory. For more - information about using Active Directory in Amazon QuickSight, see Using Active Directory - with Amazon QuickSight Enterprise Edition in the Amazon QuickSight User Guide. +- `"AdminGroup"`: The admin group associated with your Active Directory or IAM Identity + Center account. Either this field or the AdminProGroup field is required if + ACTIVE_DIRECTORY or IAM_IDENTITY_CENTER is the selected authentication method of the new + Amazon QuickSight account. For more information about using IAM Identity Center in Amazon + QuickSight, see Using IAM Identity Center with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. For more information about using Active Directory in Amazon + QuickSight, see Using Active Directory with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. +- `"AdminProGroup"`: The admin pro group associated with your Active Directory or IAM + Identity Center account. Either this field or the AdminGroup field is required if + ACTIVE_DIRECTORY or IAM_IDENTITY_CENTER is the selected authentication method of the new + Amazon QuickSight account. For more information about using IAM Identity Center in Amazon + QuickSight, see Using IAM Identity Center with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. For more information about using Active Directory in Amazon + QuickSight, see Using Active Directory with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. +- `"AuthorGroup"`: The author group associated with your Active Directory or IAM Identity + Center account. For more information about using IAM Identity Center in Amazon QuickSight, + see Using IAM Identity Center with Amazon QuickSight Enterprise Edition in the Amazon + QuickSight User Guide. For more information about using Active Directory in Amazon + QuickSight, see Using Active Directory with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. 
+- `"AuthorProGroup"`: The author pro group associated with your Active Directory or IAM + Identity Center account. For more information about using IAM Identity Center in Amazon + QuickSight, see Using IAM Identity Center with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. For more information about using Active Directory in Amazon + QuickSight, see Using Active Directory with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. - `"ContactNumber"`: A 10-digit phone number for the author of the Amazon QuickSight account to use for future communications. This field is required if ENTERPPRISE_AND_Q is the selected edition of the new Amazon QuickSight account. - `"DirectoryId"`: The ID of the Active Directory that is associated with your Amazon QuickSight account. +- `"Edition"`: The edition of Amazon QuickSight that you want your account to have. + Currently, you can choose from ENTERPRISE or ENTERPRISE_AND_Q. If you choose + ENTERPRISE_AND_Q, the following parameters are required: FirstName LastName + EmailAddress ContactNumber - `"EmailAddress"`: The email address of the author of the Amazon QuickSight account to use for future communications. This field is required if ENTERPPRISE_AND_Q is the selected edition of the new Amazon QuickSight account. - `"FirstName"`: The first name of the author of the Amazon QuickSight account to use for future communications. This field is required if ENTERPPRISE_AND_Q is the selected edition of the new Amazon QuickSight account. +- `"IAMIdentityCenterInstanceArn"`: The Amazon Resource Name (ARN) for the IAM Identity + Center instance. - `"LastName"`: The last name of the author of the Amazon QuickSight account to use for future communications. This field is required if ENTERPPRISE_AND_Q is the selected edition of the new Amazon QuickSight account. -- `"ReaderGroup"`: The reader group associated with your Active Direcrtory. For more - information about using Active Directory in Amazon QuickSight, see Using Active Directory - with Amazon QuickSight Enterprise Edition in the Amazon QuickSight User Guide. +- `"ReaderGroup"`: The reader group associated with your Active Directory or IAM Identity + Center account. For more information about using IAM Identity Center in Amazon QuickSight, + see Using IAM Identity Center with Amazon QuickSight Enterprise Edition in the Amazon + QuickSight User Guide. For more information about using Active Directory in Amazon + QuickSight, see Using Active Directory with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. +- `"ReaderProGroup"`: The reader pro group associated with your Active Directory or IAM + Identity Center account. For more information about using IAM Identity Center in Amazon + QuickSight, see Using IAM Identity Center with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. For more information about using Active Directory in Amazon + QuickSight, see Using Active Directory with Amazon QuickSight Enterprise Edition in the + Amazon QuickSight User Guide. - `"Realm"`: The realm of the Active Directory that is associated with your Amazon QuickSight account. This field is required if ACTIVE_DIRECTORY is the selected authentication method of the new Amazon QuickSight account. 
@@ -186,7 +216,6 @@ function create_account_subscription( AccountName, AuthenticationMethod, AwsAccountId, - Edition, NotificationEmail; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -196,7 +225,6 @@ function create_account_subscription( Dict{String,Any}( "AccountName" => AccountName, "AuthenticationMethod" => AuthenticationMethod, - "Edition" => Edition, "NotificationEmail" => NotificationEmail, ); aws_config=aws_config, @@ -207,7 +235,6 @@ function create_account_subscription( AccountName, AuthenticationMethod, AwsAccountId, - Edition, NotificationEmail, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -221,7 +248,6 @@ function create_account_subscription( Dict{String,Any}( "AccountName" => AccountName, "AuthenticationMethod" => AuthenticationMethod, - "Edition" => Edition, "NotificationEmail" => NotificationEmail, ), params, @@ -252,6 +278,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Definition"`: The definition of an analysis. A definition is the data model of all features in a Dashboard, Template, or Analysis. Either a SourceEntity or a Definition must be provided in order for the request to be valid. +- `"FolderArns"`: When you create the analysis, Amazon QuickSight adds the analysis to + these folders. - `"Parameters"`: The parameter names and override values that you want to use. An analysis can have any parameter type, and some parameters might accept multiple values. - `"Permissions"`: A structure that describes the principals and the resource-level @@ -267,6 +295,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the analysis. - `"ThemeArn"`: The ARN for the theme to apply to the analysis that you're creating. To see the theme in the Amazon QuickSight console, make sure that you have access to it. +- `"ValidationStrategy"`: The option to relax the validation needed to create an analysis + with definition objects. This skips the validation step for specific errors. """ function create_analysis( AnalysisId, AwsAccountId, Name; aws_config::AbstractAWSConfig=global_aws_config() @@ -326,6 +356,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Definition"`: The definition of a dashboard. A definition is the data model of all features in a Dashboard, Template, or Analysis. Either a SourceEntity or a Definition must be provided in order for the request to be valid. +- `"FolderArns"`: When you create the dashboard, Amazon QuickSight adds the dashboard to + these folders. +- `"LinkEntities"`: A list of analysis Amazon Resource Names (ARNs) to be linked to the + dashboard. +- `"LinkSharingConfiguration"`: A structure that contains the permissions of a shareable + link to the dashboard. - `"Parameters"`: The parameters for the creation of the dashboard, which you want to use to override the default settings. A dashboard can have any type of parameters, and some parameters might accept multiple values. @@ -348,6 +384,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys dashboard. If you add a value for this field, it overrides the value that is used in the source entity. The theme ARN must exist in the same Amazon Web Services account where you create the dashboard. +- `"ValidationStrategy"`: The option to relax the validation needed to create a dashboard + with definition objects. This option skips the validation step for specific errors. 
- `"VersionDescription"`: A description for the first version of the dashboard being created. """ @@ -403,6 +441,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DataSetUsageConfiguration"`: - `"DatasetParameters"`: The parameter declarations of the dataset. - `"FieldFolders"`: The folder that contains fields and nested subfolders for your dataset. +- `"FolderArns"`: When you create the dataset, Amazon QuickSight adds the dataset to these + folders. - `"LogicalTableMap"`: Configures the combination and transformation of the data from the physical tables. - `"Permissions"`: A list of resource permissions on the dataset. @@ -485,6 +525,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys supported. - `"DataSourceParameters"`: The parameters that Amazon QuickSight uses to connect to your underlying source. +- `"FolderArns"`: When you create the data source, Amazon QuickSight adds the data source + to these folders. - `"Permissions"`: A list of resource permissions on the data source. - `"SslProperties"`: Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your underlying source. @@ -552,6 +594,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys ParentFolderArn can be null. An empty parentFolderArn creates a root-level folder. - `"Permissions"`: A structure that describes the principals and the resource-level permissions of a folder. To specify no permissions, omit Permissions. +- `"SharingModel"`: An optional parameter that determines the sharing scope of the folder. + The default value for this parameter is ACCOUNT. - `"Tags"`: Tags for the folder. """ function create_folder( @@ -588,8 +632,8 @@ Adds an asset, such as a dashboard, analysis, or dataset into a folder. # Arguments - `aws_account_id`: The ID for the Amazon Web Services account that contains the folder. - `folder_id`: The ID of the folder. -- `member_id`: The ID of the asset (the dashboard, analysis, or dataset). -- `member_type`: The type of the member, including DASHBOARD, ANALYSIS, and DATASET. +- `member_id`: The ID of the asset that you want to add to the folder. +- `member_type`: The member type of the asset that you want to add to a folder. """ function create_folder_membership( @@ -629,7 +673,7 @@ end Use the CreateGroup operation to create a group in Amazon QuickSight. You can create up to 10,000 groups in a namespace. If you want to create more than 10,000 groups in a namespace, -contact AWS Support. The permissions resource is +contact Amazon Web Services Support. The permissions resource is arn:aws:quicksight:<your-region>:<relevant-aws-account-id>:group/default/<gro up-name> . The response is a group object. @@ -941,6 +985,52 @@ function create_refresh_schedule( ) end +""" + create_role_membership(aws_account_id, member_name, namespace, role) + create_role_membership(aws_account_id, member_name, namespace, role, params::Dict{String,<:Any}) + +Use CreateRoleMembership to add an existing Amazon QuickSight group to an existing role. + +# Arguments +- `aws_account_id`: The ID for the Amazon Web Services account that you want to create a + group in. The Amazon Web Services account ID that you provide must be the same Amazon Web + Services account that contains your Amazon QuickSight account. +- `member_name`: The name of the group that you want to add to the role. +- `namespace`: The namespace that the role belongs to. +- `role`: The role that you want to add a group to. 
+ +""" +function create_role_membership( + AwsAccountId, + MemberName, + Namespace, + Role; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/members/$(MemberName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_role_membership( + AwsAccountId, + MemberName, + Namespace, + Role, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/members/$(MemberName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_template(aws_account_id, template_id) create_template(aws_account_id, template_id, params::Dict{String,<:Any}) @@ -979,6 +1069,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys request to be valid. - `"Tags"`: Contains a map of the key-value pairs for the resource tag or tags assigned to the resource. +- `"ValidationStrategy"`: TThe option to relax the validation needed to create a template + with definition objects. This skips the validation step for specific errors. - `"VersionDescription"`: A description of the current template version being created. This API operation creates the first version of the template. Every time UpdateTemplate is called, a new version is created. Each version of the template maintains a description of @@ -1703,9 +1795,8 @@ Removes an asset, such as a dashboard, analysis, or dataset, from a folder. # Arguments - `aws_account_id`: The ID for the Amazon Web Services account that contains the folder. - `folder_id`: The Folder ID. -- `member_id`: The ID of the asset (the dashboard, analysis, or dataset) that you want to - delete. -- `member_type`: The type of the member, including DASHBOARD, ANALYSIS, and DATASET +- `member_id`: The ID of the asset that you want to delete. +- `member_type`: The member type of the asset that you want to delete from a folder. """ function delete_folder_membership( @@ -1867,6 +1958,46 @@ function delete_iampolicy_assignment( ) end +""" + delete_identity_propagation_config(aws_account_id, service) + delete_identity_propagation_config(aws_account_id, service, params::Dict{String,<:Any}) + +Deletes all access scopes and authorized targets that are associated with a service from +the Amazon QuickSight IAM Identity Center application. This operation is only supported for +Amazon QuickSight accounts that use IAM Identity Center. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that you want to delete an + identity propagation configuration from. +- `service`: The name of the Amazon Web Services service that you want to delete the + associated access scopes and authorized targets from. 
+ +""" +function delete_identity_propagation_config( + AwsAccountId, Service; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "DELETE", + "/accounts/$(AwsAccountId)/identity-propagation-config/$(Service)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_identity_propagation_config( + AwsAccountId, + Service, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "DELETE", + "/accounts/$(AwsAccountId)/identity-propagation-config/$(Service)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_namespace(aws_account_id, namespace) delete_namespace(aws_account_id, namespace, params::Dict{String,<:Any}) @@ -1945,6 +2076,92 @@ function delete_refresh_schedule( ) end +""" + delete_role_custom_permission(aws_account_id, namespace, role) + delete_role_custom_permission(aws_account_id, namespace, role, params::Dict{String,<:Any}) + +Removes custom permissions from the role. + +# Arguments +- `aws_account_id`: The ID for the Amazon Web Services account that the group is in. + Currently, you use the ID for the Amazon Web Services account that contains your Amazon + QuickSight account. +- `namespace`: The namespace that includes the role. +- `role`: The role that you want to remove permissions from. + +""" +function delete_role_custom_permission( + AwsAccountId, Namespace, Role; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "DELETE", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/custom-permission"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_role_custom_permission( + AwsAccountId, + Namespace, + Role, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "DELETE", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/custom-permission", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_role_membership(aws_account_id, member_name, namespace, role) + delete_role_membership(aws_account_id, member_name, namespace, role, params::Dict{String,<:Any}) + +Removes a group from a role. + +# Arguments +- `aws_account_id`: The ID for the Amazon Web Services account that you want to create a + group in. The Amazon Web Services account ID that you provide must be the same Amazon Web + Services account that contains your Amazon QuickSight account. +- `member_name`: The name of the group. +- `namespace`: The namespace that contains the role. +- `role`: The role that you want to remove permissions from. 
+ +""" +function delete_role_membership( + AwsAccountId, + MemberName, + Namespace, + Role; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "DELETE", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/members/$(MemberName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_role_membership( + AwsAccountId, + MemberName, + Namespace, + Role, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "DELETE", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/members/$(MemberName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_template(aws_account_id, template_id) delete_template(aws_account_id, template_id, params::Dict{String,<:Any}) @@ -2774,6 +2991,97 @@ function describe_dashboard_permissions( ) end +""" + describe_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_job_id) + describe_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_job_id, params::Dict{String,<:Any}) + +Describes an existing snapshot job. Poll job descriptions after a job starts to know the +status of the job. For information on available status codes, see JobStatus. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that the dashboard snapshot + job is executed in. +- `dashboard_id`: The ID of the dashboard that you have started a snapshot job for. +- `snapshot_job_id`: The ID of the job to be described. The job ID is set when you start a + new job with a StartDashboardSnapshotJob API call. + +""" +function describe_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotJobId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_dashboard_snapshot_job_result(aws_account_id, dashboard_id, snapshot_job_id) + describe_dashboard_snapshot_job_result(aws_account_id, dashboard_id, snapshot_job_id, params::Dict{String,<:Any}) + +Describes the result of an existing snapshot job that has finished running. A finished +snapshot job will return a COMPLETED or FAILED status when you poll the job with a +DescribeDashboardSnapshotJob API call. If the job has not finished running, this operation +returns a message that says Dashboard Snapshot Job with id <SnapshotjobId> has not +reached a terminal state.. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that the dashboard snapshot + job is executed in. +- `dashboard_id`: The ID of the dashboard that you have started a snapshot job for. +- `snapshot_job_id`: The ID of the job to be described. The job ID is set when you start a + new job with a StartDashboardSnapshotJob API call. 
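An illustrative sketch of the polling flow described above, assuming the `@service` pattern from the
AWS.jl README and hypothetical IDs:

    using AWS
    @service Quicksight

    # Poll the job; its JobStatus field reports COMPLETED or FAILED once it has finished.
    job = Quicksight.describe_dashboard_snapshot_job("111122223333", "sales-dashboard", "snapshot-job-1")
    # Once the job reaches a terminal state, fetch the URLs of the generated files.
    result = Quicksight.describe_dashboard_snapshot_job_result(
        "111122223333", "sales-dashboard", "snapshot-job-1"
    )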
+ +""" +function describe_dashboard_snapshot_job_result( + AwsAccountId, + DashboardId, + SnapshotJobId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)/result"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_dashboard_snapshot_job_result( + AwsAccountId, + DashboardId, + SnapshotJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)/result", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_data_set(aws_account_id, data_set_id) describe_data_set(aws_account_id, data_set_id, params::Dict{String,<:Any}) @@ -3006,6 +3314,11 @@ Describes permissions for a folder. - `aws_account_id`: The ID for the Amazon Web Services account that contains the folder. - `folder_id`: The ID of the folder. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of results to be returned per request. +- `"namespace"`: The namespace of the folder whose permissions you want described. +- `"next-token"`: A pagination token for the next set of results. """ function describe_folder_permissions( AwsAccountId, FolderId; aws_config::AbstractAWSConfig=global_aws_config() @@ -3043,6 +3356,11 @@ permissions and the inherited permissions from the ancestor folders. - `aws_account_id`: The ID for the Amazon Web Services account that contains the folder. - `folder_id`: The ID of the folder. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of results to be returned per request. +- `"namespace"`: The namespace of the folder whose permissions you want described. +- `"next-token"`: A pagination token for the next set of results. """ function describe_folder_resolved_permissions( AwsAccountId, FolderId; aws_config::AbstractAWSConfig=global_aws_config() @@ -3271,6 +3589,44 @@ function describe_ip_restriction( ) end +""" + describe_key_registration(aws_account_id) + describe_key_registration(aws_account_id, params::Dict{String,<:Any}) + +Describes all customer managed key registrations in a Amazon QuickSight account. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that contains the customer + managed key registration that you want to describe. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"default-key-only"`: Determines whether the request returns the default key only. 
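A short sketch of passing the optional key documented above through the `params` method, assuming the
`@service` pattern from the AWS.jl README and a hypothetical account ID:

    using AWS
    @service Quicksight

    # Return only the default customer managed key registration.
    Quicksight.describe_key_registration(
        "111122223333", Dict{String,Any}("default-key-only" => true)
    )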
+""" +function describe_key_registration( + AwsAccountId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/key-registration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_key_registration( + AwsAccountId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/key-registration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_namespace(aws_account_id, namespace) describe_namespace(aws_account_id, namespace, params::Dict{String,<:Any}) @@ -3346,6 +3702,46 @@ function describe_refresh_schedule( ) end +""" + describe_role_custom_permission(aws_account_id, namespace, role) + describe_role_custom_permission(aws_account_id, namespace, role, params::Dict{String,<:Any}) + +Describes all custom permissions that are mapped to a role. + +# Arguments +- `aws_account_id`: The ID for the Amazon Web Services account that you want to create a + group in. The Amazon Web Services account ID that you provide must be the same Amazon Web + Services account that contains your Amazon QuickSight account. +- `namespace`: The namespace that contains the role. +- `role`: The name of the role whose permissions you want described. + +""" +function describe_role_custom_permission( + AwsAccountId, Namespace, Role; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/custom-permission"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_role_custom_permission( + AwsAccountId, + Namespace, + Role, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/custom-permission", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_template(aws_account_id, template_id) describe_template(aws_account_id, template_id, params::Dict{String,<:Any}) @@ -3893,8 +4289,9 @@ QuickSight Developer Portal. - `authorized_resource_arns`: The Amazon Resource Names (ARNs) for the Amazon QuickSight resources that the user is authorized to access during the lifetime of the session. If you choose Dashboard embedding experience, pass the list of dashboard ARNs in the account that - you want the user to be able to view. Currently, you can pass up to 25 dashboard ARNs in - each API call. + you want the user to be able to view. If you want to make changes to the theme of your + embedded content, pass a list of theme ARNs that the anonymous users need access to. + Currently, you can pass up to 25 theme ARNs in each API call. - `aws_account_id`: The ID for the Amazon Web Services account that contains the dashboard that you're embedding. - `experience_configuration`: The configuration of the experience that you are embedding. @@ -3986,9 +4383,10 @@ ways you can customize embedding, visit the Amazon QuickSight Developer Portal. # Arguments - `aws_account_id`: The ID for the Amazon Web Services account that contains the dashboard that you're embedding. -- `experience_configuration`: The experience you are embedding. For registered users, you - can embed Amazon QuickSight dashboards, Amazon QuickSight visuals, the Amazon QuickSight Q - search bar, or the entire Amazon QuickSight console. 
+- `experience_configuration`: The experience that you want to embed. For registered users, + you can embed Amazon QuickSight dashboards, Amazon QuickSight visuals, the Amazon + QuickSight Q search bar, the Amazon QuickSight Generative Q&A experience, or the entire + Amazon QuickSight console. - `user_arn`: The Amazon Resource Name for the registered user. # Optional Parameters @@ -4718,6 +5116,48 @@ function list_iampolicy_assignments_for_user( ) end +""" + list_identity_propagation_configs(aws_account_id) + list_identity_propagation_configs(aws_account_id, params::Dict{String,<:Any}) + +Lists all services and authorized targets that the Amazon QuickSight IAM Identity Center +application can access. This operation is only supported for Amazon QuickSight accounts +that use IAM Identity Center. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that contain the identity + propagation configurations of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of results to be returned. +- `"next-token"`: The token for the next set of results, or null if there are no more + results. +""" +function list_identity_propagation_configs( + AwsAccountId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/identity-propagation-config"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_identity_propagation_configs( + AwsAccountId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/identity-propagation-config", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_ingestions(aws_account_id, data_set_id) list_ingestions(aws_account_id, data_set_id, params::Dict{String,<:Any}) @@ -4838,6 +5278,50 @@ function list_refresh_schedules( ) end +""" + list_role_memberships(aws_account_id, namespace, role) + list_role_memberships(aws_account_id, namespace, role, params::Dict{String,<:Any}) + +Lists all groups that are associated with a role. + +# Arguments +- `aws_account_id`: The ID for the Amazon Web Services account that you want to create a + group in. The Amazon Web Services account ID that you provide must be the same Amazon Web + Services account that contains your Amazon QuickSight account. +- `namespace`: The namespace that includes the role. +- `role`: The name of the role. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of results to return. +- `"next-token"`: A pagination token that can be used in a subsequent request. 
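A paging sketch using the optional keys listed above, assuming the `@service` pattern from the AWS.jl
README and hypothetical identifiers:

    using AWS
    @service Quicksight

    # First page of up to 50 group memberships for the AUTHOR role.
    page = Quicksight.list_role_memberships(
        "111122223333", "default", "AUTHOR", Dict{String,Any}("max-results" => 50)
    )
    # Pass the pagination token from the response back as "next-token" to fetch the next page.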
+""" +function list_role_memberships( + AwsAccountId, Namespace, Role; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/members"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_role_memberships( + AwsAccountId, + Namespace, + Role, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/members", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -5389,17 +5873,22 @@ see Inviting users to access Amazon QuickSight. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account. - `email`: The email address of the user that you want to register. -- `identity_type`: Amazon QuickSight supports several ways of managing the identity of - users. This parameter accepts two values: IAM: A user whose identity maps to an existing - IAM user or role. QUICKSIGHT: A user whose identity is owned and managed internally by - Amazon QuickSight. +- `identity_type`: The identity type that your Amazon QuickSight account uses to manage the + identity of users. - `namespace`: The namespace. Currently, you should set this to default. - `user_role`: The Amazon QuickSight role for the user. The user role can be one of the following: READER: A user who has read-only access to dashboards. AUTHOR: A user who can create data sources, datasets, analyses, and dashboards. ADMIN: A user who is an - author, who can also manage Amazon QuickSight settings. RESTRICTED_READER: This role - isn't currently available for use. RESTRICTED_AUTHOR: This role isn't currently - available for use. + author, who can also manage Amazon QuickSight settings. READER_PRO: Reader Pro adds + Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in + Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries + from dashboards. AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author + role. Author Pros can author dashboards with natural language with Amazon Q, build stories + with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards. + ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight + administrative settings. Admin Pro users are billed at Author Pro pricing. + RESTRICTED_READER: This role isn't currently available for use. RESTRICTED_AUTHOR: This + role isn't currently available for use. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5418,8 +5907,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon QuickSight user. Amazon QuickSight custom permissions are applied through IAM policies. Therefore, they override the permissions typically granted by assigning Amazon QuickSight users to one of the default security cohorts in Amazon QuickSight (admin, - author, reader). This feature is available only to Amazon QuickSight Enterprise edition - subscriptions. + author, reader, admin pro, author pro, reader pro). This feature is available only to + Amazon QuickSight Enterprise edition subscriptions. 
- `"ExternalLoginFederationProviderType"`: The type of supported external login provider that provides identity to let a user federate into Amazon QuickSight with an associated Identity and Access Management(IAM) role. The type of supported external login provider can @@ -5437,6 +5926,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for example when you are registering an IAM user or an Amazon QuickSight user. You can register multiple users using the same IAM role if each user has a different session name. For more information on assuming IAM roles, see assume-role in the CLI Reference. +- `"Tags"`: The tags to associate with the user. - `"UserName"`: The Amazon QuickSight user name that you want to create for the user you are registering. """ @@ -5792,7 +6282,7 @@ QuickSight assets. You can also choose to export any asset dependencies in the s Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a -DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 10 +DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 5 export jobs concurrently. The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported. @@ -5818,6 +6308,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Dashboard ARN to the ResourceArns parameter. If you set IncludeAllDependencies to TRUE, any theme, dataset, and data source resource that is a dependency of the dashboard is also exported. +- `"IncludePermissions"`: A Boolean that determines whether all permissions for each + resource ARN are exported with the job. If you set IncludePermissions to TRUE, any + permissions associated with each resource are exported. +- `"IncludeTags"`: A Boolean that determines whether all tags for each resource ARN are + exported with the job. If you set IncludeTags to TRUE, any tags associated with each + resource are exported. +- `"ValidationStrategy"`: An optional parameter that determines which validation strategy + to use for the export job. If StrictModeForAllResources is set to TRUE, strict validation + for every error is enforced. If it is set to FALSE, validation is skipped for specific UI + errors that are shown as warnings. The default value for StrictModeForAllResources is FALSE. """ function start_asset_bundle_export_job( AssetBundleExportJobId, @@ -5873,7 +6373,7 @@ Starts an Asset Bundle import job. An Asset Bundle import job imports specified QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon -QuickSight account. Each Amazon QuickSight account can run up to 10 import jobs +QuickSight account. Each Amazon QuickSight account can run up to 5 import jobs concurrently. The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported. @@ -5882,7 +6382,7 @@ the bundle file before the resources can be imported. - `asset_bundle_import_job_id`: The ID of the job. 
This ID is unique while the job is
  running. After the job is completed, you can reuse this ID for another job.
 - `asset_bundle_import_source`: The source of the asset bundle zip file that contains the
-  data that you want to import.
+  data that you want to import. The file must be in QUICKSIGHT_JSON format.
 - `aws_account_id`: The ID of the Amazon Web Services account to import assets into.
 
 # Optional Parameters
@@ -5891,8 +6391,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   import jobs will attempt to undo any asset changes caused by the failed job. If you choose
   DO_NOTHING, failed import jobs will not attempt to roll back any asset changes caused by
   the failed job, possibly keeping the Amazon QuickSight account in an inconsistent state.
-- `"OverrideParameters"`: Optional overrides to be applied to the resource configuration
+- `"OverrideParameters"`: Optional overrides that are applied to the resource configuration
+  before import.
+- `"OverridePermissions"`: Optional permission overrides that are applied to the resource
+  configuration before import.
+- `"OverrideTags"`: Optional tag overrides that are applied to the resource configuration
   before import.
+- `"OverrideValidationStrategy"`: An optional validation strategy override for all analyses
+  and dashboards that is applied to the resource configuration before import.
 """
 function start_asset_bundle_import_job(
     AssetBundleImportJobId,
@@ -5936,6 +6442,113 @@ function start_asset_bundle_import_job(
     )
 end
 
+"""
+    start_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_configuration, snapshot_job_id, user_configuration)
+    start_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_configuration, snapshot_job_id, user_configuration, params::Dict{String,<:Any})
+
+Starts an asynchronous job that generates a snapshot of a dashboard's output. You can
+request one or several of the following format configurations in each API call. 1
+Paginated PDF 1 Excel workbook that includes up to 5 table or pivot table visuals 5
+CSVs from table or pivot table visuals The status of a submitted job can be polled with
+the DescribeDashboardSnapshotJob API. When you call the DescribeDashboardSnapshotJob API,
+check the JobStatus field in the response. Once the job reaches a COMPLETED or FAILED
+status, use the DescribeDashboardSnapshotJobResult API to obtain the URLs for the generated
+files. If the job fails, the DescribeDashboardSnapshotJobResult API returns detailed
+information about the error that occurred. StartDashboardSnapshotJob API throttling
+Amazon QuickSight utilizes API throttling to create a more consistent user experience
+within a time span for customers when they call the StartDashboardSnapshotJob. By default,
+12 jobs can run simultaneously in one Amazon Web Services account and users can submit up
+to 10 API requests per second before an account is throttled. If an overwhelming number of
+API requests are made by the same user in a short period of time, Amazon QuickSight
+throttles the API calls to maintain an optimal experience and reliability for all Amazon
+QuickSight users. Common throttling scenarios The following list provides information about
+the most common throttling scenarios that can occur. A large number of SnapshotExport API
+jobs are running simultaneously on an Amazon Web Services account. When a new
+StartDashboardSnapshotJob is created and there are already 12 jobs with the RUNNING status,
+the new job request fails and returns a LimitExceededException error. Wait for a current
+job to complete before you resubmit the new job. A large number of API requests are
+submitted on an Amazon Web Services account. When a user makes more than 10 API calls to
+the Amazon QuickSight API in one second, a ThrottlingException is returned. If your use
+case requires a higher throttling limit, contact your account admin or Amazon Web Services
+Support to explore options to tailor a more optimal experience for your account.
+Best practices to handle throttling If your use case projects high levels of API traffic,
+try to reduce the degree of frequency and parallelism of API calls as much as you can to
+avoid throttling. You can also perform a timing test to calculate an estimate for the total
+processing time of your projected load that stays within the throttling limits of the
+Amazon QuickSight APIs. For example, if your projected traffic is 100 snapshot jobs before
+12:00 PM per day, start 12 jobs in parallel and measure the amount of time it takes to
+process all 12 jobs. Once you obtain the result, multiply the duration by 9, for example
+(12 minutes * 9 = 108 minutes). Use the new result to determine the latest time at which
+the jobs need to be started to meet your target deadline. The time that it takes to process
+a job can be impacted by the following factors: The dataset type (Direct Query or SPICE).
+ The size of the dataset. The complexity of the calculated fields that are used in the
+dashboard. The number of visuals that are on a sheet. The types of visuals that are on
+the sheet. The number of formats and snapshots that are requested in the job
+configuration. The size of the generated snapshots.
+
+# Arguments
+- `aws_account_id`: The ID of the Amazon Web Services account that the dashboard snapshot
+  job is executed in.
+- `dashboard_id`: The ID of the dashboard that you want to start a snapshot job for.
+- `snapshot_configuration`: A structure that describes the configuration of the dashboard
+  snapshot.
+- `snapshot_job_id`: An ID for the dashboard snapshot job. This ID is unique to the
+  dashboard while the job is running. This ID can be used to poll the status of a job with a
+  DescribeDashboardSnapshotJob while the job runs. You can reuse this ID for another job 24
+  hours after the current job is completed.
+- `user_configuration`: A structure that contains information about the anonymous users
+  that the generated snapshot is for. This API will not return information about registered
+  Amazon QuickSight users.
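A hedged sketch of starting a snapshot job, assuming the `@service` pattern from the AWS.jl README;
the IDs are hypothetical and the two configuration dictionaries must be filled with the service's
snapshot and anonymous-user structures, which this patch does not spell out:

    using AWS
    @service Quicksight

    snapshot_config = Dict{String,Any}()  # snapshot format/destination settings required by the service
    user_config = Dict{String,Any}()      # anonymous-user settings required by the service
    Quicksight.start_dashboard_snapshot_job(
        "111122223333", "sales-dashboard", snapshot_config, "snapshot-job-1", user_config
    )
    # Poll with describe_dashboard_snapshot_job until JobStatus is COMPLETED or FAILED, then
    # call describe_dashboard_snapshot_job_result for the generated file URLs.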
+ +""" +function start_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotConfiguration, + SnapshotJobId, + UserConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs", + Dict{String,Any}( + "SnapshotConfiguration" => SnapshotConfiguration, + "SnapshotJobId" => SnapshotJobId, + "UserConfiguration" => UserConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotConfiguration, + SnapshotJobId, + UserConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "SnapshotConfiguration" => SnapshotConfiguration, + "SnapshotJobId" => SnapshotJobId, + "UserConfiguration" => UserConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -5948,12 +6561,12 @@ tags. If you specify a new tag key for the resource, this tag is appended to the tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. You can associate as many as 50 tags with a resource. Amazon QuickSight supports tagging on -data set, data source, dashboard, template, and topic. Tagging for Amazon QuickSight works -in a similar way to tagging for other Amazon Web Services services, except for the -following: You can't use tags to track costs for Amazon QuickSight. This isn't possible -because you can't tag the resources that Amazon QuickSight costs are based on, for example -Amazon QuickSight storage capacity (SPICE), number of users, type of users, and usage -metrics. Amazon QuickSight doesn't currently support the tag editor for Resource Groups. +data set, data source, dashboard, template, topic, and user. Tagging for Amazon QuickSight +works in a similar way to tagging for other Amazon Web Services services, except for the +following: Tags are used to track costs for users in Amazon QuickSight. You can't tag +other resources that Amazon QuickSight costs are based on, such as storage capacoty +(SPICE), session usage, alert consumption, or reporting units. Amazon QuickSight doesn't +currently support the tag editor for Resource Groups. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to tag. @@ -6155,6 +6768,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ThemeArn"`: The Amazon Resource Name (ARN) for the theme to apply to the analysis that you're creating. To see the theme in the Amazon QuickSight console, make sure that you have access to it. +- `"ValidationStrategy"`: The option to relax the validation needed to update an analysis + with definition objects. This skips the validation step for specific errors. """ function update_analysis( AnalysisId, AwsAccountId, Name; aws_config::AbstractAWSConfig=global_aws_config() @@ -6272,6 +6887,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys dashboard. If you add a value for this field, it overrides the value that was originally associated with the entity. 
The theme ARN must exist in the same Amazon Web Services account where you create the dashboard. +- `"ValidationStrategy"`: The option to relax the validation needed to update a dashboard + with definition objects. This skips the validation step for specific errors. - `"VersionDescription"`: A description for the first version of the dashboard being created. """ @@ -6302,6 +6919,52 @@ function update_dashboard( ) end +""" + update_dashboard_links(aws_account_id, dashboard_id, link_entities) + update_dashboard_links(aws_account_id, dashboard_id, link_entities, params::Dict{String,<:Any}) + +Updates the linked analyses on a dashboard. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that contains the dashboard + whose links you want to update. +- `dashboard_id`: The ID for the dashboard. +- `link_entities`: list of analysis Amazon Resource Names (ARNs) to be linked to the + dashboard. + +""" +function update_dashboard_links( + AwsAccountId, + DashboardId, + LinkEntities; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "PUT", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/linked-entities", + Dict{String,Any}("LinkEntities" => LinkEntities); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_dashboard_links( + AwsAccountId, + DashboardId, + LinkEntities, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "PUT", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/linked-entities", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("LinkEntities" => LinkEntities), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_dashboard_permissions(aws_account_id, dashboard_id) update_dashboard_permissions(aws_account_id, dashboard_id, params::Dict{String,<:Any}) @@ -6657,8 +7320,10 @@ Updates permissions of a folder. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"GrantPermissions"`: The permissions that you want to grant on a resource. -- `"RevokePermissions"`: The permissions that you want to revoke from a resource. +- `"GrantPermissions"`: The permissions that you want to grant on a resource. Namespace + ARNs are not supported Principal values for folder permissions. +- `"RevokePermissions"`: The permissions that you want to revoke from a resource. Namespace + ARNs are not supported Principal values for folder permissions. """ function update_folder_permissions( AwsAccountId, FolderId; aws_config::AbstractAWSConfig=global_aws_config() @@ -6783,13 +7448,58 @@ function update_iampolicy_assignment( ) end +""" + update_identity_propagation_config(aws_account_id, service) + update_identity_propagation_config(aws_account_id, service, params::Dict{String,<:Any}) + +Adds or updates services and authorized targets to configure what the Amazon QuickSight IAM +Identity Center application can access. This operation is only supported for Amazon +QuickSight accounts using IAM Identity Center + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that contains the identity + propagation configuration that you want to update. +- `service`: The name of the Amazon Web Services service that contains the authorized + targets that you want to add or update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AuthorizedTargets"`: Specifies a list of application ARNs that represent the authorized + targets for a service. +""" +function update_identity_propagation_config( + AwsAccountId, Service; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/identity-propagation-config/$(Service)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_identity_propagation_config( + AwsAccountId, + Service, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/identity-propagation-config/$(Service)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_ip_restriction(aws_account_id) update_ip_restriction(aws_account_id, params::Dict{String,<:Any}) -Updates the content and status of IP rules. To use this operation, you must provide the -entire map of rules. You can use the DescribeIpRestriction operation to get the current -rule map. +Updates the content and status of IP rules. Traffic from a source is allowed when the +source satisfies either the IpRestrictionRule, VpcIdRestrictionRule, or +VpcEndpointIdRestrictionRule. To use this operation, you must provide the entire map of +rules. You can use the DescribeIpRestriction operation to get the current rule map. # Arguments - `aws_account_id`: The ID of the Amazon Web Services account that contains the IP rules. @@ -6799,6 +7509,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Enabled"`: A value that specifies whether IP rules are turned on. - `"IpRestrictionRuleMap"`: A map that describes the updated IP rules with CIDR ranges and descriptions. +- `"VpcEndpointIdRestrictionRuleMap"`: A map of allowed VPC endpoint IDs and their + corresponding rule descriptions. +- `"VpcIdRestrictionRuleMap"`: A map of VPC IDs and their corresponding rules. When you + configure this parameter, traffic from all VPC endpoints that are present in the specified + VPC is allowed. """ function update_ip_restriction( AwsAccountId; aws_config::AbstractAWSConfig=global_aws_config() @@ -6824,6 +7539,49 @@ function update_ip_restriction( ) end +""" + update_key_registration(aws_account_id, key_registration) + update_key_registration(aws_account_id, key_registration, params::Dict{String,<:Any}) + +Updates a customer managed key in a Amazon QuickSight account. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that contains the customer + managed key registration that you want to update. +- `key_registration`: A list of RegisteredCustomerManagedKey objects to be updated to the + Amazon QuickSight account. 
+ +""" +function update_key_registration( + AwsAccountId, KeyRegistration; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/key-registration", + Dict{String,Any}("KeyRegistration" => KeyRegistration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_key_registration( + AwsAccountId, + KeyRegistration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/key-registration", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("KeyRegistration" => KeyRegistration), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_public_sharing_settings(aws_account_id) update_public_sharing_settings(aws_account_id, params::Dict{String,<:Any}) @@ -6909,6 +7667,103 @@ function update_refresh_schedule( ) end +""" + update_role_custom_permission(aws_account_id, custom_permissions_name, namespace, role) + update_role_custom_permission(aws_account_id, custom_permissions_name, namespace, role, params::Dict{String,<:Any}) + +Updates the custom permissions that are associated with a role. + +# Arguments +- `aws_account_id`: The ID for the Amazon Web Services account that you want to create a + group in. The Amazon Web Services account ID that you provide must be the same Amazon Web + Services account that contains your Amazon QuickSight account. +- `custom_permissions_name`: The name of the custom permission that you want to update the + role with. +- `namespace`: The namespace that contains the role that you want to update. +- `role`: The name of role tht you want to update. + +""" +function update_role_custom_permission( + AwsAccountId, + CustomPermissionsName, + Namespace, + Role; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "PUT", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/custom-permission", + Dict{String,Any}("CustomPermissionsName" => CustomPermissionsName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_role_custom_permission( + AwsAccountId, + CustomPermissionsName, + Namespace, + Role, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "PUT", + "/accounts/$(AwsAccountId)/namespaces/$(Namespace)/roles/$(Role)/custom-permission", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("CustomPermissionsName" => CustomPermissionsName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_spicecapacity_configuration(aws_account_id, purchase_mode) + update_spicecapacity_configuration(aws_account_id, purchase_mode, params::Dict{String,<:Any}) + +Updates the SPICE capacity configuration for a Amazon QuickSight account. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that contains the SPICE + configuration that you want to update. +- `purchase_mode`: Determines how SPICE capacity can be purchased. The following options + are available. MANUAL: SPICE capacity can only be purchased manually. AUTO_PURCHASE: + Extra SPICE capacity is automatically purchased on your behalf as needed. SPICE capacity + can also be purchased manually with this option. 
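A one-call sketch of the purchase-mode switch described above, assuming the `@service` pattern from
the AWS.jl README and a hypothetical account ID:

    using AWS
    @service Quicksight

    # Let extra SPICE capacity be purchased automatically; MANUAL is the other documented value.
    Quicksight.update_spicecapacity_configuration("111122223333", "AUTO_PURCHASE")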
+ +""" +function update_spicecapacity_configuration( + AwsAccountId, PurchaseMode; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/spice-capacity-configuration", + Dict{String,Any}("PurchaseMode" => PurchaseMode); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_spicecapacity_configuration( + AwsAccountId, + PurchaseMode, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/spice-capacity-configuration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("PurchaseMode" => PurchaseMode), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_template(aws_account_id, template_id) update_template(aws_account_id, template_id, params::Dict{String,<:Any}) @@ -6934,6 +7789,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DataSetReferences entity within SourceTemplate or SourceAnalysis to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder. +- `"ValidationStrategy"`: The option to relax the validation needed to update a template + with definition objects. This skips the validation step for specific errors. - `"VersionDescription"`: A description of the current template version that is being updated. Every time you call UpdateTemplate, you create a new version of the template. Each version of the template maintains a description of the version in the VersionDescription @@ -7359,9 +8216,16 @@ Updates an Amazon QuickSight user. - `role`: The Amazon QuickSight role of the user. The role can be one of the following default security cohorts: READER: A user who has read-only access to dashboards. AUTHOR: A user who can create data sources, datasets, analyses, and dashboards. ADMIN: A - user who is an author, who can also manage Amazon QuickSight settings. The name of the - Amazon QuickSight role is invisible to the user except for the console screens dealing with - permissions. + user who is an author, who can also manage Amazon QuickSight settings. READER_PRO: + Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to + Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive + summaries from dashboards. AUTHOR_PRO: Author Pro adds Generative BI capabilities to the + Author role. Author Pros can author dashboards with natural language with Amazon Q, build + stories with Amazon Q, create Topics for Q&A, and generate executive summaries from + dashboards. ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight + administrative settings. Admin Pro users are billed at Author Pro pricing. The name of + the Amazon QuickSight role is invisible to the user except for the console screens dealing + with permissions. - `user_name`: The Amazon QuickSight user name that you want to update. # Optional Parameters diff --git a/src/services/ram.jl b/src/services/ram.jl index 7a2e18945e..e01a4d90a9 100644 --- a/src/services/ram.jl +++ b/src/services/ram.jl @@ -98,6 +98,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Access Manager User Guide. - `"resourceArns"`: Specifies a list of Amazon Resource Names (ARNs) of the resources that you want to share. This can be null if you want to add only principals. 
+- `"sources"`: Specifies from which source accounts the service principal has access to the + resources in this resource share. """ function associate_resource_share( resourceShareArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -410,6 +412,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Access Manager User Guide. - `"resourceArns"`: Specifies a list of one or more ARNs of the resources to associate with the resource share. +- `"sources"`: Specifies from which source accounts the service principal has access to the + resources in this resource share. - `"tags"`: Specifies one or more tags to attach to the resource share itself. It doesn't attach the tags to the resources associated with the resource share. """ @@ -639,6 +643,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"resourceArns"`: Specifies a list of Amazon Resource Names (ARNs) for one or more resources that you want to remove from the resource share. After the operation runs, these resources are no longer shared with principals associated with the resource share. +- `"sources"`: Specifies from which source accounts the service principal no longer has + access to the resources in this resource share. """ function disassociate_resource_share( resourceShareArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -894,8 +900,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys NextToken response to request the next page of results. - `"principal"`: Specifies the ID of the principal whose resource shares you want to retrieve. This can be an Amazon Web Services account ID, an organization ID, an - organizational unit ID, or the Amazon Resource Name (ARN) of an individual IAM user or - role. You cannot specify this parameter if the association type is RESOURCE. + organizational unit ID, or the Amazon Resource Name (ARN) of an individual IAM role or + user. You cannot specify this parameter if the association type is RESOURCE. - `"resourceArn"`: Specifies the Amazon Resource Name (ARN) of a resource whose resource shares you want to retrieve. You cannot specify this parameter if the association type is PRINCIPAL. diff --git a/src/services/rds.jl b/src/services/rds.jl index 475dcbfaf1..aaddc12b25 100644 --- a/src/services/rds.jl +++ b/src/services/rds.jl @@ -172,8 +172,9 @@ end Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a -Condition statement in an IAM policy for Amazon RDS. For an overview on tagging Amazon RDS -resources, see Tagging Amazon RDS Resources. +Condition statement in an IAM policy for Amazon RDS. For an overview on tagging your +relational database resources, see Tagging Amazon RDS Resources or Tagging Amazon Aurora +and Amazon RDS Resources. # Arguments - `resource_name`: The Amazon RDS resource that the tags are added to. This value is an @@ -222,10 +223,10 @@ end Applies a pending maintenance action to a resource (for example, to a DB instance). # Arguments -- `apply_action`: The pending maintenance action to apply to this resource. Valid values: +- `apply_action`: The pending maintenance action to apply to this resource. Valid Values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation - `opt_in_type`: A value that specifies the type of opt-in request, or undoes an opt-in - request. An opt-in request of type immediate can't be undone. 
Valid values: immediate - + request. An opt-in request of type immediate can't be undone. Valid Values: immediate - Apply the maintenance action immediately. next-maintenance - Apply the maintenance action during the next maintenance window for the resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. @@ -364,12 +365,12 @@ User Guide. This action applies only to Aurora MySQL DB clusters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Force"`: A value that indicates whether to force the DB cluster to backtrack when - binary logging is enabled. Otherwise, an error occurs when binary logging is enabled. -- `"UseEarliestTimeOnPointInTimeUnavailable"`: A value that indicates whether to backtrack - the DB cluster to the earliest possible backtrack time when BacktrackTo is set to a - timestamp earlier than the earliest backtrack time. When this parameter is disabled and - BacktrackTo is set to a timestamp earlier than the earliest backtrack time, an error occurs. +- `"Force"`: Specifies whether to force the DB cluster to backtrack when binary logging is + enabled. Otherwise, an error occurs when binary logging is enabled. +- `"UseEarliestTimeOnPointInTimeUnavailable"`: Specifies whether to backtrack the DB + cluster to the earliest possible backtrack time when BacktrackTo is set to a timestamp + earlier than the earliest backtrack time. When this parameter is disabled and BacktrackTo + is set to a timestamp earlier than the earliest backtrack time, an error occurs. """ function backtrack_dbcluster( BacktrackTo, DBClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -450,7 +451,9 @@ end copy_dbcluster_parameter_group(source_dbcluster_parameter_group_identifier, target_dbcluster_parameter_group_description, target_dbcluster_parameter_group_identifier) copy_dbcluster_parameter_group(source_dbcluster_parameter_group_identifier, target_dbcluster_parameter_group_description, target_dbcluster_parameter_group_identifier, params::Dict{String,<:Any}) -Copies the specified DB cluster parameter group. +Copies the specified DB cluster parameter group. You can't copy a default DB cluster +parameter group. Instead, create a new custom DB cluster parameter group, which copies the +default parameters and values for the specified DB cluster parameter group family. # Arguments - `source_dbcluster_parameter_group_identifier`: The identifier or Amazon Resource Name @@ -559,8 +562,8 @@ on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CopyTags"`: A value that indicates whether to copy all tags from the source DB cluster - snapshot to the target DB cluster snapshot. By default, tags are not copied. +- `"CopyTags"`: Specifies whether to copy all tags from the source DB cluster snapshot to + the target DB cluster snapshot. By default, tags are not copied. - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted DB cluster snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the Amazon Web Services KMS key. 
If you copy an encrypted DB cluster @@ -652,7 +655,9 @@ end copy_dbparameter_group(source_dbparameter_group_identifier, target_dbparameter_group_description, target_dbparameter_group_identifier) copy_dbparameter_group(source_dbparameter_group_identifier, target_dbparameter_group_description, target_dbparameter_group_identifier, params::Dict{String,<:Any}) -Copies the specified DB parameter group. +Copies the specified DB parameter group. You can't copy a default DB parameter group. +Instead, create a new custom DB parameter group, which copies the default parameters and +values for the specified DB parameter group family. # Arguments - `source_dbparameter_group_identifier`: The identifier or ARN for the source DB parameter @@ -742,12 +747,12 @@ Amazon RDS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"CopyOptionGroup"`: A value that indicates whether to copy the DB option group - associated with the source DB snapshot to the target Amazon Web Services account and - associate with the target DB snapshot. The associated option group can be copied only with - cross-account snapshot copy calls. -- `"CopyTags"`: A value that indicates whether to copy all tags from the source DB snapshot - to the target DB snapshot. By default, tags aren't copied. +- `"CopyOptionGroup"`: Specifies whether to copy the DB option group associated with the + source DB snapshot to the target Amazon Web Services account and associate with the target + DB snapshot. The associated option group can be copied only with cross-account snapshot + copy calls. +- `"CopyTags"`: Specifies whether to copy all tags from the source DB snapshot to the + target DB snapshot. By default, tags aren't copied. - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted DB snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you copy an encrypted DB snapshot from your Amazon Web Services @@ -939,11 +944,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"TargetDBClusterParameterGroupName"`: The DB cluster parameter group associated with the Aurora DB cluster in the green environment. To test parameter changes, specify a DB cluster parameter group that is different from the one associated with the source DB cluster. +- `"TargetDBInstanceClass"`: Specify the DB instance class for the databases in the green + environment. This parameter only applies to RDS DB instances, because DB instances within + an Aurora DB cluster can have multiple different instance classes. If you're creating a + blue/green deployment from an Aurora DB cluster, don't specify this parameter. After the + green environment is created, you can individually modify the instance classes of the DB + instances within the green DB cluster. - `"TargetDBParameterGroupName"`: The DB parameter group associated with the DB instance in the green environment. To test parameter changes, specify a DB parameter group that is different from the one associated with the source DB instance. - `"TargetEngineVersion"`: The engine version of the database in the green environment. Specify the engine version to upgrade to in the green environment. +- `"UpgradeTargetStorageConfig"`: Whether to upgrade the storage file system configuration + on the green database. This option migrates the green DB instance from the older 32-bit + file system to the preferred configuration. 
For more information, see Upgrading the storage + file system for a DB instance. """ function create_blue_green_deployment( BlueGreenDeploymentName, Source; aws_config::AbstractAWSConfig=global_aws_config() @@ -986,8 +1001,8 @@ end Creates a custom DB engine version (CEV). # Arguments -- `engine`: The database engine to use for your custom engine version (CEV). The only - supported value is custom-oracle-ee. +- `engine`: The database engine. RDS Custom for Oracle supports the following values: + custom-oracle-ee custom-oracle-ee-cdb custom-oracle-se2 custom-oracle-se2-cdb - `engine_version`: The name of your CEV. The name format is 19.customized_string. For example, a valid CEV name is 19.my_cev1. This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of Engine and EngineVersion is unique @@ -1024,7 +1039,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys The patches that are not in the list of PSU and RU patches. Amazon RDS applies these patches after applying the PSU and RU patches. For more information, see Creating the CEV manifest in the Amazon RDS User Guide. +- `"SourceCustomDbEngineVersionIdentifier"`: The ARN of a CEV to use as a source for + creating a new CEV. You can specify a different Amazon Machine Image (AMI) by using + either Source or UseAwsProvidedLatestImage. You can't specify a different JSON manifest + when you specify SourceCustomDbEngineVersionIdentifier. - `"Tags"`: +- `"UseAwsProvidedLatestImage"`: Specifies whether to use the latest service-provided + Amazon Machine Image (AMI) for the CEV. If you specify UseAwsProvidedLatestImage, you can't + also specify ImageId. """ function create_custom_dbengine_version( Engine, EngineVersion; aws_config::AbstractAWSConfig=global_aws_config() @@ -1073,103 +1095,131 @@ RDS for MySQL or PostgreSQL DB instance as the source. For more information abou DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. # Arguments -- `dbcluster_identifier`: The DB cluster identifier. This parameter is stored as a - lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. - First character must be a letter. Can't end with a hyphen or contain two consecutive - hyphens. Example: my-cluster1 Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `engine`: The name of the database engine to be used for this DB cluster. Valid Values: - aurora-mysql aurora-postgresql mysql postgres Valid for: Aurora DB clusters - and Multi-AZ DB clusters +- `dbcluster_identifier`: The identifier for this DB cluster. This parameter is stored as a + lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + Constraints: Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ + DB clusters) letters, numbers, or hyphens. First character must be a letter. Can't end + with a hyphen or contain two consecutive hyphens. Example: my-cluster1 +- `engine`: The database engine to use for this DB cluster. Valid for Cluster Type: Aurora + DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql aurora-postgresql + mysql postgres neptune - For information about using Amazon Neptune, see the + Amazon Neptune User Guide . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The amount of storage in gibibytes (GiB) to allocate to each DB - instance in the Multi-AZ DB cluster. 
This setting is required to create a Multi-AZ DB - cluster. Valid for: Multi-AZ DB clusters only -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the DB cluster during the maintenance window. By default, minor - engine upgrades are applied automatically. Valid for: Multi-AZ DB clusters only -- `"AvailabilityZones"`: A list of Availability Zones (AZs) where DB instances in the DB - cluster can be created. For information on Amazon Web Services Regions and Availability - Zones, see Choosing the Regions and Availability Zones in the Amazon Aurora User Guide. - Valid for: Aurora DB clusters only + instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only This + setting is required to create a Multi-AZ DB cluster. +- `"AutoMinorVersionUpgrade"`: Specifies whether minor engine upgrades are applied + automatically to the DB cluster during the maintenance window. By default, minor engine + upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only +- `"AvailabilityZones"`: A list of Availability Zones (AZs) where you specifically want to + create DB instances in the DB cluster. For information on AZs, see Availability Zones in + the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Constraints: + Can't specify more than three AZs. - `"BacktrackWindow"`: The target backtrack window, in seconds. To disable backtracking, - set this value to 0. Default: 0 Constraints: If specified, this value must be set to a - number from 0 to 259,200 (72 hours). Valid for: Aurora MySQL DB clusters only + set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 + Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 + hours). - `"BackupRetentionPeriod"`: The number of days for which automated backups are retained. - Default: 1 Constraints: Must be a value from 1 to 35 Valid for: Aurora DB clusters and - Multi-AZ DB clusters -- `"CharacterSetName"`: A value that indicates that the DB cluster should be associated - with the specified CharacterSet. Valid for: Aurora DB clusters only -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB - cluster to snapshots of the DB cluster. The default is not to copy them. Valid for: Aurora - DB clusters and Multi-AZ DB clusters + Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Default: 1 + Constraints: Must be a value from 1 to 35. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB cluster's + server certificate. For more information, see Using SSL/TLS to encrypt a connection to a DB + instance in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters +- `"CharacterSetName"`: The name of the character set (CharacterSet) to associate the DB + cluster with. Valid for Cluster Type: Aurora DB clusters only +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the DB cluster to + snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"DBClusterInstanceClass"`: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS - User Guide. 
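The create_blue_green_deployment hunk above adds two optional keys, TargetDBInstanceClass and UpgradeTargetStorageConfig; they ride along in the same params dict as any other setting. A hedged sketch, assuming AWS.jl's @service-generated RDS module, with an illustrative deployment name and source ARN:

using AWS: @service
@service RDS

# Create a blue/green deployment from an RDS for MySQL instance, overriding the
# instance class and upgrading the storage file system in the green environment.
RDS.create_blue_green_deployment(
    "my-bg-deployment",                                          # BlueGreenDeploymentName
    "arn:aws:rds:us-east-1:123456789012:db:my-mysql-instance",   # Source (illustrative ARN)
    Dict(
        "TargetDBInstanceClass" => "db.r6g.large",
        "UpgradeTargetStorageConfig" => true,
    ),
)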
This setting is required to create a Multi-AZ DB cluster. Valid for: Multi-AZ - DB clusters only + User Guide. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster + Type: Multi-AZ DB clusters only - `"DBClusterParameterGroupName"`: The name of the DB cluster parameter group to associate - with this DB cluster. If you do not specify a value, then the default DB cluster parameter - group for the specified DB engine and version is used. Constraints: If supplied, must - match the name of an existing DB cluster parameter group. Valid for: Aurora DB clusters - and Multi-AZ DB clusters + with this DB cluster. If you don't specify a value, then the default DB cluster parameter + group for the specified DB engine and version is used. Valid for Cluster Type: Aurora DB + clusters and Multi-AZ DB clusters Constraints: If supplied, must match the name of an + existing DB cluster parameter group. - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB cluster. This setting - is required to create a Multi-AZ DB cluster. Constraints: Must match the name of an - existing DBSubnetGroup. Must not be default. Example: mydbsubnetgroup Valid for: Aurora DB - clusters and Multi-AZ DB clusters + is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters Constraints: Must match the name of an existing DB subnet group. + Must not be default. Example: mydbsubnetgroup - `"DBSystemId"`: Reserved for future use. -- `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. If you - do not provide a name, Amazon RDS doesn't create a database in the DB cluster you are - creating. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB +- `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. A + database named postgres is always created. If this parameter is specified, an additional + database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ + DB clusters +- `"DeletionProtection"`: Specifies whether the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. By default, deletion + protection isn't enabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"Domain"`: The Active Directory directory ID to create the DB cluster in. For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos authentication in the Amazon - Aurora User Guide. Valid for: Aurora DB clusters only -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. Valid for: Aurora DB clusters only + Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. Valid for Cluster Type: Aurora DB clusters only - `"EnableCloudwatchLogsExports"`: The list of log types that need to be enabled for - exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. - RDS for MySQL Possible values are error, general, and slowquery. 
RDS for PostgreSQL - Possible values are postgresql and upgrade. Aurora MySQL Possible values are audit, - error, general, and slowquery. Aurora PostgreSQL Possible value is postgresql. For more - information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to - Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting - CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs - in the Amazon Aurora User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"EnableGlobalWriteForwarding"`: A value that indicates whether to enable this DB cluster - to forward write operations to the primary cluster of an Aurora global database - (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that - are secondary clusters in an Aurora global database. You can set this value only on Aurora - DB clusters that are members of an Aurora global database. With this parameter enabled, a - secondary cluster can forward writes to the current primary cluster and the resulting - changes are replicated back to this cluster. For the primary DB cluster of an Aurora global - database, this value is used immediately if the primary is demoted by the - FailoverGlobalCluster API operation, but it does nothing until then. Valid for: Aurora DB + exporting to CloudWatch Logs. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters The following values are valid for each DB engine: Aurora MySQL - audit | error + | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general + | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about + exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch + Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for + Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora + User Guide. +- `"EnableGlobalWriteForwarding"`: Specifies whether to enable this DB cluster to forward + write operations to the primary cluster of a global cluster (Aurora global database). By + default, write operations are not allowed on Aurora DB clusters that are secondary clusters + in an Aurora global database. You can set this value only on Aurora DB clusters that are + members of an Aurora global database. With this parameter enabled, a secondary cluster can + forward writes to the current primary cluster, and the resulting changes are replicated + back to this cluster. For the primary DB cluster of an Aurora global database, this value + is used immediately if the primary is demoted by a global cluster API operation, but it + does nothing until then. Valid for Cluster Type: Aurora DB clusters only +- `"EnableHttpEndpoint"`: Specifies whether to enable the HTTP endpoint for the DB cluster. + By default, the HTTP endpoint isn't enabled. When enabled, the HTTP endpoint provides a + connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. + You can also query your database from inside the RDS console with the RDS query editor. RDS + Data API is supported with the following DB clusters: Aurora PostgreSQL Serverless v2 and + provisioned Aurora PostgreSQL and Aurora MySQL Serverless v1 For more information, see + Using RDS Data API in the Amazon Aurora User Guide. 
Valid for Cluster Type: Aurora DB + clusters only +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon + Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"EnableLimitlessDatabase"`: Specifies whether to enable Aurora Limitless Database. You + must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only -- `"EnableHttpEndpoint"`: A value that indicates whether to enable the HTTP endpoint for an - Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. When enabled, - the HTTP endpoint provides a connectionless web service API for running SQL queries on the - Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS - console with the query editor. For more information, see Using the Data API for Aurora - Serverless v1 in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication in - the Amazon Aurora User Guide. Valid for: Aurora DB clusters only -- `"EnablePerformanceInsights"`: A value that indicates whether to turn on Performance - Insights for the DB cluster. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. Valid for: Multi-AZ DB clusters only +- `"EnableLocalWriteForwarding"`: Specifies whether read replicas can forward write + operations to the writer DB instance in the DB cluster. By default, write operations aren't + allowed on reader DB instances. Valid for: Aurora DB clusters only +- `"EnablePerformanceInsights"`: Specifies whether to turn on Performance Insights for the + DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. Valid for Cluster Type: Multi-AZ DB clusters only +- `"EngineLifecycleSupport"`: The life cycle type for this DB cluster. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon + RDS Extended Support. At the end of standard support, you can avoid charges for Extended + Support by setting the value to open-source-rds-extended-support-disabled. In this case, + creating the DB cluster will fail if the DB major version is past its end of standard + support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended + Support. With RDS Extended Support, you can run the selected major engine version on your + DB cluster past the end of standard support for that engine version. For more information, + see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended + Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support + in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Valid Values: open-source-rds-extended-support | + open-source-rds-extended-support-disabled Default: open-source-rds-extended-support - `"EngineMode"`: The DB engine mode of the DB cluster, either provisioned or serverless. - The serverless engine mode only applies for Aurora Serverless v1 DB clusters. 
For - information about limitations and requirements for Serverless DB clusters, see the - following sections in the Amazon Aurora User Guide: Limitations of Aurora Serverless v1 - Requirements for Aurora Serverless v2 Valid for: Aurora DB clusters only + The serverless engine mode only applies for Aurora Serverless v1 DB clusters. Aurora + Serverless v2 DB clusters use the provisioned engine mode. For information about + limitations and requirements for Serverless DB clusters, see the following sections in the + Amazon Aurora User Guide: Limitations of Aurora Serverless v1 Requirements for + Aurora Serverless v2 Valid for Cluster Type: Aurora DB clusters only - `"EngineVersion"`: The version number of the database engine to use. To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (MySQL 8.0-compatible), use the following command: aws rds describe-db-engine-versions --engine @@ -1181,45 +1231,47 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for MySQL, use the following command: aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\" To list all of the available engine versions for RDS for PostgreSQL, use the following command: aws rds describe-db-engine-versions - --engine postgres --query \"DBEngineVersions[].EngineVersion\" Aurora MySQL For - information, see Database engine updates for Amazon Aurora MySQL in the Amazon Aurora User - Guide. Aurora PostgreSQL For information, see Amazon Aurora PostgreSQL releases and - engine versions in the Amazon Aurora User Guide. MySQL For information, see Amazon RDS - for MySQL in the Amazon RDS User Guide. PostgreSQL For information, see Amazon RDS for - PostgreSQL in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB - clusters + --engine postgres --query \"DBEngineVersions[].EngineVersion\" For information about a + specific engine, see the following topics: Aurora MySQL - see Database engine updates for + Amazon Aurora MySQL in the Amazon Aurora User Guide. Aurora PostgreSQL - see Amazon + Aurora PostgreSQL releases and engine versions in the Amazon Aurora User Guide. RDS for + MySQL - see Amazon RDS for MySQL in the Amazon RDS User Guide. RDS for PostgreSQL - see + Amazon RDS for PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB + clusters and Multi-AZ DB clusters - `"GlobalClusterIdentifier"`: The global cluster ID of an Aurora cluster that becomes the - primary cluster in the new global database cluster. Valid for: Aurora DB clusters only + primary cluster in the new global database cluster. Valid for Cluster Type: Aurora DB + clusters only - `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see Provisioned IOPS storage in the Amazon RDS User Guide. This setting - is required to create a Multi-AZ DB cluster. Constraints: Must be a multiple between .5 and - 50 of the storage amount for the DB cluster. Valid for: Multi-AZ DB clusters only + is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters + only Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB + cluster. - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. 
To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. When a KMS key isn't specified in KmsKeyId: If - ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS will use the - KMS key used to encrypt the source. Otherwise, Amazon RDS will use your default KMS key. - If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't - specified, then Amazon RDS will use your default KMS key. There is a default KMS key for - your Amazon Web Services account. Your Amazon Web Services account has a different default - KMS key for each Amazon Web Services Region. If you create a read replica of an encrypted - DB cluster in another Amazon Web Services Region, you must set KmsKeyId to a KMS key - identifier that is valid in the destination Amazon Web Services Region. This KMS key is - used to encrypt the read replica in that Amazon Web Services Region. Valid for: Aurora DB - clusters and Multi-AZ DB clusters -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. For more information, see Password - management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and - Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User - Guide. Constraints: Can't manage the master user password with Amazon Web Services - Secrets Manager if MasterUserPassword is specified. Valid for: Aurora DB clusters and - Multi-AZ DB clusters -- `"MasterUserPassword"`: The password for the master database user. This password can - contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must - contain from 8 to 41 characters. Can't be specified if ManageMasterUserPassword is turned - on. Valid for: Aurora DB clusters and Multi-AZ DB clusters + ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS uses the KMS + key used to encrypt the source. Otherwise, Amazon RDS uses your default KMS key. If the + StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then + Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web + Services account. Your Amazon Web Services account has a different default KMS key for each + Amazon Web Services Region. If you create a read replica of an encrypted DB cluster in + another Amazon Web Services Region, make sure to set KmsKeyId to a KMS key identifier that + is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the + read replica in that Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters + and Multi-AZ DB clusters +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. For more information, see Password management with + Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management + with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide. Valid for + Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Can't manage the + master user password with Amazon Web Services Secrets Manager if MasterUserPassword is + specified. +- `"MasterUserPassword"`: The password for the master database user. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 8 to 41 + characters. Can contain any printable ASCII character except \"/\", \"\"\", or \"@\". 
+ Can't be specified if ManageMasterUserPassword is turned on. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web @@ -1231,47 +1283,45 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"MasterUsername"`: The name of the master user for the DB cluster. Constraints: Must - be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved - word for the chosen database engine. Valid for: Aurora DB clusters and Multi-AZ DB - clusters + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters +- `"MasterUsername"`: The name of the master user for the DB cluster. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must be 1 to 16 letters or + numbers. First character must be a letter. Can't be a reserved word for the chosen + database engine. - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring - metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, also set - MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 Valid - for: Multi-AZ DB clusters only + metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a + value other than 0. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | + 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid - for: Multi-AZ DB clusters only -- `"NetworkType"`: The network type of the DB cluster. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB cluster. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters only -- `"OptionGroupName"`: A value that indicates that the DB cluster should be associated with - the specified option group. DB clusters are associated with a default option group that - can't be modified. + for Cluster Type: Multi-AZ DB clusters only +- `"NetworkType"`: The network type of the DB cluster. The network type is determined by + the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon Aurora User Guide. 
Valid for Cluster Type: Aurora DB + clusters only Valid Values: IPV4 | DUAL +- `"OptionGroupName"`: The option group to associate the DB cluster with. DB clusters are + associated with a default option group that can't be modified. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a - different default KMS key for each Amazon Web Services Region. Valid for: Multi-AZ DB - clusters only -- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. Valid for: + different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only +- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights + data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, + where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * + 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that + isn't valid, such as 94, Amazon RDS issues an error. - `"Port"`: The port number on which the instances in the DB cluster accept connections. - RDS for MySQL and Aurora MySQL Default: 3306 Valid values: 1150-65535 RDS for - PostgreSQL and Aurora PostgreSQL Default: 5432 Valid values: 1150-65535 Valid for: - Aurora DB clusters and Multi-AZ DB clusters + Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: + 1150-65535 Default: RDS for MySQL and Aurora MySQL - 3306 RDS for PostgreSQL and + Aurora PostgreSQL - 5432 - `"PreSignedUrl"`: When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, an URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region @@ -1295,62 +1345,66 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that - can run in the source Amazon Web Services Region. Valid for: Aurora DB clusters onlyIf you - supply a value for this operation's SourceRegion parameter, a pre-signed URL will be - calculated on your behalf. + can run in the source Amazon Web Services Region. Valid for Cluster Type: Aurora DB + clusters onlyIf you supply a value for this operation's SourceRegion parameter, a + pre-signed URL will be calculated on your behalf. - `"PreferredBackupWindow"`: The daily time range during which automated backups are - created if automated backups are enabled using the BackupRetentionPeriod parameter. 
The - default is a 30-minute window selected at random from an 8-hour block of time for each - Amazon Web Services Region. To view the time blocks available, see Backup window in the - Amazon Aurora User Guide. Constraints: Must be in the format hh24:mi-hh24:mi. Must be - in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance - window. Must be at least 30 minutes. Valid for: Aurora DB clusters and Multi-AZ DB - clusters + created if automated backups are enabled using the BackupRetentionPeriod parameter. Valid + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute + window selected at random from an 8-hour block of time for each Amazon Web Services Region. + To view the time blocks available, see Backup window in the Amazon Aurora User Guide. + Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated + Time (UTC). Must not conflict with the preferred maintenance window. Must be at least + 30 minutes. - `"PreferredMaintenanceWindow"`: The weekly time range during which system maintenance can - occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is - a 30-minute window selected at random from an 8-hour block of time for each Amazon Web + occur. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a + 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. - Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. Constraints: Minimum 30-minute window. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"PubliclyAccessible"`: A value that indicates whether the DB cluster is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access isn't permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with - a DNS name that resolves to a private IP address. Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, - and PubliclyAccessible isn't specified, the following applies: If the default VPC in the - target Region doesn’t have an internet gateway attached to it, the DB cluster is private. - If the default VPC in the target Region has an internet gateway attached to it, the DB - cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't - specified, the following applies: If the subnets are part of a VPC that doesn’t have an - internet gateway attached to it, the DB cluster is private. If the subnets are part of a - VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: - Multi-AZ DB clusters only + Constraints: Must be in the format ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | + Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be + at least 30 minutes. +- `"PubliclyAccessible"`: Specifies whether the DB cluster is publicly accessible. 
When the + DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the + private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to + the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is + ultimately controlled by the security group it uses. That public access isn't permitted if + the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't + publicly accessible, it is an internal DB cluster with a DNS name that resolves to a + private IP address. Valid for Cluster Type: Multi-AZ DB clusters only Default: The default + behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName + isn't specified, and PubliclyAccessible isn't specified, the following applies: If the + default VPC in the target Region doesn’t have an internet gateway attached to it, the DB + cluster is private. If the default VPC in the target Region has an internet gateway + attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and + PubliclyAccessible isn't specified, the following applies: If the subnets are part of a + VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If + the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster + is public. +- `"RdsCustomClusterConfiguration"`: Reserved for future use. - `"ReplicationSourceIdentifier"`: The Amazon Resource Name (ARN) of the source DB instance - or DB cluster if this DB cluster is created as a read replica. Valid for: Aurora DB - clusters and Multi-AZ DB clusters + or DB cluster if this DB cluster is created as a read replica. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"ScalingConfiguration"`: For DB clusters in serverless DB engine mode, the scaling - properties of the DB cluster. Valid for: Aurora DB clusters only + properties of the DB cluster. Valid for Cluster Type: Aurora DB clusters only - `"ServerlessV2ScalingConfiguration"`: - `"SourceRegion"`: The ID of the region that contains the source for the read replica. -- `"StorageEncrypted"`: A value that indicates whether the DB cluster is encrypted. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"StorageType"`: Specifies the storage type to be associated with the DB cluster. This - setting is required to create a Multi-AZ DB cluster. When specified for a Multi-AZ DB - cluster, a value for the Iops parameter is required. Valid values: aurora, aurora-iopt1 - (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 - (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters For more - information on storage types for Aurora DB clusters, see Storage configurations for Amazon - Aurora DB clusters. For more information on storage types for Multi-AZ DB clusters, see - Settings for creating Multi-AZ DB clusters. -- `"Tags"`: Tags to assign to the DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB - clusters +- `"StorageEncrypted"`: Specifies whether the DB cluster is encrypted. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB + clusters. For information on storage types for Multi-AZ DB clusters, see Settings for + creating Multi-AZ DB clusters. 
This setting is required to create a Multi-AZ DB cluster. + When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: Aurora DB + clusters - aurora | aurora-iopt1 Multi-AZ DB clusters - io1 | io2 | gp3 Default: + Aurora DB clusters - aurora Multi-AZ DB clusters - io1 When you create an Aurora DB + cluster with the storage type set to aurora-iopt1, the storage type is returned in the + response. The storage type isn't returned when you set it to aurora. +- `"Tags"`: Tags to assign to the DB cluster. Valid for Cluster Type: Aurora DB clusters + and Multi-AZ DB clusters - `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this DB - cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters + cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters """ function create_dbcluster( DBClusterIdentifier, Engine; aws_config::AbstractAWSConfig=global_aws_config() @@ -1485,7 +1539,7 @@ in the Amazon RDS User Guide. and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family. Aurora MySQL Example: aurora-mysql5.7, aurora-mysql8.0 Aurora PostgreSQL Example: aurora-postgresql14 RDS - for MySQL Example: mysql8.0 RDS for PostgreSQL Example: postgres12 To list all of the + for MySQL Example: mysql8.0 RDS for PostgreSQL Example: postgres13 To list all of the available parameter group families for a DB engine, use the following command: aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine> For example, to list all of the available parameter group families for the @@ -1617,231 +1671,288 @@ Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide. or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. -- `dbinstance_identifier`: The DB instance identifier. This parameter is stored as a - lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. - First character must be a letter. Can't end with a hyphen or contain two consecutive +- `dbinstance_identifier`: The identifier for this DB instance. This parameter is stored as + a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. + First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: mydbinstance -- `engine`: The name of the database engine to be used for this instance. Not every - database engine is available for every Amazon Web Services Region. Valid Values: - aurora-mysql (for Aurora MySQL DB instances) aurora-postgresql (for Aurora PostgreSQL DB - instances) custom-oracle-ee (for RDS Custom for Oracle DB instances) - custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances) custom-sqlserver-ee (for - RDS Custom for SQL Server DB instances) custom-sqlserver-se (for RDS Custom for SQL - Server DB instances) custom-sqlserver-web (for RDS Custom for SQL Server DB instances) - mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb - postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web +- `engine`: The database engine to use for this DB instance. Not every database engine is + available in every Amazon Web Services Region. 
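A minimal sketch of the create_dbcluster call whose docstring closes above, assuming AWS.jl's @service-generated RDS module; the cluster identifier and every optional value are illustrative, chosen to exercise the newer options described in this hunk (Secrets Manager-managed master password, RDS Data API, Extended Support opt-out):

using AWS: @service
@service RDS

# Create an Aurora PostgreSQL DB cluster (all values are illustrative).
RDS.create_dbcluster(
    "my-cluster1",        # DBClusterIdentifier
    "aurora-postgresql",  # Engine
    Dict(
        "MasterUsername" => "adminuser",
        "ManageMasterUserPassword" => true,   # can't also set MasterUserPassword
        "EnableHttpEndpoint" => true,         # RDS Data API, Aurora clusters only
        "EngineLifecycleSupport" => "open-source-rds-extended-support-disabled",
    ),
)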
Valid Values: aurora-mysql (for Aurora + MySQL DB instances) aurora-postgresql (for Aurora PostgreSQL DB instances) + custom-oracle-ee (for RDS Custom for Oracle DB instances) custom-oracle-ee-cdb (for RDS + Custom for Oracle DB instances) custom-oracle-se2 (for RDS Custom for Oracle DB + instances) custom-oracle-se2-cdb (for RDS Custom for Oracle DB instances) + custom-sqlserver-ee (for RDS Custom for SQL Server DB instances) custom-sqlserver-se + (for RDS Custom for SQL Server DB instances) custom-sqlserver-web (for RDS Custom for + SQL Server DB instances) db2-ae db2-se mariadb mysql oracle-ee + oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee + sqlserver-se sqlserver-ex sqlserver-web # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The amount of storage in gibibytes (GiB) to allocate for the DB - instance. Type: Integer Amazon Aurora Not applicable. Aurora cluster volumes + instance. This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume. Amazon RDS Custom Constraints to the amount of storage for each storage type are the following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for - Oracle, 16384 for RDS Custom for SQL Server. Provisioned IOPS storage (io1): Must be an - integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. - MySQL Constraints to the amount of storage for each storage type are the following: + Oracle, 16384 for RDS Custom for SQL Server. Provisioned IOPS storage (io1, io2): Must be + an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. + RDS for Db2 Constraints to the amount of storage for each storage type are the + following: General Purpose (SSD) storage (gp3): Must be an integer from 20 to 65536. + Provisioned IOPS storage (io1, io2): Must be an integer from 100 to 65536. RDS for + MariaDB Constraints to the amount of storage for each storage type are the following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. Magnetic storage - (standard): Must be an integer from 5 to 3072. MariaDB Constraints to the amount of - storage for each storage type are the following: General Purpose (SSD) storage (gp2, - gp3): Must be an integer from 20 to 65536. Provisioned IOPS storage (io1): Must be an - integer from 100 to 65536. Magnetic storage (standard): Must be an integer from 5 to - 3072. PostgreSQL Constraints to the amount of storage for each storage type are the - following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. Magnetic storage - (standard): Must be an integer from 5 to 3072. Oracle Constraints to the amount of + Provisioned IOPS storage (io1, io2): Must be an integer from 100 to 65536. Magnetic + storage (standard): Must be an integer from 5 to 3072. RDS for MySQL Constraints to the + amount of storage for each storage type are the following: General Purpose (SSD) storage + (gp2, gp3): Must be an integer from 20 to 65536. Provisioned IOPS storage (io1, io2): + Must be an integer from 100 to 65536. Magnetic storage (standard): Must be an integer + from 5 to 3072. 
RDS for Oracle Constraints to the amount of storage for each storage + type are the following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from + 20 to 65536. Provisioned IOPS storage (io1, io2): Must be an integer from 100 to 65536. + Magnetic storage (standard): Must be an integer from 10 to 3072. RDS for PostgreSQL + Constraints to the amount of storage for each storage type are the following: General + Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. Provisioned IOPS + storage (io1, io2): Must be an integer from 100 to 65536. Magnetic storage (standard): + Must be an integer from 5 to 3072. RDS for SQL Server Constraints to the amount of storage for each storage type are the following: General Purpose (SSD) storage (gp2, - gp3): Must be an integer from 20 to 65536. Provisioned IOPS storage (io1): Must be an - integer from 100 to 65536. Magnetic storage (standard): Must be an integer from 10 to - 3072. SQL Server Constraints to the amount of storage for each storage type are the - following: General Purpose (SSD) storage (gp2, gp3): Enterprise and Standard editions: - Must be an integer from 20 to 16384. Web and Express editions: Must be an integer from 20 - to 16384. Provisioned IOPS storage (io1): Enterprise and Standard editions: Must be - an integer from 100 to 16384. Web and Express editions: Must be an integer from 100 to - 16384. Magnetic storage (standard): Enterprise and Standard editions: Must be an - integer from 20 to 1024. Web and Express editions: Must be an integer from 20 to 1024. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the DB instance during the maintenance window. By default, minor - engine upgrades are applied automatically. If you create an RDS Custom DB instance, you - must set AutoMinorVersionUpgrade to false. + gp3): Enterprise and Standard editions: Must be an integer from 20 to 16384. Web and + Express editions: Must be an integer from 20 to 16384. Provisioned IOPS storage (io1, + io2): Enterprise and Standard editions: Must be an integer from 100 to 16384. Web and + Express editions: Must be an integer from 100 to 16384. Magnetic storage (standard): + Enterprise and Standard editions: Must be an integer from 20 to 1024. Web and Express + editions: Must be an integer from 20 to 1024. +- `"AutoMinorVersionUpgrade"`: Specifies whether minor engine upgrades are applied + automatically to the DB instance during the maintenance window. By default, minor engine + upgrades are applied automatically. If you create an RDS Custom DB instance, you must set + AutoMinorVersionUpgrade to false. - `"AvailabilityZone"`: The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and - Availability Zones. Amazon Aurora Each Aurora DB cluster hosts copies of its storage in - three separate Availability Zones. Specify one of these Availability Zones. Aurora + Availability Zones. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage + in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. - Example: us-east-1d Constraint: The AvailabilityZone parameter can't be specified if the - DB instance is a Multi-AZ deployment. 
The specified Availability Zone must be in the same - Amazon Web Services Region as the current endpoint. + Constraints: The AvailabilityZone parameter can't be specified if the DB instance is a + Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web + Services Region as the current endpoint. Example: us-east-1d - `"BackupRetentionPeriod"`: The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 - disables automated backups. Amazon Aurora Not applicable. The retention period for - automated backups is managed by the DB cluster. Default: 1 Constraints: Must be a value - from 0 to 35 Can't be set to 0 if the DB instance is a source to read replicas Can't be - set to 0 for an RDS Custom for Oracle DB instance -- `"BackupTarget"`: Specifies where automated backups and manual snapshots are stored. - Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services - Region). The default is region. For more information, see Working with Amazon RDS on Amazon - Web Services Outposts in the Amazon RDS User Guide. -- `"CACertificateIdentifier"`: Specifies the CA certificate identifier to use for the DB - instance’s server certificate. This setting doesn't apply to RDS Custom. For more + disables automated backups. This setting doesn't apply to Amazon Aurora DB instances. The + retention period for automated backups is managed by the DB cluster. Default: 1 + Constraints: Must be a value from 0 to 35. Can't be set to 0 if the DB instance is a + source to read replicas. Can't be set to 0 for an RDS Custom for Oracle DB instance. +- `"BackupTarget"`: The location for storing automated backups and manual snapshots. Valid + Values: outposts (Amazon Web Services Outposts) region (Amazon Web Services Region) + Default: region For more information, see Working with Amazon RDS on Amazon Web Services + Outposts in the Amazon RDS User Guide. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. -- `"CharacterSetName"`: For supported engines, this value indicates that the DB instance - should be associated with the specified CharacterSet. This setting doesn't apply to RDS - Custom. However, if you need to change the character set, you can change it on the database - itself. Amazon Aurora Not applicable. The character set is managed by the DB cluster. For - more information, see CreateDBCluster. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy tags from the DB instance - to snapshots of the DB instance. By default, tags are not copied. Amazon Aurora Not - applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for - an Aurora DB instance has no effect on the DB cluster setting. +- `"CharacterSetName"`: For supported engines, the character set (CharacterSet) to + associate the DB instance with. This setting doesn't apply to the following DB instances: + Amazon Aurora - The character set is managed by the DB cluster. For more information, see + CreateDBCluster. RDS Custom - However, if you need to change the character set, you can + change it on the database itself. 
+- `"CopyTagsToSnapshot"`: Specifies whether to copy tags from the DB instance to snapshots + of the DB instance. By default, tags are not copied. This setting doesn't apply to Amazon + Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this + value for an Aurora DB instance has no effect on the DB cluster setting. - `"CustomIamInstanceProfile"`: The instance profile associated with the underlying Amazon - EC2 instance of an RDS Custom DB instance. The instance profile must meet the following - requirements: The profile must exist in your account. The profile must have an IAM role + EC2 instance of an RDS Custom DB instance. This setting is required for RDS Custom. + Constraints: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. - This setting is required for RDS Custom. -- `"DBClusterIdentifier"`: The identifier of the DB cluster that the instance will belong - to. This setting doesn't apply to RDS Custom. +- `"DBClusterIdentifier"`: The identifier of the DB cluster that this DB instance will + belong to. This setting doesn't apply to RDS Custom DB instances. - `"DBName"`: The meaning of this parameter differs according to the database engine you - use. MySQL The name of the database to create when the DB instance is created. If this + use. Amazon Aurora MySQL The name of the database to create when the primary DB instance + of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora + MySQL DB cluster, no database is created in the DB cluster. Constraints: Must contain 1 + to 64 alphanumeric characters. Can't be a word reserved by the database engine. Amazon + Aurora PostgreSQL The name of the database to create when the primary DB instance of the + Aurora PostgreSQL DB cluster is created. A database named postgres is always created. If + this parameter is specified, an additional database with this name is created. Constraints: + It must contain 1 to 63 alphanumeric characters. Must begin with a letter. Subsequent + characters can be letters, underscores, or digits (0 to 9). Can't be a word reserved by + the database engine. Amazon RDS Custom for Oracle The Oracle System ID (SID) of the + created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for + non-CDBs and RDSCDB for CDBs. Default: ORCL Constraints: Must contain 1 to 8 + alphanumeric characters. Must contain a letter. Can't be a word reserved by the + database engine. Amazon RDS Custom for SQL Server Not applicable. Must be null. RDS + for Db2 The name of the database to create when the DB instance is created. If this + parameter isn't specified, no database is created in the DB instance. In some cases, we + recommend that you don't add a database name. For more information, see Additional + considerations in the Amazon RDS User Guide. Constraints: Must contain 1 to 64 letters or + numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or + digits (0-9). Can't be a word reserved by the specified database engine. RDS for + MariaDB The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. 
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified - database engine MariaDB The name of the database to create when the DB instance is - created. If this parameter isn't specified, no database is created in the DB instance. + database engine. RDS for MySQL The name of the database to create when the DB instance + is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word - reserved by the specified database engine PostgreSQL The name of the database to create - when the DB instance is created. If this parameter isn't specified, a database named - postgres is created in the DB instance. Constraints: Must contain 1 to 63 letters, - numbers, or underscores. Must begin with a letter. Subsequent characters can be letters, - underscores, or digits (0-9). Can't be a word reserved by the specified database engine - Oracle The Oracle System ID (SID) of the created DB instance. If you specify null, the - default value ORCL is used. You can't specify the string NULL, or any other reserved word, - for DBName. Default: ORCL Constraints: Can't be longer than 8 characters Amazon RDS - Custom for Oracle The Oracle System ID (SID) of the created RDS Custom DB instance. If you - don't specify a value, the default value is ORCL. Default: ORCL Constraints: It must - contain 1 to 8 alphanumeric characters. It must contain a letter. It can't be a word - reserved by the database engine. Amazon RDS Custom for SQL Server Not applicable. Must - be null. SQL Server Not applicable. Must be null. Amazon Aurora MySQL The name of the - database to create when the primary DB instance of the Aurora MySQL DB cluster is created. - If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in - the DB cluster. Constraints: It must contain 1 to 64 alphanumeric characters. It can't - be a word reserved by the database engine. Amazon Aurora PostgreSQL The name of the - database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is - created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database - named postgres is created in the DB cluster. Constraints: It must contain 1 to 63 - alphanumeric characters. It must begin with a letter. Subsequent characters can be - letters, underscores, or digits (0 to 9). It can't be a word reserved by the database - engine. + reserved by the specified database engine. RDS for Oracle The Oracle System ID (SID) of + the created DB instance. If you don't specify a value, the default value is ORCL. You can't + specify the string null, or any other reserved word, for DBName. Default: ORCL + Constraints: Can't be longer than 8 characters. RDS for PostgreSQL The name of the + database to create when the DB instance is created. A database named postgres is always + created. If this parameter is specified, an additional database with this name is created. + Constraints: Must contain 1 to 63 letters, numbers, or underscores. Must begin with a + letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a + word reserved by the specified database engine. RDS for SQL Server Not applicable. Must + be null. - `"DBParameterGroupName"`: The name of the DB parameter group to associate with this DB - instance. 
If you do not specify a value, then the default DB parameter group for the - specified DB engine and version is used. This setting doesn't apply to RDS Custom. - Constraints: It must be 1 to 255 letters, numbers, or hyphens. The first character must - be a letter. It can't end with a hyphen or contain two consecutive hyphens. + instance. If you don't specify a value, then Amazon RDS uses the default DB parameter group + for the specified DB engine and version. This setting doesn't apply to RDS Custom DB + instances. Constraints: Must be 1 to 255 letters, numbers, or hyphens. The first + character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. - `"DBSecurityGroups"`: A list of DB security groups to associate with this DB instance. This setting applies to the legacy EC2-Classic platform, which is no longer used to create new DB instances. Use the VpcSecurityGroupIds setting instead. - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB instance. Constraints: - Must match the name of an existing DBSubnetGroup. Must not be default. Example: + Must match the name of an existing DB subnet group. Must not be default. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. Amazon Aurora Not applicable. You can enable or disable deletion protection for - the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can - be deleted even when deletion protection is enabled for the DB cluster. +- `"DBSystemId"`: The Oracle system identifier (SID), which is the name of the Oracle + database instance that manages your database files. In this context, the term \"Oracle + database instance\" refers exclusively to the system global area (SGA) and Oracle + background processes. If you don't specify a SID, the value defaults to RDSCDB. The Oracle + SID is also the name of your CDB. +- `"DedicatedLogVolume"`: Indicates whether the DB instance has a dedicated log volume + (DLV) enabled. +- `"DeletionProtection"`: Specifies whether the DB instance has deletion protection + enabled. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. For more information, see Deleting a DB Instance. This + setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion + protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a + DB cluster can be deleted even when deletion protection is enabled for the DB cluster. - `"Domain"`: The Active Directory directory ID to create the DB instance in. Currently, - only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an - Active Directory Domain. For more information, see Kerberos Authentication in the Amazon - RDS User Guide. This setting doesn't apply to RDS Custom. Amazon Aurora Not applicable. - The domain is managed by the DB cluster. -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. This setting doesn't apply to RDS Custom. Amazon Aurora Not - applicable. The domain is managed by the DB cluster. -- `"EnableCloudwatchLogsExports"`: The list of log types that need to be enabled for - exporting to CloudWatch Logs. 
The values in the list depend on the DB engine. For more - information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User - Guide. Amazon Aurora Not applicable. CloudWatch Logs exports are managed by the DB - cluster. RDS Custom Not applicable. MariaDB Possible values are audit, error, general, - and slowquery. Microsoft SQL Server Possible values are agent and error. MySQL Possible - values are audit, error, general, and slowquery. Oracle Possible values are alert, audit, - listener, trace, and oemagent. PostgreSQL Possible values are postgresql and upgrade. -- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP - address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external - connectivity to resources in your Outpost subnets through your on-premises network. For - some use cases, a CoIP can provide lower latency for connections to the DB instance from - outside of its virtual private cloud (VPC) on your local network. For more information - about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the - Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in - the Amazon Web Services Outposts User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication for - MySQL and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS - Custom. Amazon Aurora Not applicable. Mapping Amazon Web Services IAM accounts to - database accounts is managed by the DB cluster. -- `"EnablePerformanceInsights"`: A value that indicates whether to enable Performance - Insights for the DB instance. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"EngineVersion"`: The version number of the database engine to use. For a list of valid - engine versions, use the DescribeDBEngineVersions operation. The following are the database - engines and links to information about the major and minor versions that are available with - Amazon RDS. Not every database engine is available for every Amazon Web Services Region. - Amazon Aurora Not applicable. The version number of the database engine to be used by the - DB instance is managed by the DB cluster. Amazon RDS Custom for Oracle A custom engine - version (CEV) that you have previously created. This setting is required for RDS Custom for - Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is - 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the - Amazon RDS User Guide. Amazon RDS Custom for SQL Server See RDS Custom for SQL Server - general requirements in the Amazon RDS User Guide. MariaDB For information, see MariaDB - on Amazon RDS Versions in the Amazon RDS User Guide. Microsoft SQL Server For - information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide. - MySQL For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide. - Oracle For information, see Oracle Database Engine Release Notes in the Amazon RDS User - Guide. PostgreSQL For information, see Amazon RDS for PostgreSQL versions and extensions - in the Amazon RDS User Guide. 
-- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be - initially allocated for the DB instance. For information about valid IOPS values, see - Amazon RDS DB instance storage in the Amazon RDS User Guide. Constraints: For MariaDB, - MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the - storage amount for the DB instance. For SQL Server DB instances, must be a multiple between - 1 and 50 of the storage amount for the DB instance. Amazon Aurora Not applicable. Storage - is managed by the DB cluster. + you can create only Db2, MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances + in an Active Directory Domain. For more information, see Kerberos Authentication in the + Amazon RDS User Guide. This setting doesn't apply to the following DB instances: Amazon + Aurora (The domain is managed by the DB cluster.) RDS Custom +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. This setting doesn't apply to the following DB instances: Amazon + Aurora (The domain is managed by the DB cluster.) RDS Custom +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain +- `"EnableCloudwatchLogsExports"`: The list of log types to enable for exporting to + CloudWatch Logs. For more information, see Publishing Database Logs to Amazon CloudWatch + Logs in the Amazon RDS User Guide. This setting doesn't apply to the following DB + instances: Amazon Aurora (CloudWatch Logs exports are managed by the DB cluster.) RDS + Custom The following values are valid for each DB engine: RDS for Db2 - diag.log | + notify.log RDS for MariaDB - audit | error | general | slowquery RDS for Microsoft + SQL Server - agent | error RDS for MySQL - audit | error | general | slowquery RDS + for Oracle - alert | audit | listener | trace | oemagent RDS for PostgreSQL - postgresql + | upgrade +- `"EnableCustomerOwnedIp"`: Specifies whether to enable a customer-owned IP address (CoIP) + for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to + resources in your Outpost subnets through your on-premises network. For some use cases, a + CoIP can provide lower latency for connections to the DB instance from outside of its + virtual private cloud (VPC) on your local network. For more information about RDS on + Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS + User Guide. For more information about CoIPs, see Customer-owned IP addresses in the Amazon + Web Services Outposts User Guide. 
+- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information, see IAM Database Authentication for MySQL and + PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to the following DB + instances: Amazon Aurora (Mapping Amazon Web Services IAM accounts to database accounts + is managed by the DB cluster.) RDS Custom +- `"EnablePerformanceInsights"`: Specifies whether to enable Performance Insights for the + DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. This setting doesn't apply to RDS Custom DB instances. +- `"EngineLifecycleSupport"`: The life cycle type for this DB instance. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB instance into + Amazon RDS Extended Support. At the end of standard support, you can avoid charges for + Extended Support by setting the value to open-source-rds-extended-support-disabled. In this + case, creating the DB instance will fail if the DB major version is past its end of + standard support date. This setting applies only to RDS for MySQL and RDS for PostgreSQL. + For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster. You can + use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS + Extended Support, you can run the selected major engine version on your DB instance past + the end of standard support for that engine version. For more information, see Using Amazon + RDS Extended Support in the Amazon RDS User Guide. Valid Values: + open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: + open-source-rds-extended-support +- `"EngineVersion"`: The version number of the database engine to use. This setting doesn't + apply to Amazon Aurora DB instances. The version number of the database engine the DB + instance uses is managed by the DB cluster. For a list of valid engine versions, use the + DescribeDBEngineVersions operation. The following are the database engines and links to + information about the major and minor versions that are available with Amazon RDS. Not + every database engine is available for every Amazon Web Services Region. Amazon RDS Custom + for Oracle A custom engine version (CEV) that you have previously created. This setting is + required for RDS Custom for Oracle. The CEV name has the following format: + 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating + an RDS Custom for Oracle DB instance in the Amazon RDS User Guide. Amazon RDS Custom for + SQL Server See RDS Custom for SQL Server general requirements in the Amazon RDS User + Guide. RDS for Db2 For information, see Db2 on Amazon RDS versions in the Amazon RDS User + Guide. RDS for MariaDB For information, see MariaDB on Amazon RDS versions in the Amazon + RDS User Guide. RDS for Microsoft SQL Server For information, see Microsoft SQL Server + versions on Amazon RDS in the Amazon RDS User Guide. RDS for MySQL For information, see + MySQL on Amazon RDS versions in the Amazon RDS User Guide. RDS for Oracle For + information, see Oracle Database Engine release notes in the Amazon RDS User Guide. RDS + for PostgreSQL For information, see Amazon RDS for PostgreSQL versions and extensions in + the Amazon RDS User Guide. 
+- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to + initially allocate for the DB instance. For information about valid IOPS values, see Amazon + RDS DB instance storage in the Amazon RDS User Guide. This setting doesn't apply to Amazon + Aurora DB instances. Storage is managed by the DB cluster. Constraints: For RDS for Db2, + MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple between .5 and 50 of the + storage amount for the DB instance. For RDS for SQL Server - Must be a multiple between 1 + and 50 of the storage amount for the DB instance. - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted DB instance. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the - key ARN or alias ARN. Amazon Aurora Not applicable. The Amazon Web Services KMS key - identifier is managed by the DB cluster. For more information, see CreateDBCluster. If - StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, - then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web - Services account. Your Amazon Web Services account has a different default KMS key for each - Amazon Web Services Region. Amazon RDS Custom A KMS key is required for RDS Custom - instances. For most RDS engines, if you leave this parameter empty while enabling - StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the - default key when this parameter is empty. You must explicitly specify a key. -- `"LicenseModel"`: License model information for this DB instance. Valid values: - license-included | bring-your-own-license | general-public-license This setting doesn't - apply to RDS Custom. Amazon Aurora Not applicable. -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. For more information, see Password - management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. - Constraints: Can't manage the master user password with Amazon Web Services Secrets - Manager if MasterUserPassword is specified. -- `"MasterUserPassword"`: The password for the master user. The password can include any - printable ASCII character except \"/\", \"\"\", or \"@\". Amazon Aurora Not applicable. - The password for the master user is managed by the DB cluster. Constraints: Can't be - specified if ManageMasterUserPassword is turned on. MariaDB Constraints: Must contain - from 8 to 41 characters. Microsoft SQL Server Constraints: Must contain from 8 to 128 - characters. MySQL Constraints: Must contain from 8 to 41 characters. Oracle - Constraints: Must contain from 8 to 30 characters. PostgreSQL Constraints: Must contain - from 8 to 128 characters. + key ARN or alias ARN. This setting doesn't apply to Amazon Aurora DB instances. The Amazon + Web Services KMS key identifier is managed by the DB cluster. For more information, see + CreateDBCluster. If StorageEncrypted is enabled, and you do not specify a value for the + KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key + for your Amazon Web Services account. Your Amazon Web Services account has a different + default KMS key for each Amazon Web Services Region. For Amazon RDS Custom, a KMS key is + required for DB instances. 
For most RDS engines, if you leave this parameter empty while + enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't + use the default key when this parameter is empty. You must explicitly specify a key. +- `"LicenseModel"`: The license model information for this DB instance. License models for + RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model + requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace + model requires an Amazon Web Services Marketplace subscription. For more information, see + RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is + bring-your-own-license. This setting doesn't apply to Amazon Aurora or RDS Custom DB + instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license + RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included + RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | + license-included RDS for PostgreSQL - postgresql-license +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. For more information, see Password management with + Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't + manage the master user password with Amazon Web Services Secrets Manager if + MasterUserPassword is specified. +- `"MasterUserPassword"`: The password for the master user. This setting doesn't apply to + Amazon Aurora DB instances. The password for the master user is managed by the DB cluster. + Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include + any printable ASCII character except \"/\", \"\"\", or \"@\". For RDS for Oracle, can't + include the \"&\" (ampersand) or the \"'\" (single quotes) character. Length + Constraints: RDS for Db2 - Must contain from 8 to 255 characters. RDS for MariaDB - + Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must contain from 8 + to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. RDS for Oracle + - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 + characters. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web @@ -1853,120 +1964,129 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. -- `"MasterUsername"`: The name for the master user. Amazon Aurora Not applicable. The - name for the master user is managed by the DB cluster. Amazon RDS Constraints: - Required. Must be 1 to 16 letters, numbers, or underscores. First character must be a - letter. Can't be a reserved word for the chosen database engine. +- `"MasterUsername"`: The name for the master user. This setting doesn't apply to Amazon + Aurora DB instances. The name for the master user is managed by the DB cluster. This + setting is required for RDS DB instances. Constraints: Must be 1 to 16 letters, numbers, + or underscores. 
First character must be a letter. Can't be a reserved word for the + chosen database engine. - `"MaxAllocatedStorage"`: The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to - RDS Custom. Amazon Aurora Not applicable. Storage is managed by the DB cluster. + the following DB instances: Amazon Aurora (Storage is managed by the DB cluster.) RDS + Custom - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring - metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must set - MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom. Valid - Values: 0, 1, 5, 10, 15, 30, 60 + metrics, specify 0. If MonitoringRoleArn is specified, then you must set MonitoringInterval + to a value other than 0. This setting doesn't apply to RDS Custom DB instances. Valid + Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn - value. This setting doesn't apply to RDS Custom. -- `"MultiAZ"`: A value that indicates whether the DB instance is a Multi-AZ deployment. You - can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. This - setting doesn't apply to RDS Custom. Amazon Aurora Not applicable. DB instance - Availability Zones (AZs) are managed by the DB cluster. + value. This setting doesn't apply to RDS Custom DB instances. +- `"MultiAZ"`: Specifies whether the DB instance is a Multi-AZ deployment. You can't set + the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. This setting + doesn't apply to the following DB instances: Amazon Aurora (DB instance Availability + Zones (AZs) are managed by the DB cluster.) RDS Custom +- `"MultiTenant"`: Specifies whether to use the multi-tenant configuration or the + single-tenant configuration (default). This parameter only applies to RDS for Oracle + container database (CDB) engines. Note the following restrictions: The DB engine that + you specify in the request must support the multi-tenant configuration. If you attempt to + enable the multi-tenant configuration on a DB engine that doesn't support it, the request + fails. If you specify the multi-tenant configuration when you create your DB instance, + you can't later modify this DB instance to use the single-tenant configuration. - `"NcharCharacterSetName"`: The name of the NCHAR character set for the Oracle DB - instance. This parameter doesn't apply to RDS Custom. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB instance. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon RDS User - Guide. 
-- `"OptionGroupName"`: A value that indicates that the DB instance should be associated - with the specified option group. Permanent options, such as the TDE option for Oracle - Advanced Security TDE, can't be removed from an option group. Also, that option group can't - be removed from a DB instance after it is associated with a DB instance. This setting - doesn't apply to RDS Custom. Amazon Aurora Not applicable. + instance. This setting doesn't apply to RDS Custom DB instances. +- `"NetworkType"`: The network type of the DB instance. The network type is determined by + the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon RDS User Guide. Valid Values: IPV4 | DUAL +- `"OptionGroupName"`: The option group to associate the DB instance with. Permanent + options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an + option group. Also, that option group can't be removed from a DB instance after it is + associated with a DB instance. This setting doesn't apply to Amazon Aurora or RDS Custom DB + instances. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the - key ARN, key ID, alias ARN, or alias name for the KMS key. If you do not specify a value - for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a - default KMS key for your Amazon Web Services account. Your Amazon Web Services account has - a different default KMS key for each Amazon Web Services Region. This setting doesn't apply - to RDS Custom. + key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for + PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default + KMS key for your Amazon Web Services account. Your Amazon Web Services account has a + different default KMS key for each Amazon Web Services Region. This setting doesn't apply + to RDS Custom DB instances. - `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. This setting - doesn't apply to RDS Custom. -- `"Port"`: The port number on which the database accepts connections. MySQL Default: - 3306 Valid values: 1150-65535 Type: Integer MariaDB Default: 3306 Valid values: - 1150-65535 Type: Integer PostgreSQL Default: 5432 Valid values: 1150-65535 Type: - Integer Oracle Default: 1521 Valid values: 1150-65535 SQL Server Default: 1433 Valid - values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156. Amazon - Aurora Default: 3306 Valid values: 1150-65535 Type: Integer + data. This setting doesn't apply to RDS Custom DB instances. Valid Values: 7 month * + 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 + months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention + period that isn't valid, such as 94, Amazon RDS returns an error. +- `"Port"`: The port number on which the database accepts connections. 
This setting doesn't + apply to Aurora DB instances. The port number is managed by the cluster. Valid Values: + 1150-65535 Default: RDS for Db2 - 50000 RDS for MariaDB - 3306 RDS for Microsoft + SQL Server - 1433 RDS for MySQL - 3306 RDS for Oracle - 1521 RDS for PostgreSQL - + 5432 Constraints: For RDS for Microsoft SQL Server, the value can't be 1234, 1434, + 3260, 3343, 3389, 47001, or 49152-49156. - `"PreferredBackupWindow"`: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User - Guide. Amazon Aurora Not applicable. The daily time range for creating automated backups - is managed by the DB cluster. Constraints: Must be in the format hh24:mi-hh24:mi. Must - be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance - window. Must be at least 30 minutes. + Guide. This setting doesn't apply to Amazon Aurora DB instances. The daily time range for + creating automated backups is managed by the DB cluster. Constraints: Must be in the + format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict + with the preferred maintenance window. Must be at least 30 minutes. - `"PreferredMaintenanceWindow"`: The time range each week during which system maintenance - can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS - Maintenance Window. Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window - selected at random from an 8-hour block of time for each Amazon Web Services Region, - occurring on a random day of the week. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. - Constraints: Minimum 30-minute window. + can occur. For more information, see Amazon RDS Maintenance Window in the Amazon RDS User + Guide. The default is a 30-minute window selected at random from an 8-hour block of time + for each Amazon Web Services Region, occurring on a random day of the week. Constraints: + Must be in the format ddd:hh24:mi-ddd:hh24:mi. The day values must be mon | tue | wed | + thu | fri | sat | sun. Must be in Universal Coordinated Time (UTC). Must not conflict + with the preferred backup window. Must be at least 30 minutes. - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the - DB instance class of the DB instance. This setting doesn't apply to RDS Custom. Amazon - Aurora Not applicable. -- `"PromotionTier"`: A value that specifies the order in which an Aurora Replica is - promoted to the primary instance after a failure of the existing primary instance. For more - information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. - This setting doesn't apply to RDS Custom. Default: 1 Valid Values: 0 - 15 -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB instance's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. - Access to the DB instance is ultimately controlled by the security group it uses. That - public access is not permitted if the security group assigned to the DB instance doesn't - permit it. 
When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, - and PubliclyAccessible isn't specified, the following applies: If the default VPC in the + DB instance class of the DB instance. This setting doesn't apply to Amazon Aurora or RDS + Custom DB instances. +- `"PromotionTier"`: The order of priority in which an Aurora Replica is promoted to the + primary instance after a failure of the existing primary instance. For more information, + see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting + doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB instance's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB instance's VPC. Access to the DB + instance is ultimately controlled by the security group it uses. That public access is not + permitted if the security group assigned to the DB instance doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. Default: The default behavior varies depending on whether + DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and + PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private. If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public. -- `"StorageEncrypted"`: A value that indicates whether the DB instance is encrypted. By - default, it isn't encrypted. For RDS Custom instances, either set this parameter to true or - leave it unset. If you set this parameter to false, RDS reports an error. Amazon Aurora - Not applicable. The encryption for DB instances is managed by the DB cluster. -- `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This - setting applies only to the gp3 storage type. This setting doesn't apply to RDS Custom or - Amazon Aurora. -- `"StorageType"`: Specifies the storage type to be associated with the DB instance. Valid - values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a - value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise - gp2 Amazon Aurora Not applicable. Storage is managed by the DB cluster. +- `"StorageEncrypted"`: Specifies whether the DB instance is encrypted. By default, it isn't + encrypted. For RDS Custom DB instances, either enable this setting or leave it unset. + Otherwise, Amazon RDS reports an error. This setting doesn't apply to Amazon Aurora DB + instances. The encryption for DB instances is managed by the DB cluster. +- `"StorageThroughput"`: The storage throughput value for the DB instance.
This setting + applies only to the gp3 storage type. This setting doesn't apply to Amazon Aurora or RDS + Custom DB instances. +- `"StorageType"`: The storage type to associate with the DB instance. If you specify io1, + io2, or gp3, you must also include a value for the Iops parameter. This setting doesn't + apply to Amazon Aurora DB instances. Storage is managed by the DB cluster. Valid Values: + gp2 | gp3 | io1 | io2 | standard Default: io1, if the Iops parameter is specified. + Otherwise, gp2. - `"Tags"`: Tags to assign to the DB instance. - `"TdeCredentialArn"`: The ARN from the key store with which to associate the instance for - TDE encryption. This setting doesn't apply to RDS Custom. Amazon Aurora Not applicable. + TDE encryption. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. - `"TdeCredentialPassword"`: The password for the given ARN from the key store in order to - access the device. This setting doesn't apply to RDS Custom. + access the device. This setting doesn't apply to RDS Custom DB instances. - `"Timezone"`: The time zone of the DB instance. The time zone parameter is currently - supported only by Microsoft SQL Server. + supported only by RDS for Db2 and RDS for SQL Server. - `"VpcSecurityGroupIds"`: A list of Amazon EC2 VPC security groups to associate with this - DB instance. Amazon Aurora Not applicable. The associated list of EC2 VPC security groups - is managed by the DB cluster. Default: The default EC2 VPC security group for the DB subnet - group's VPC. + DB instance. This setting doesn't apply to Amazon Aurora DB instances. The associated list + of EC2 VPC security groups is managed by the DB cluster. Default: The default EC2 VPC + security group for the DB subnet group's VPC. """ function create_dbinstance( DBInstanceClass, @@ -2015,16 +2135,16 @@ end create_dbinstance_read_replica(dbinstance_identifier, params::Dict{String,<:Any}) Creates a new DB instance that acts as a read replica for an existing source DB instance or -Multi-AZ DB cluster. You can create a read replica for a DB instance running MySQL, -MariaDB, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB +Multi-AZ DB cluster. You can create a read replica for a DB instance running Db2, MariaDB, +MySQL, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the -Amazon RDS User Guide. Amazon Aurora doesn't support this operation. Call the -CreateDBInstance operation to create a DB instance for an Aurora DB cluster. All read -replica DB instances are created with backups disabled. All other attributes (including DB -security groups and DB parameter groups) are inherited from the source DB instance or -cluster, except as specified. Your source DB instance or cluster must have backup -retention enabled. +Amazon RDS User Guide. Amazon Aurora doesn't support this operation. To create a DB +instance for an Aurora DB cluster, use the CreateDBInstance operation. All read replica DB +instances are created with backups disabled. All other attributes (including DB security +groups and DB parameter groups) are inherited from the source DB instance or cluster, +except as specified. Your source DB instance or cluster must have backup retention +enabled. # Arguments - `dbinstance_identifier`: The DB instance identifier of the read replica. 
This identifier @@ -2037,75 +2157,95 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys read replica. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the read replica during the maintenance window. This setting - doesn't apply to RDS Custom. Default: Inherits from the source DB instance +- `"AutoMinorVersionUpgrade"`: Specifies whether to automatically apply minor engine + upgrades to the read replica during the maintenance window. This setting doesn't apply to + RDS Custom DB instances. Default: Inherits the value from the source DB instance. - `"AvailabilityZone"`: The Availability Zone (AZ) where the read replica will be created. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the read - replica to snapshots of the read replica. By default, tags are not copied. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the read replica's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more + information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS + User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora + User Guide. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the read replica to + snapshots of the read replica. By default, tags aren't copied. - `"CustomIamInstanceProfile"`: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. - This setting is required for RDS Custom. + This setting is required for RDS Custom DB instances. - `"DBInstanceClass"`: The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for - your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits from the - source DB instance. + your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits the + value from the source DB instance. - `"DBParameterGroupName"`: The name of the DB parameter group to associate with this DB - instance. If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the - DBParameterGroup of source DB instance for a same Region read replica, or the default + instance. If you don't specify a value for DBParameterGroupName, then Amazon RDS uses the + DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. Specifying a - parameter group for this operation is only supported for MySQL and Oracle DB instances. 
It - isn't supported for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or - hyphens. First character must be a letter Can't end with a hyphen or contain two - consecutive hyphens -- `"DBSubnetGroupName"`: Specifies a DB subnet group for the DB instance. The new DB - instance is created in the VPC associated with the DB subnet group. If no DB subnet group - is specified, then the new DB instance isn't created in a VPC. Constraints: If supplied, - must match the name of an existing DBSubnetGroup. The specified DB subnet group must be - in the same Amazon Web Services Region in which the operation is running. All read - replicas in one Amazon Web Services Region that are created from the same source DB - instance must either:> Specify DB subnet groups from the same VPC. All these read - replicas are created in the same VPC. Not specify a DB subnet group. All these read - replicas are created outside of any VPC. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. + parameter group for this operation is only supported for MySQL DB instances for + cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB + instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 + letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen + or contain two consecutive hyphens. +- `"DBSubnetGroupName"`: A DB subnet group for the DB instance. The new DB instance is + created in the VPC associated with the DB subnet group. If no DB subnet group is specified, + then the new DB instance isn't created in a VPC. Constraints: If supplied, must match the + name of an existing DB subnet group. The specified DB subnet group must be in the same + Amazon Web Services Region in which the operation is running. All read replicas in one + Amazon Web Services Region that are created from the same source DB instance must either: + Specify DB subnet groups from the same VPC. All these read replicas are created in the same + VPC. Not specify a DB subnet group. All these read replicas are created outside of any + VPC. Example: mydbsubnetgroup +- `"DedicatedLogVolume"`: Indicates whether the DB instance has a dedicated log volume + (DLV) enabled. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the DB + instance. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. For more information, see Deleting a DB Instance. - `"Domain"`: The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon - RDS User Guide. This setting doesn't apply to RDS Custom. -- `"DomainIAMRoleName"`: The name of the IAM role to be used when making API calls to the - Directory Service. This setting doesn't apply to RDS Custom. + RDS User Guide. This setting doesn't apply to RDS Custom DB instances. +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. 
Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. This setting doesn't apply to RDS Custom DB instances. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain - `"EnableCloudwatchLogsExports"`: The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User - Guide. This setting doesn't apply to RDS Custom. -- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP - address (CoIP) for an RDS on Outposts read replica. A CoIP provides local or external - connectivity to resources in your Outpost subnets through your on-premises network. For - some use cases, a CoIP can provide lower latency for connections to the read replica from - outside of its virtual private cloud (VPC) on your local network. For more information - about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the - Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in - the Amazon Web Services Outposts User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information about IAM database authentication, see - IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This - setting doesn't apply to RDS Custom. -- `"EnablePerformanceInsights"`: A value that indicates whether to enable Performance - Insights for the read replica. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be - initially allocated for the DB instance. + Guide. This setting doesn't apply to RDS Custom DB instances. +- `"EnableCustomerOwnedIp"`: Specifies whether to enable a customer-owned IP address (CoIP) + for an RDS on Outposts read replica. A CoIP provides local or external connectivity to + resources in your Outpost subnets through your on-premises network. For some use cases, a + CoIP can provide lower latency for connections to the read replica from outside of its + virtual private cloud (VPC) on your local network. For more information about RDS on + Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS + User Guide. 
For more information about CoIPs, see Customer-owned IP addresses in the Amazon + Web Services Outposts User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information about IAM database authentication, see IAM + Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This setting + doesn't apply to RDS Custom DB instances. +- `"EnablePerformanceInsights"`: Specifies whether to enable Performance Insights for the + read replica. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. This setting doesn't apply to RDS Custom DB instances. +- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to + initially allocate for the DB instance. - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted read replica. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you create an encrypted read replica in the same Amazon Web Services @@ -2123,45 +2263,44 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring - metrics are collected for the read replica. To disable collecting Enhanced Monitoring - metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also - set MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom. - Valid Values: 0, 1, 5, 10, 15, 30, 60 + metrics are collected for the read replica. To disable collection of Enhanced Monitoring + metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must set + MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom DB + instances. Valid Values: 0, 1, 5, 10, 15, 30, 60 Default: 0 - `"MonitoringRoleArn"`: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, then you must supply a - MonitoringRoleArn value. This setting doesn't apply to RDS Custom. -- `"MultiAZ"`: A value that indicates whether the read replica is in a Multi-AZ deployment. - You can create a read replica as a Multi-AZ DB instance. RDS creates a standby of your - replica in another Availability Zone for failover support for the replica. Creating your - read replica as a Multi-AZ DB instance is independent of whether the source is a Multi-AZ - DB instance or a Multi-AZ DB cluster. This setting doesn't apply to RDS Custom. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL + MonitoringRoleArn value. This setting doesn't apply to RDS Custom DB instances. +- `"MultiAZ"`: Specifies whether the read replica is in a Multi-AZ deployment. You can + create a read replica as a Multi-AZ DB instance. RDS creates a standby of your replica in + another Availability Zone for failover support for the replica. 
Creating your read replica + as a Multi-AZ DB instance is independent of whether the source is a Multi-AZ DB instance or + a Multi-AZ DB cluster. This setting doesn't apply to RDS Custom DB instances. +- `"NetworkType"`: The network type of the DB instance. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for read replica. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. -- `"OptionGroupName"`: The option group the DB instance is associated with. If omitted, the - option group associated with the source instance or cluster is used. For SQL Server, you - must use the option group associated with the source. This setting doesn't apply to RDS - Custom. +- `"OptionGroupName"`: The option group to associate the DB instance with. If not + specified, RDS uses the option group associated with the source DB instance or cluster. + For SQL Server, you must use the option group associated with the source. This setting + doesn't apply to RDS Custom DB instances. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. This setting doesn't apply - to RDS Custom. + to RDS Custom DB instances. - `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. This setting - doesn't apply to RDS Custom. -- `"Port"`: The port number that the DB instance uses for connections. Default: Inherits - from the source DB instance Valid Values: 1150-65535 + data. This setting doesn't apply to RDS Custom DB instances. Valid Values: 7 month * + 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 + months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention + period that isn't valid, such as 94, Amazon RDS returns an error. +- `"Port"`: The port number that the DB instance uses for connections. Valid Values: + 1150-65535 Default: Inherits the value from the source DB instance. - `"PreSignedUrl"`: When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the @@ -2202,19 +2341,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region - read replicas. 
This setting doesn't apply to RDS Custom.If you supply a value for this - operation's SourceRegion parameter, a pre-signed URL will be calculated on your behalf. + read replicas. This setting doesn't apply to RDS Custom DB instances.If you supply a value + for this operation's SourceRegion parameter, a pre-signed URL will be calculated on your + behalf. - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the - DB instance class of the DB instance. This setting doesn't apply to RDS Custom. -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access isn't permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. For more information, see - CreateDBInstance. + DB instance class of the DB instance. This setting doesn't apply to RDS Custom DB instances. +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB cluster's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB + cluster is ultimately controlled by the security group it uses. That public access isn't + permitted if the security group assigned to the DB cluster doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. For more information, see CreateDBInstance. - `"ReplicaMode"`: The open mode of the replica database: mounted or read-only. This parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region @@ -2235,32 +2374,34 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SourceDBInstanceIdentifier"`: The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, with the exception of Oracle and SQL Server, which can have up to five. Constraints: Must be the - identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server DB instance. - Can't be specified if the SourceDBClusterIdentifier parameter is also specified. For the - limitations of Oracle read replicas, see Version and licensing considerations for RDS for - Oracle replicas in the Amazon RDS User Guide. For the limitations of SQL Server read - replicas, see Read replica limitations with SQL Server in the Amazon RDS User Guide. The - specified DB instance must have automatic backups enabled, that is, its backup retention - period must be greater than 0. If the source DB instance is in the same Amazon Web - Services Region as the read replica, specify a valid DB instance identifier. If the - source DB instance is in a different Amazon Web Services Region from the read replica, - specify a valid DB instance ARN. 
For more information, see Constructing an ARN for Amazon - RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which - don't support cross-Region replicas. + identifier of an existing Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server DB + instance. Can't be specified if the SourceDBClusterIdentifier parameter is also + specified. For the limitations of Oracle read replicas, see Version and licensing + considerations for RDS for Oracle replicas in the Amazon RDS User Guide. For the + limitations of SQL Server read replicas, see Read replica limitations with SQL Server in + the Amazon RDS User Guide. The specified DB instance must have automatic backups enabled, + that is, its backup retention period must be greater than 0. If the source DB instance is + in the same Amazon Web Services Region as the read replica, specify a valid DB instance + identifier. If the source DB instance is in a different Amazon Web Services Region from + the read replica, specify a valid DB instance ARN. For more information, see Constructing + an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS + Custom, which don't support cross-Region replicas. - `"SourceRegion"`: The ID of the region that contains the source for the read replica. - `"StorageThroughput"`: Specifies the storage throughput value for the read replica. This - setting doesn't apply to RDS Custom or Amazon Aurora. -- `"StorageType"`: Specifies the storage type to be associated with the read replica. Valid - values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a - value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise - gp2 + setting doesn't apply to RDS Custom or Amazon Aurora DB instances. +- `"StorageType"`: The storage type to associate with the read replica. If you specify io1, + io2, or gp3, you must also include a value for the Iops parameter. Valid Values: gp2 | gp3 + | io1 | io2 | standard Default: io1 if the Iops parameter is specified. Otherwise, gp2. - `"Tags"`: -- `"UseDefaultProcessorFeatures"`: A value that indicates whether the DB instance class of - the DB instance uses its default processor features. This setting doesn't apply to RDS - Custom. +- `"UpgradeStorageConfig"`: Whether to upgrade the storage file system configuration on the + read replica. This option migrates the read replica from the old storage file system layout + to the preferred layout. +- `"UseDefaultProcessorFeatures"`: Specifies whether the DB instance class of the DB + instance uses its default processor features. This setting doesn't apply to RDS Custom DB + instances. - `"VpcSecurityGroupIds"`: A list of Amazon EC2 VPC security groups to associate with the - read replica. This setting doesn't apply to RDS Custom. Default: The default EC2 VPC - security group for the DB subnet group's VPC. + read replica. This setting doesn't apply to RDS Custom DB instances. Default: The default + EC2 VPC security group for the DB subnet group's VPC. """ function create_dbinstance_read_replica( DBInstanceIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -2323,9 +2464,9 @@ modified. command: aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql The output contains duplicates. 
The following are the valid DB engine values: aurora-mysql - aurora-postgresql mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 - oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex - sqlserver-web + aurora-postgresql db2-ae db2-se mysql oracle-ee oracle-ee-cdb + oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se + sqlserver-ex sqlserver-web - `dbparameter_group_name`: The name of the DB parameter group. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter Can't end with a hyphen or contain two consecutive hyphens This value is stored as a lowercase string. @@ -2400,18 +2541,18 @@ Creates a new DB proxy. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DebugLogging"`: Whether the proxy includes detailed information about SQL statements in - its logs. This information helps you to debug issues involving SQL behavior or the - performance and scalability of the proxy connections. The debug information includes the - text of SQL statements that you submit through the proxy. Thus, only enable this setting - when needed for debugging, and only when you have security measures in place to safeguard - any sensitive information that appears in the logs. +- `"DebugLogging"`: Specifies whether the proxy includes detailed information about SQL + statements in its logs. This information helps you to debug issues involving SQL behavior + or the performance and scalability of the proxy connections. The debug information includes + the text of SQL statements that you submit through the proxy. Thus, only enable this + setting when needed for debugging, and only when you have security measures in place to + safeguard any sensitive information that appears in the logs. - `"IdleClientTimeout"`: The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database. -- `"RequireTLS"`: A Boolean parameter that specifies whether Transport Layer Security (TLS) - encryption is required for connections to the proxy. By enabling this setting, you can - enforce encrypted TLS connections to the proxy. +- `"RequireTLS"`: Specifies whether Transport Layer Security (TLS) encryption is required + for connections to the proxy. By enabling this setting, you can enforce encrypted TLS + connections to the proxy. - `"Tags"`: An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy. - `"VpcSecurityGroupIds"`: One or more VPC security group IDs to associate with the new @@ -2486,9 +2627,9 @@ VPC than the proxy's default VPC. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Tags"`: -- `"TargetRole"`: A value that indicates whether the DB proxy endpoint can be used for - read/write or read-only operations. The default is READ_WRITE. The only role that proxies - for RDS for Microsoft SQL Server support is READ_WRITE. +- `"TargetRole"`: The role of the DB proxy endpoint. The role determines whether the + endpoint can be used for read/write or only read operations. The default is READ_WRITE. The + only role that proxies for RDS for Microsoft SQL Server support is READ_WRITE. - `"VpcSecurityGroupIds"`: The VPC security group IDs for the DB proxy endpoint that you create. You can specify a different set of security group IDs than for the original DB proxy. 
The default is the default security group for the VPC. @@ -2596,6 +2737,86 @@ function create_dbsecurity_group( ) end +""" + create_dbshard_group(dbcluster_identifier, dbshard_group_identifier, max_acu) + create_dbshard_group(dbcluster_identifier, dbshard_group_identifier, max_acu, params::Dict{String,<:Any}) + +Creates a new DB shard group for Aurora Limitless Database. You must enable Aurora +Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only + +# Arguments +- `dbcluster_identifier`: The name of the primary DB cluster for the DB shard group. +- `dbshard_group_identifier`: The name of the DB shard group. +- `max_acu`: The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ComputeRedundancy"`: Specifies whether to create standby instances for the DB shard + group. Valid values are the following: 0 - Creates a single, primary DB instance for each + physical shard. This is the default value, and the only one supported for the preview. 1 + - Creates a primary DB instance and a standby instance in a different Availability Zone + (AZ) for each physical shard. 2 - Creates a primary DB instance and two standby instances + in different AZs for each physical shard. +- `"PubliclyAccessible"`: Specifies whether the DB shard group is publicly accessible. When + the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves + to the private IP address from within the DB shard group's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB shard group's VPC. Access to the + DB shard group is ultimately controlled by the security group it uses. That public access + is not permitted if the security group assigned to the DB shard group doesn't permit it. + When the DB shard group isn't publicly accessible, it is an internal DB shard group with a + DNS name that resolves to a private IP address. Default: The default behavior varies + depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, + and PubliclyAccessible isn't specified, the following applies: If the default VPC in the + target Region doesn’t have an internet gateway attached to it, the DB shard group is + private. If the default VPC in the target Region has an internet gateway attached to it, + the DB shard group is public. If DBSubnetGroupName is specified, and PubliclyAccessible + isn't specified, the following applies: If the subnets are part of a VPC that doesn’t + have an internet gateway attached to it, the DB shard group is private. If the subnets + are part of a VPC that has an internet gateway attached to it, the DB shard group is + public. 
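+
+# Example
+An illustrative sketch only, not part of the generated API documentation: the cluster and
+shard group names are hypothetical, and the call assumes the AWS.jl high-level interface
+(`@service RDS`) with credentials already configured via `global_aws_config()`.
+
+    using AWS
+    @service RDS
+
+    # Create a shard group for an existing Aurora Limitless Database cluster, capping it
+    # at 768 Aurora capacity units (ACUs) and requesting one standby per physical shard.
+    RDS.create_dbshard_group(
+        \"my-limitless-cluster\",         # hypothetical DBClusterIdentifier
+        \"my-shard-group\",               # hypothetical DBShardGroupIdentifier
+        768,                              # MaxACU
+        Dict(\"ComputeRedundancy\" => 1), # optional parameter
+    )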
+""" +function create_dbshard_group( + DBClusterIdentifier, + DBShardGroupIdentifier, + MaxACU; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "CreateDBShardGroup", + Dict{String,Any}( + "DBClusterIdentifier" => DBClusterIdentifier, + "DBShardGroupIdentifier" => DBShardGroupIdentifier, + "MaxACU" => MaxACU, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_dbshard_group( + DBClusterIdentifier, + DBShardGroupIdentifier, + MaxACU, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "CreateDBShardGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DBClusterIdentifier" => DBClusterIdentifier, + "DBShardGroupIdentifier" => DBShardGroupIdentifier, + "MaxACU" => MaxACU, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_dbsnapshot(dbinstance_identifier, dbsnapshot_identifier) create_dbsnapshot(dbinstance_identifier, dbsnapshot_identifier, params::Dict{String,<:Any}) @@ -2739,14 +2960,17 @@ RDS event notification in the Amazon Aurora User Guide. # Arguments - `sns_topic_arn`: The Amazon Resource Name (ARN) of the SNS topic created for event - notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it. + notification. SNS automatically creates the ARN when you create a topic and subscribe to + it. RDS doesn't support FIFO (first in, first out) topics. For more information, see + Message ordering and deduplication (FIFO topics) in the Amazon Simple Notification Service + Developer Guide. - `subscription_name`: The name of the subscription. Constraints: The name must be less than 255 characters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Enabled"`: A value that indicates whether to activate the subscription. If the event - notification subscription isn't activated, the subscription is created but not active. +- `"Enabled"`: Specifies whether to activate the subscription. If the event notification + subscription isn't activated, the subscription is created but not active. - `"EventCategories"`: A list of event categories for a particular source type (SourceType) that you want to subscribe to. You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the Amazon RDS User @@ -2767,8 +2991,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SourceType"`: The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all - events are returned. Valid values: db-instance | db-cluster | db-parameter-group | - db-security-group | db-snapshot | db-cluster-snapshot | db-proxy + events are returned. 
Valid Values: db-instance | db-cluster | db-parameter-group | + db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | + custom-engine-version | blue-green-deployment - `"Tags"`: """ function create_event_subscription( @@ -2813,25 +3038,48 @@ Creates an Aurora global database spread across multiple Amazon Web Services Reg global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem. You can create a global database -that is initially empty, and then add a primary cluster and a secondary cluster to it. Or -you can specify an existing Aurora cluster during the create operation, and this cluster -becomes the primary cluster of the global database. This action applies only to Aurora DB -clusters. +that is initially empty, and then create the primary and secondary DB clusters in the +global database. Or you can specify an existing Aurora cluster during the create operation, +and this cluster becomes the primary cluster of the global database. This operation +applies only to Aurora DB clusters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. If you - do not provide a name, Amazon Aurora will not create a database in the global database - cluster you are creating. -- `"DeletionProtection"`: The deletion protection setting for the new global database. The - global database can't be deleted when deletion protection is enabled. -- `"Engine"`: The name of the database engine to be used for this DB cluster. -- `"EngineVersion"`: The engine version of the Aurora global database. -- `"GlobalClusterIdentifier"`: The cluster identifier of the new global database cluster. + don't specify a name, Amazon Aurora doesn't create a database in the global database + cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In + this case, Amazon Aurora uses the database name from the source DB cluster. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the new + global database cluster. The global database can't be deleted when deletion protection is + enabled. +- `"Engine"`: The database engine to use for this global database cluster. Valid Values: + aurora-mysql | aurora-postgresql Constraints: Can't be specified if + SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the engine of the + source DB cluster. +- `"EngineLifecycleSupport"`: The life cycle type for this global database cluster. By + default, this value is set to open-source-rds-extended-support, which enrolls your global + cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid + charges for Extended Support by setting the value to + open-source-rds-extended-support-disabled. In this case, creating the global cluster will + fail if the DB major version is past its end of standard support date. This setting only + applies to Aurora PostgreSQL-based global databases. You can use this setting to enroll + your global cluster into Amazon RDS Extended Support. With RDS Extended Support, you can + run the selected major engine version on your global cluster past the end of standard + support for that engine version. For more information, see Using Amazon RDS Extended + Support in the Amazon Aurora User Guide. 
Valid Values: open-source-rds-extended-support | + open-source-rds-extended-support-disabled Default: open-source-rds-extended-support +- `"EngineVersion"`: The engine version to use for this global database cluster. + Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, + Amazon Aurora uses the engine version of the source DB cluster. +- `"GlobalClusterIdentifier"`: The cluster identifier for this global database cluster. This parameter is stored as a lowercase string. - `"SourceDBClusterIdentifier"`: The Amazon Resource Name (ARN) to use as the primary - cluster of the global database. This parameter is optional. -- `"StorageEncrypted"`: The storage encryption setting for the new global database cluster. + cluster of the global database. If you provide a value for this parameter, don't specify + values for the following settings because Amazon Aurora uses the values from the specified + source DB cluster: DatabaseName Engine EngineVersion StorageEncrypted +- `"StorageEncrypted"`: Specifies whether to enable storage encryption for the new global + database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is + specified. In this case, Amazon Aurora uses the setting from the source DB cluster. """ function create_global_cluster(; aws_config::AbstractAWSConfig=global_aws_config()) return rds( @@ -2849,6 +3097,72 @@ function create_global_cluster( ) end +""" + create_integration(integration_name, source_arn, target_arn) + create_integration(integration_name, source_arn, target_arn, params::Dict{String,<:Any}) + +Creates a zero-ETL integration with Amazon Redshift. + +# Arguments +- `integration_name`: The name of the integration. +- `source_arn`: The Amazon Resource Name (ARN) of the database to use as the source for + replication. +- `target_arn`: The ARN of the Redshift data warehouse to use as the target for replication. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AdditionalEncryptionContext"`: An optional set of non-secret key–value pairs that + contains additional contextual information about the data. For more information, see + Encryption context in the Amazon Web Services Key Management Service Developer Guide. You + can only include this parameter if you specify the KMSKeyId parameter. +- `"DataFilter"`: Data filtering options for the integration. For more information, see + Data filtering for Aurora zero-ETL integrations with Amazon Redshift. Valid for: + Integrations with Aurora MySQL source DB clusters only +- `"Description"`: A description of the integration. +- `"KMSKeyId"`: The Amazon Web Services Key Management System (Amazon Web Services KMS) key + identifier for the key to use to encrypt the integration. If you don't specify an + encryption key, RDS uses a default Amazon Web Services owned key. 
+- `"Tags"`: +""" +function create_integration( + IntegrationName, SourceArn, TargetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "CreateIntegration", + Dict{String,Any}( + "IntegrationName" => IntegrationName, + "SourceArn" => SourceArn, + "TargetArn" => TargetArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_integration( + IntegrationName, + SourceArn, + TargetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "CreateIntegration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IntegrationName" => IntegrationName, + "SourceArn" => SourceArn, + "TargetArn" => TargetArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_option_group(engine_name, major_engine_version, option_group_description, option_group_name) create_option_group(engine_name, major_engine_version, option_group_description, option_group_name, params::Dict{String,<:Any}) @@ -2857,10 +3171,10 @@ Creates a new option group. You can create up to 20 option groups. This command apply to RDS Custom. # Arguments -- `engine_name`: Specifies the name of the engine that this option group should be - associated with. Valid Values: mariadb mysql oracle-ee oracle-ee-cdb - oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se - sqlserver-ex sqlserver-web +- `engine_name`: The name of the engine to associate this option group with. Valid Values: + db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 + oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex + sqlserver-web - `major_engine_version`: Specifies the major version of the engine that this option group should be associated with. - `option_group_description`: The description of the option group. @@ -2918,24 +3232,98 @@ function create_option_group( ) end +""" + create_tenant_database(dbinstance_identifier, master_user_password, master_username, tenant_dbname) + create_tenant_database(dbinstance_identifier, master_user_password, master_username, tenant_dbname, params::Dict{String,<:Any}) + +Creates a tenant database in a DB instance that uses the multi-tenant configuration. Only +RDS for Oracle container database (CDB) instances are supported. + +# Arguments +- `dbinstance_identifier`: The user-supplied DB instance identifier. RDS creates your + tenant database in this DB instance. This parameter isn't case-sensitive. +- `master_user_password`: The password for the master user in your tenant database. + Constraints: Must be 8 to 30 characters. Can include any printable ASCII character + except forward slash (/), double quote (\"), at symbol (@), ampersand (&), or single + quote ('). +- `master_username`: The name for the master user account in your tenant database. RDS + creates this user account in the tenant database and grants privileges to the master user. + This parameter is case-sensitive. Constraints: Must be 1 to 16 letters, numbers, or + underscores. First character must be a letter. Can't be a reserved word for the chosen + database engine. +- `tenant_dbname`: The user-supplied name of the tenant database that you want to create in + your DB instance. This parameter has the same constraints as DBName in CreateDBInstance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CharacterSetName"`: The character set for your tenant database. 
If you don't specify a + value, the character set name defaults to AL32UTF8. +- `"NcharCharacterSetName"`: The NCHAR value for the tenant database. +- `"Tags"`: +""" +function create_tenant_database( + DBInstanceIdentifier, + MasterUserPassword, + MasterUsername, + TenantDBName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "CreateTenantDatabase", + Dict{String,Any}( + "DBInstanceIdentifier" => DBInstanceIdentifier, + "MasterUserPassword" => MasterUserPassword, + "MasterUsername" => MasterUsername, + "TenantDBName" => TenantDBName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_tenant_database( + DBInstanceIdentifier, + MasterUserPassword, + MasterUsername, + TenantDBName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "CreateTenantDatabase", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DBInstanceIdentifier" => DBInstanceIdentifier, + "MasterUserPassword" => MasterUserPassword, + "MasterUsername" => MasterUsername, + "TenantDBName" => TenantDBName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_blue_green_deployment(blue_green_deployment_identifier) delete_blue_green_deployment(blue_green_deployment_identifier, params::Dict{String,<:Any}) Deletes a blue/green deployment. For more information, see Using Amazon RDS Blue/Green -Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS +Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. # Arguments -- `blue_green_deployment_identifier`: The blue/green deployment identifier of the - deployment to be deleted. This parameter isn't case-sensitive. Constraints: Must match - an existing blue/green deployment identifier. +- `blue_green_deployment_identifier`: The unique identifier of the blue/green deployment to + delete. This parameter isn't case-sensitive. Constraints: Must match an existing + blue/green deployment identifier. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeleteTarget"`: A value that indicates whether to delete the resources in the green - environment. You can't specify this option if the blue/green deployment status is - SWITCHOVER_COMPLETED. +- `"DeleteTarget"`: Specifies whether to delete the resources in the green environment. You + can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED. """ function delete_blue_green_deployment( BlueGreenDeploymentIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -2985,8 +3373,8 @@ DeleteCustomDbEngineVersion event. For more information, see Deleting a CEV in RDS User Guide. # Arguments -- `engine`: The database engine. The only supported engines are custom-oracle-ee and - custom-oracle-ee-cdb. +- `engine`: The database engine. RDS Custom for Oracle supports the following values: + custom-oracle-ee custom-oracle-ee-cdb custom-oracle-se2 custom-oracle-se2-cdb - `engine_version`: The custom engine version (CEV) for your DB instance. This option is required for RDS Custom, but optional for Amazon RDS. The combination of Engine and EngineVersion is unique per customer per Amazon Web Services Region. @@ -3040,17 +3428,20 @@ clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeleteAutomatedBackups"`: Specifies whether to remove automated backups immediately + after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to + remove automated backups immediately after the DB cluster is deleted. - `"FinalDBSnapshotIdentifier"`: The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled. Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter results in an error. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter Can't end with a hyphen or contain two consecutive hyphens -- `"SkipFinalSnapshot"`: A value that indicates whether to skip the creation of a final DB - cluster snapshot before the DB cluster is deleted. If skip is specified, no DB cluster - snapshot is created. If skip isn't specified, a DB cluster snapshot is created before the - DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is - created. By default, this parameter is disabled. You must specify a - FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled. +- `"SkipFinalSnapshot"`: Specifies whether to skip the creation of a final DB cluster + snapshot before the DB cluster is deleted. If skip is specified, no DB cluster snapshot is + created. If skip isn't specified, a DB cluster snapshot is created before the DB cluster is + deleted. By default, skip isn't specified, and the DB cluster snapshot is created. By + default, this parameter is disabled. You must specify a FinalDBSnapshotIdentifier + parameter if SkipFinalSnapshot is disabled. """ function delete_dbcluster( DBClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -3081,6 +3472,47 @@ function delete_dbcluster( ) end +""" + delete_dbcluster_automated_backup(db_cluster_resource_id) + delete_dbcluster_automated_backup(db_cluster_resource_id, params::Dict{String,<:Any}) + +Deletes automated backups using the DbClusterResourceId value of the source DB cluster or +the Amazon Resource Name (ARN) of the automated backups. + +# Arguments +- `db_cluster_resource_id`: The identifier for the source DB cluster, which can't be + changed and which is unique to an Amazon Web Services Region. + +""" +function delete_dbcluster_automated_backup( + DbClusterResourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DeleteDBClusterAutomatedBackup", + Dict{String,Any}("DbClusterResourceId" => DbClusterResourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_dbcluster_automated_backup( + DbClusterResourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "DeleteDBClusterAutomatedBackup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DbClusterResourceId" => DbClusterResourceId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_dbcluster_endpoint(dbcluster_endpoint_identifier) delete_dbcluster_endpoint(dbcluster_endpoint_identifier, params::Dict{String,<:Any}) @@ -3220,21 +3652,24 @@ end delete_dbinstance(dbinstance_identifier) delete_dbinstance(dbinstance_identifier, params::Dict{String,<:Any}) -The DeleteDBInstance action deletes a previously provisioned DB instance. 
When you delete a -DB instance, all automated backups for that instance are deleted and can't be recovered. -Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted. -If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting -until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the -status of this operation. The action can't be canceled or reverted once submitted. When a -DB instance is in a failure state and has a status of failed, incompatible-restore, or -incompatible-network, you can only delete it when you skip creation of the final snapshot -with the SkipFinalSnapshot parameter. If the specified DB instance is part of an Amazon -Aurora DB cluster, you can't delete the DB instance if both of the following conditions are -true: The DB cluster is a read replica of another Amazon Aurora DB cluster. The DB -instance is the only instance in the DB cluster. To delete a DB instance in this case, -first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no -longer a read replica. After the promotion completes, then call the DeleteDBInstance API -action to delete the final instance in the DB cluster. +Deletes a previously provisioned DB instance. When you delete a DB instance, all automated +backups for that instance are deleted and can't be recovered. However, manual DB snapshots +of the DB instance aren't deleted. If you request a final DB snapshot, the status of the +Amazon RDS DB instance is deleting until the DB snapshot is created. This operation can't +be canceled or reverted after it begins. To monitor the status of this operation, use +DescribeDBInstance. When a DB instance is in a failure state and has a status of failed, +incompatible-restore, or incompatible-network, you can only delete it when you skip +creation of the final snapshot with the SkipFinalSnapshot parameter. If the specified DB +instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both +of the following conditions are true: The DB cluster is a read replica of another Amazon +Aurora DB cluster. The DB instance is the only instance in the DB cluster. To delete a +DB instance in this case, first use the PromoteReadReplicaDBCluster operation to promote +the DB cluster so that it's no longer a read replica. After the promotion completes, use +the DeleteDBInstance operation to delete the final instance in the DB cluster. For RDS +Custom DB instances, deleting the DB instance permanently deletes the EC2 instance and the +associated EBS volumes. Make sure that you don't terminate or delete these resources before +you delete the DB instance. Otherwise, deleting the DB instance and creation of the final +snapshot might fail. # Arguments - `dbinstance_identifier`: The DB instance identifier for the DB instance to be deleted. @@ -3243,18 +3678,18 @@ action to delete the final instance in the DB cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeleteAutomatedBackups"`: A value that indicates whether to remove automated backups - immediately after the DB instance is deleted. This parameter isn't case-sensitive. The - default is to remove automated backups immediately after the DB instance is deleted. +- `"DeleteAutomatedBackups"`: Specifies whether to remove automated backups immediately + after the DB instance is deleted. This parameter isn't case-sensitive. 
The default is to
+  remove automated backups immediately after the DB instance is deleted.
 - `"FinalDBSnapshotIdentifier"`: The DBSnapshotIdentifier of the new DBSnapshot created
   when the SkipFinalSnapshot parameter is disabled. If you enable this parameter and also
   enable SkipFinalShapshot, the command results in an error. This setting doesn't apply to
   RDS Custom. Constraints:   Must be 1 to 255 letters or numbers.   First character must
   be a letter.   Can't end with a hyphen or contain two consecutive hyphens.   Can't be
   specified when deleting a read replica.
-- `"SkipFinalSnapshot"`: A value that indicates whether to skip the creation of a final DB
-  snapshot before deleting the instance. If you enable this parameter, RDS doesn't create a
-  DB snapshot. If you don't enable this parameter, RDS creates a DB snapshot before the DB
+- `"SkipFinalSnapshot"`: Specifies whether to skip the creation of a final DB snapshot
+  before deleting the instance. If you enable this parameter, RDS doesn't create a DB
+  snapshot. If you don't enable this parameter, RDS creates a DB snapshot before the DB
   instance is deleted. By default, skip isn't enabled, and the DB snapshot is created. If
   you don't enable this parameter, you must specify the FinalDBSnapshotIdentifier parameter.
   When a DB instance is in a failure state and has a status of failed, incompatible-restore,
@@ -3462,27 +3897,66 @@ not in a VPC into a VPC in the Amazon RDS User Guide.
   consecutive hyphens   Must not be \"Default\"

 """
-function delete_dbsecurity_group(
-    DBSecurityGroupName; aws_config::AbstractAWSConfig=global_aws_config()
+function delete_dbsecurity_group(
+    DBSecurityGroupName; aws_config::AbstractAWSConfig=global_aws_config()
)
    return rds(
        "DeleteDBSecurityGroup",
        Dict{String,Any}("DBSecurityGroupName" => DBSecurityGroupName);
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end
function delete_dbsecurity_group(
    DBSecurityGroupName,
    params::AbstractDict{String};
    aws_config::AbstractAWSConfig=global_aws_config(),
)
    return rds(
        "DeleteDBSecurityGroup",
        Dict{String,Any}(
            mergewith(
                _merge,
                Dict{String,Any}("DBSecurityGroupName" => DBSecurityGroupName),
                params,
            ),
        );
        aws_config=aws_config,
        feature_set=SERVICE_FEATURE_SET,
    )
end

+"""
+    delete_dbshard_group(dbshard_group_identifier)
+    delete_dbshard_group(dbshard_group_identifier, params::Dict{String,<:Any})
+
+Deletes an Aurora Limitless Database DB shard group.
+
+# Arguments
+- `dbshard_group_identifier`: The name of the DB shard group to delete. 
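+
+# Example
+A minimal sketch, assuming the AWS.jl high-level interface (`@service RDS`) and a
+previously created shard group; the name below is hypothetical.
+
+    using AWS
+    @service RDS
+
+    # Delete the DB shard group by its user-supplied name.
+    RDS.delete_dbshard_group(\"my-shard-group\")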
+ +""" +function delete_dbshard_group( + DBShardGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return rds( - "DeleteDBSecurityGroup", - Dict{String,Any}("DBSecurityGroupName" => DBSecurityGroupName); + "DeleteDBShardGroup", + Dict{String,Any}("DBShardGroupIdentifier" => DBShardGroupIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_dbsecurity_group( - DBSecurityGroupName, +function delete_dbshard_group( + DBShardGroupIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return rds( - "DeleteDBSecurityGroup", + "DeleteDBShardGroup", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("DBSecurityGroupName" => DBSecurityGroupName), + Dict{String,Any}("DBShardGroupIdentifier" => DBShardGroupIdentifier), params, ), ); @@ -3651,6 +4125,45 @@ function delete_global_cluster( ) end +""" + delete_integration(integration_identifier) + delete_integration(integration_identifier, params::Dict{String,<:Any}) + +Deletes a zero-ETL integration with Amazon Redshift. + +# Arguments +- `integration_identifier`: The unique identifier of the integration. + +""" +function delete_integration( + IntegrationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DeleteIntegration", + Dict{String,Any}("IntegrationIdentifier" => IntegrationIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_integration( + IntegrationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "DeleteIntegration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IntegrationIdentifier" => IntegrationIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_option_group(option_group_name) delete_option_group(option_group_name, params::Dict{String,<:Any}) @@ -3689,6 +4202,68 @@ function delete_option_group( ) end +""" + delete_tenant_database(dbinstance_identifier, tenant_dbname) + delete_tenant_database(dbinstance_identifier, tenant_dbname, params::Dict{String,<:Any}) + +Deletes a tenant database from your DB instance. This command only applies to RDS for +Oracle container database (CDB) instances. You can't delete a tenant database when it is +the only tenant in the DB instance. + +# Arguments +- `dbinstance_identifier`: The user-supplied identifier for the DB instance that contains + the tenant database that you want to delete. +- `tenant_dbname`: The user-supplied name of the tenant database that you want to remove + from your DB instance. Amazon RDS deletes the tenant database with this name. This + parameter isn’t case-sensitive. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"FinalDBSnapshotIdentifier"`: The DBSnapshotIdentifier of the new DBSnapshot created + when the SkipFinalSnapshot parameter is disabled. If you enable this parameter and also + enable SkipFinalShapshot, the command results in an error. +- `"SkipFinalSnapshot"`: Specifies whether to skip the creation of a final DB snapshot + before removing the tenant database from your DB instance. If you enable this parameter, + RDS doesn't create a DB snapshot. If you don't enable this parameter, RDS creates a DB + snapshot before it deletes the tenant database. By default, RDS doesn't skip the final + snapshot. If you don't enable this parameter, you must specify the + FinalDBSnapshotIdentifier parameter. 
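+
+# Example
+An illustrative call, not taken from the service definition: the instance and tenant names
+are hypothetical, and the optional parameters dictionary skips the final snapshot.
+
+    using AWS
+    @service RDS
+
+    # Drop a tenant database from an RDS for Oracle CDB instance without a final snapshot.
+    RDS.delete_tenant_database(
+        \"my-oracle-cdb\",                   # hypothetical DBInstanceIdentifier
+        \"mytenantdb\",                      # hypothetical TenantDBName
+        Dict(\"SkipFinalSnapshot\" => true),
+    )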
+""" +function delete_tenant_database( + DBInstanceIdentifier, TenantDBName; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DeleteTenantDatabase", + Dict{String,Any}( + "DBInstanceIdentifier" => DBInstanceIdentifier, "TenantDBName" => TenantDBName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_tenant_database( + DBInstanceIdentifier, + TenantDBName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "DeleteTenantDatabase", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DBInstanceIdentifier" => DBInstanceIdentifier, + "TenantDBName" => TenantDBName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ deregister_dbproxy_targets(dbproxy_name) deregister_dbproxy_targets(dbproxy_name, params::Dict{String,<:Any}) @@ -3761,33 +4336,33 @@ end describe_blue_green_deployments() describe_blue_green_deployments(params::Dict{String,<:Any}) -Returns information about blue/green deployments. For more information, see Using Amazon -RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using -Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. +Describes one or more blue/green deployments. For more information, see Using Amazon RDS +Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon +RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BlueGreenDeploymentIdentifier"`: The blue/green deployment identifier. If this - parameter is specified, information from only the specific blue/green deployment is - returned. This parameter isn't case-sensitive. Constraints: If supplied, must match an - existing blue/green deployment identifier. +- `"BlueGreenDeploymentIdentifier"`: The blue/green deployment identifier. If you specify + this parameter, the response only includes information about the specific blue/green + deployment. This parameter isn't case-sensitive. Constraints: Must match an existing + blue/green deployment identifier. - `"Filters"`: A filter that specifies one or more blue/green deployments to describe. - Supported filters: blue-green-deployment-identifier - Accepts system-generated - identifiers for blue/green deployments. The results list only includes information about - the blue/green deployments with the specified identifiers. blue-green-deployment-name - - Accepts user-supplied names for blue/green deployments. The results list only includes - information about the blue/green deployments with the specified names. source - Accepts - source databases for a blue/green deployment. The results list only includes information - about the blue/green deployments with the specified source databases. target - Accepts - target databases for a blue/green deployment. The results list only includes information - about the blue/green deployments with the specified target databases. + Valid Values: blue-green-deployment-identifier - Accepts system-generated identifiers + for blue/green deployments. The results list only includes information about the blue/green + deployments with the specified identifiers. blue-green-deployment-name - Accepts + user-supplied names for blue/green deployments. 
The results list only includes information + about the blue/green deployments with the specified names. source - Accepts source + databases for a blue/green deployment. The results list only includes information about the + blue/green deployments with the specified source databases. target - Accepts target + databases for a blue/green deployment. The results list only includes information about the + blue/green deployments with the specified target databases. - `"Marker"`: An optional pagination token provided by a previous - DescribeBlueGreenDeployments request. If this parameter is specified, the response includes - only records beyond the marker, up to the value specified by MaxRecords. + DescribeBlueGreenDeployments request. If you specify this parameter, the response only + includes records beyond the marker, up to the value specified by MaxRecords. - `"MaxRecords"`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included - in the response so you can retrieve the remaining results. Default: 100 Constraints: - Minimum 20, maximum 100. + in the response so you can retrieve the remaining results. Default: 100 Constraints: Must + be a minimum of 20. Can't exceed 100. """ function describe_blue_green_deployments(; aws_config::AbstractAWSConfig=global_aws_config() @@ -3813,10 +4388,10 @@ end describe_certificates() describe_certificates(params::Dict{String,<:Any}) -Lists the set of CA certificates provided by Amazon RDS for this Amazon Web Services -account. For more information, see Using SSL/TLS to encrypt a connection to a DB instance -in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in -the Amazon Aurora User Guide. +Lists the set of certificate authority (CA) certificates provided by Amazon RDS for this +Amazon Web Services account. For more information, see Using SSL/TLS to encrypt a +connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a +connection to a DB cluster in the Amazon Aurora User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3848,6 +4423,57 @@ function describe_certificates( ) end +""" + describe_dbcluster_automated_backups() + describe_dbcluster_automated_backups(params::Dict{String,<:Any}) + +Displays backups for both current and deleted DB clusters. For example, use this operation +to find details about automated backups for previously deleted clusters. Current clusters +are returned for both the DescribeDBClusterAutomatedBackups and DescribeDBClusters +operations. All parameters are optional. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DBClusterIdentifier"`: (Optional) The user-supplied DB cluster identifier. If this + parameter is specified, it must match the identifier of an existing DB cluster. It returns + information from the specific DB cluster's automated backup. This parameter isn't + case-sensitive. +- `"DbClusterResourceId"`: The resource ID of the DB cluster that is the source of the + automated backup. This parameter isn't case-sensitive. +- `"Filters"`: A filter that specifies which resources to return based on status. Supported + filters are the following: status retained - Automated backups for deleted clusters + and after backup replication is stopped. db-cluster-id - Accepts DB cluster + identifiers and Amazon Resource Names (ARNs). 
The results list includes only information + about the DB cluster automated backups identified by these ARNs. db-cluster-resource-id + - Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list + includes only information about the DB cluster resources identified by these ARNs. + Returns all resources by default. The status for each resource is specified in the response. +- `"Marker"`: The pagination token provided in the previous request. If this parameter is + specified the response includes only records beyond the marker, up to MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that you can retrieve the remaining results. +""" +function describe_dbcluster_automated_backups(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeDBClusterAutomatedBackups"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_dbcluster_automated_backups( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeDBClusterAutomatedBackups", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_dbcluster_backtracks(dbcluster_identifier) describe_dbcluster_backtracks(dbcluster_identifier, params::Dict{String,<:Any}) @@ -4024,8 +4650,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. -- `"Source"`: A value that indicates to return only parameters for a specific source. - Parameter sources can be engine, service, or customer. +- `"Source"`: A specific source to return parameters for. Valid Values: customer + engine service """ function describe_dbcluster_parameters( DBClusterParameterGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -4128,20 +4754,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value is stored as a lowercase string. Constraints: If supplied, must match the identifier of an existing DBClusterSnapshot. If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified. +- `"DbClusterResourceId"`: A specific DB cluster resource ID to describe. - `"Filters"`: A filter that specifies one or more DB cluster snapshots to describe. Supported filters: db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). db-cluster-snapshot-id - Accepts DB cluster snapshot identifiers. snapshot-type - Accepts types of DB cluster snapshots. engine - Accepts names of database engines. -- `"IncludePublic"`: A value that indicates whether to include manual DB cluster snapshots - that are public and can be copied or restored by any Amazon Web Services account. By - default, the public snapshots are not included. You can share a manual DB cluster snapshot - as public by using the ModifyDBClusterSnapshotAttribute API action. -- `"IncludeShared"`: A value that indicates whether to include shared manual DB cluster - snapshots from other Amazon Web Services accounts that this Amazon Web Services account has - been given permission to copy or restore. By default, these snapshots are not included. 
You - can give an Amazon Web Services account permission to restore a manual DB cluster snapshot - from another Amazon Web Services account by the ModifyDBClusterSnapshotAttribute API action. +- `"IncludePublic"`: Specifies whether to include manual DB cluster snapshots that are + public and can be copied or restored by any Amazon Web Services account. By default, the + public snapshots are not included. You can share a manual DB cluster snapshot as public by + using the ModifyDBClusterSnapshotAttribute API action. +- `"IncludeShared"`: Specifies whether to include shared manual DB cluster snapshots from + other Amazon Web Services accounts that this Amazon Web Services account has been given + permission to copy or restore. By default, these snapshots are not included. You can give + an Amazon Web Services account permission to restore a manual DB cluster snapshot from + another Amazon Web Services account by the ModifyDBClusterSnapshotAttribute API action. - `"Marker"`: An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -4183,20 +4810,20 @@ end describe_dbclusters() describe_dbclusters(params::Dict{String,<:Any}) -Returns information about Amazon Aurora DB clusters and Multi-AZ DB clusters. This API -supports pagination. For more information on Amazon Aurora DB clusters, see What is Amazon -Aurora? in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, see +Describes existing Amazon Aurora DB clusters and Multi-AZ DB clusters. This API supports +pagination. For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? +in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DBClusterIdentifier"`: The user-supplied DB cluster identifier or the Amazon Resource - Name (ARN) of the DB cluster. If this parameter is specified, information from only the + Name (ARN) of the DB cluster. If this parameter is specified, information for only the specific DB cluster is returned. This parameter isn't case-sensitive. Constraints: If - supplied, must match an existing DBClusterIdentifier. + supplied, must match an existing DB cluster identifier. - `"Filters"`: A filter that specifies one or more DB clusters to describe. Supported - filters: clone-group-id - Accepts clone group identifiers. The results list only + Filters: clone-group-id - Accepts clone group identifiers. The results list only includes information about the DB clusters associated with these clone groups. db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB clusters identified by these ARNs. @@ -4206,15 +4833,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys includes information about the DB clusters associated with these domains. engine - Accepts engine names. The results list only includes information about the DB clusters for these engines. 
-- `"IncludeShared"`: Optional Boolean parameter that specifies whether the output includes - information about clusters shared from other Amazon Web Services accounts. +- `"IncludeShared"`: Specifies whether the output includes information about clusters + shared from other Amazon Web Services accounts. - `"Marker"`: An optional pagination token provided by a previous DescribeDBClusters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. - `"MaxRecords"`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: - Minimum 20, maximum 100. + Minimum 20, maximum 100 """ function describe_dbclusters(; aws_config::AbstractAWSConfig=global_aws_config()) return rds("DescribeDBClusters"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -4231,19 +4858,21 @@ end describe_dbengine_versions() describe_dbengine_versions(params::Dict{String,<:Any}) -Returns a list of the available DB engines. +Describes the properties of specific versions of DB engines. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DBParameterGroupFamily"`: The name of a specific DB parameter group family to return - details for. Constraints: If supplied, must match an existing DBParameterGroupFamily. -- `"DefaultOnly"`: A value that indicates whether only the default version of the specified - engine or engine and major version combination is returned. -- `"Engine"`: The database engine to return. Valid Values: aurora-mysql - aurora-postgresql custom-oracle-ee mariadb mysql oracle-ee - oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee - sqlserver-se sqlserver-ex sqlserver-web -- `"EngineVersion"`: The database engine version to return. Example: 5.1.49 + details for. Constraints: If supplied, must match an existing DB parameter group family. +- `"DefaultOnly"`: Specifies whether to return only the default version of the specified + engine or the engine and major version combination. +- `"Engine"`: The database engine to return version details for. Valid Values: + aurora-mysql aurora-postgresql custom-oracle-ee custom-oracle-ee-cdb + custom-oracle-se2 custom-oracle-se2-cdb db2-ae db2-se mariadb mysql + oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee + sqlserver-se sqlserver-ex sqlserver-web +- `"EngineVersion"`: A specific database engine version to return details for. Example: + 5.1.49 - `"Filters"`: A filter that specifies one or more DB engine versions to describe. Supported filters: db-parameter-group-family - Accepts parameter groups family names. The results list only includes information about the DB engine versions for these parameter @@ -4256,20 +4885,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys engine versions. status - Accepts engine version statuses. The results list only includes information about the DB engine versions for these statuses. Valid statuses are the following: available deprecated -- `"IncludeAll"`: A value that indicates whether to include engine versions that aren't - available in the list. The default is to list only available engine versions. -- `"ListSupportedCharacterSets"`: A value that indicates whether to list the supported - character sets for each engine version. 
If this parameter is enabled and the requested - engine supports the CharacterSetName parameter for CreateDBInstance, the response includes - a list of supported character sets for each engine version. For RDS Custom, the default is - not to list supported character sets. If you set ListSupportedCharacterSets to true, RDS - Custom returns no results. -- `"ListSupportedTimezones"`: A value that indicates whether to list the supported time - zones for each engine version. If this parameter is enabled and the requested engine - supports the TimeZone parameter for CreateDBInstance, the response includes a list of - supported time zones for each engine version. For RDS Custom, the default is not to list - supported time zones. If you set ListSupportedTimezones to true, RDS Custom returns no - results. +- `"IncludeAll"`: Specifies whether to also list the engine versions that aren't available. + The default is to list only available engine versions. +- `"ListSupportedCharacterSets"`: Specifies whether to list the supported character sets + for each engine version. If this parameter is enabled and the requested engine supports the + CharacterSetName parameter for CreateDBInstance, the response includes a list of supported + character sets for each engine version. For RDS Custom, the default is not to list + supported character sets. If you enable this parameter, RDS Custom returns no results. +- `"ListSupportedTimezones"`: Specifies whether to list the supported time zones for each + engine version. If this parameter is enabled and the requested engine supports the TimeZone + parameter for CreateDBInstance, the response includes a list of supported time zones for + each engine version. For RDS Custom, the default is not to list supported time zones. If + you enable this parameter, RDS Custom returns no results. - `"Marker"`: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -4312,20 +4939,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys doesn't apply to RDS Custom. - `"DBInstanceIdentifier"`: (Optional) The user-supplied instance identifier. If this parameter is specified, it must match the identifier of an existing DB instance. It returns - information from the specific DB instance' automated backup. This parameter isn't + information from the specific DB instance's automated backup. This parameter isn't case-sensitive. - `"DbiResourceId"`: The resource ID of the DB instance that is the source of the automated backup. This parameter isn't case-sensitive. - `"Filters"`: A filter that specifies which resources to return based on status. Supported - filters are the following: status active - automated backups for current instances - retained - automated backups for deleted instances and after backup replication is stopped - creating - automated backups that are waiting for the first automated snapshot to be - available db-instance-id - Accepts DB instance identifiers and Amazon Resource Names - (ARNs). The results list includes only information about the DB instance automated backups - identified by these ARNs. dbi-resource-id - Accepts DB resource identifiers and Amazon + filters are the following: status active - Automated backups for current instances. + creating - Automated backups that are waiting for the first automated snapshot to be + available. 
retained - Automated backups for deleted instances and after backup + replication is stopped. db-instance-id - Accepts DB instance identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance - resources identified by these ARNs. Returns all resources by default. The status for each - resource is specified in the response. + automated backups identified by these ARNs. dbi-resource-id - Accepts DB resource + identifiers and Amazon Resource Names (ARNs). The results list includes only information + about the DB instance resources identified by these ARNs. Returns all resources by + default. The status for each resource is specified in the response. - `"Marker"`: The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords. - `"MaxRecords"`: The maximum number of records to include in the response. If more records @@ -4356,27 +4983,26 @@ end describe_dbinstances() describe_dbinstances(params::Dict{String,<:Any}) -Returns information about provisioned RDS instances. This API supports pagination. This -operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB -instances. +Describes provisioned RDS instances. This API supports pagination. This operation can also +return information for Amazon Neptune DB instances and Amazon DocumentDB instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DBInstanceIdentifier"`: The user-supplied instance identifier or the Amazon Resource Name (ARN) of the DB instance. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive. Constraints: If - supplied, must match the identifier of an existing DBInstance. + supplied, must match the identifier of an existing DB instance. - `"Filters"`: A filter that specifies one or more DB instances to describe. Supported - filters: db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource + Filters: db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB instances associated with the DB clusters identified by these ARNs. db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list only includes information about the DB instances identified by these ARNs. dbi-resource-id - Accepts - DB instance resource identifiers. The results list will only include information about the - DB instances identified by these DB instance resource identifiers. domain - Accepts - Active Directory directory IDs. The results list only includes information about the DB - instances associated with these domains. engine - Accepts engine names. The results list - only includes information about the DB instances for these engines. + DB instance resource identifiers. The results list only includes information about the DB + instances identified by these DB instance resource identifiers. domain - Accepts Active + Directory directory IDs. The results list only includes information about the DB instances + associated with these domains. engine - Accepts engine names. The results list only + includes information about the DB instances for these engines. - `"Marker"`: An optional pagination token provided by a previous DescribeDBInstances request. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -4707,6 +5333,65 @@ function describe_dbproxy_targets( ) end +""" + describe_dbrecommendations() + describe_dbrecommendations(params::Dict{String,<:Any}) + +Describes the recommendations to resolve the issues for your DB instances, DB clusters, and +DB parameter groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: A filter that specifies one or more recommendations to describe. Supported + Filters: recommendation-id - Accepts a list of recommendation identifiers. The results + list only includes the recommendations whose identifier is one of the specified filter + values. status - Accepts a list of recommendation statuses. Valid values: active - + The recommendations which are ready for you to apply. pending - The applied or scheduled + recommendations which are in progress. resolved - The recommendations which are + completed. dismissed - The recommendations that you dismissed. The results list only + includes the recommendations whose status is one of the specified filter values. + severity - Accepts a list of recommendation severities. The results list only includes the + recommendations whose severity is one of the specified filter values. Valid values: high + medium low informational type-id - Accepts a list of recommendation type + identifiers. The results list only includes the recommendations whose type is one of the + specified filter values. dbi-resource-id - Accepts a list of database resource + identifiers. The results list only includes the recommendations that generated for the + specified databases. cluster-resource-id - Accepts a list of cluster resource + identifiers. The results list only includes the recommendations that generated for the + specified clusters. pg-arn - Accepts a list of parameter group ARNs. The results list + only includes the recommendations that generated for the specified parameter groups. + cluster-pg-arn - Accepts a list of cluster parameter group ARNs. The results list only + includes the recommendations that generated for the specified cluster parameter groups. +- `"LastUpdatedAfter"`: A filter to include only the recommendations that were updated + after this specified time. +- `"LastUpdatedBefore"`: A filter to include only the recommendations that were updated + before this specified time. +- `"Locale"`: The language that you choose to return the list of recommendations. Valid + values: en en_UK de es fr id it ja ko pt_BR + zh_TW zh_CN +- `"Marker"`: An optional pagination token provided by a previous DescribeDBRecommendations + request. If this parameter is specified, the response includes only records beyond the + marker, up to the value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of recommendations to include in the response. If more + records exist than the specified MaxRecords value, a pagination token called a marker is + included in the response so that you can retrieve the remaining results. 
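For readers wiring this up, here is a minimal usage sketch (illustrative only, not part of the generated patch). It assumes the usual AWS.jl `@service` entry point, default credentials and region, and a Dict-like parsed response; only flat request parameters are shown because the wire shape of the nested `Filters` value isn't spelled out here.

```julia
using AWS
@service RDS  # assumed standard AWS.jl setup with default credentials and region

# Hedged sketch: request up to 20 DB recommendations, localized to English.
resp = RDS.describe_dbrecommendations(Dict{String,Any}(
    "MaxRecords" => "20",
    "Locale"     => "en",
))
println(resp)  # inspect the parsed response for the recommendation entries

# To page further, pass the returned marker (if any) back in as "Marker" on the next call.
```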
+""" +function describe_dbrecommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return rds( + "DescribeDBRecommendations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_dbrecommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeDBRecommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_dbsecurity_groups() describe_dbsecurity_groups(params::Dict{String,<:Any}) @@ -4747,6 +5432,43 @@ function describe_dbsecurity_groups( ) end +""" + describe_dbshard_groups() + describe_dbshard_groups(params::Dict{String,<:Any}) + +Describes existing Aurora Limitless Database DB shard groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DBShardGroupIdentifier"`: The user-supplied DB shard group identifier or the Amazon + Resource Name (ARN) of the DB shard group. If this parameter is specified, information for + only the specific DB shard group is returned. This parameter isn't case-sensitive. + Constraints: If supplied, must match an existing DB shard group identifier. +- `"Filters"`: A filter that specifies one or more DB shard groups to describe. +- `"Marker"`: An optional pagination token provided by a previous DescribeDBShardGroups + request. If this parameter is specified, the response includes only records beyond the + marker, up to the value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so you can retrieve the remaining results. Default: 100 Constraints: + Minimum 20, maximum 100 +""" +function describe_dbshard_groups(; aws_config::AbstractAWSConfig=global_aws_config()) + return rds( + "DescribeDBShardGroups"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_dbshard_groups( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeDBShardGroups", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_dbsnapshot_attributes(dbsnapshot_identifier) describe_dbsnapshot_attributes(dbsnapshot_identifier, params::Dict{String,<:Any}) @@ -4794,6 +5516,71 @@ function describe_dbsnapshot_attributes( ) end +""" + describe_dbsnapshot_tenant_databases() + describe_dbsnapshot_tenant_databases(params::Dict{String,<:Any}) + +Describes the tenant databases that exist in a DB snapshot. This command only applies to +RDS for Oracle DB instances in the multi-tenant configuration. You can use this command to +inspect the tenant databases within a snapshot before restoring it. You can't directly +interact with the tenant databases in a DB snapshot. If you restore a snapshot that was +taken from DB instance using the multi-tenant configuration, you restore all its tenant +databases. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DBInstanceIdentifier"`: The ID of the DB instance used to create the DB snapshots. This + parameter isn't case-sensitive. Constraints: If supplied, must match the identifier of an + existing DBInstance. +- `"DBSnapshotIdentifier"`: The ID of a DB snapshot that contains the tenant databases to + describe. This value is stored as a lowercase string. 
Constraints: If you specify this + parameter, the value must match the ID of an existing DB snapshot. If you specify an + automatic snapshot, you must also specify SnapshotType. +- `"DbiResourceId"`: A specific DB resource identifier to describe. +- `"Filters"`: A filter that specifies one or more tenant databases to describe. Supported + filters: tenant-db-name - Tenant database names. The results list only includes + information about the tenant databases that match these tenant DB names. + tenant-database-resource-id - Tenant database resource identifiers. The results list only + includes information about the tenant databases contained within the DB snapshots. + dbi-resource-id - DB instance resource identifiers. The results list only includes + information about snapshots containing tenant databases contained within the DB instances + identified by these resource identifiers. db-instance-id - Accepts DB instance + identifiers and DB instance Amazon Resource Names (ARNs). db-snapshot-id - Accepts DB + snapshot identifiers. snapshot-type - Accepts types of DB snapshots. +- `"Marker"`: An optional pagination token provided by a previous + DescribeDBSnapshotTenantDatabases request. If this parameter is specified, the response + includes only records beyond the marker, up to the value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that you can retrieve the remaining results. +- `"SnapshotType"`: The type of DB snapshots to be returned. You can specify one of the + following values: automated – All DB snapshots that have been automatically taken by + Amazon RDS for my Amazon Web Services account. manual – All DB snapshots that have + been taken by my Amazon Web Services account. shared – All manual DB snapshots that + have been shared to my Amazon Web Services account. public – All DB snapshots that + have been marked as public. awsbackup – All DB snapshots managed by the Amazon Web + Services Backup service. +""" +function describe_dbsnapshot_tenant_databases(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeDBSnapshotTenantDatabases"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_dbsnapshot_tenant_databases( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeDBSnapshotTenantDatabases", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_dbsnapshots() describe_dbsnapshots(params::Dict{String,<:Any}) @@ -4815,16 +5602,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Resource Names (ARNs). db-snapshot-id - Accepts DB snapshot identifiers. dbi-resource-id - Accepts identifiers of source DB instances. snapshot-type - Accepts types of DB snapshots. engine - Accepts names of database engines. -- `"IncludePublic"`: A value that indicates whether to include manual DB cluster snapshots - that are public and can be copied or restored by any Amazon Web Services account. By - default, the public snapshots are not included. You can share a manual DB snapshot as - public by using the ModifyDBSnapshotAttribute API. This setting doesn't apply to RDS Custom. 
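Purely as an illustration of the DescribeDBSnapshotTenantDatabases wrapper added above (not part of the generated patch), a hedged call with placeholder identifiers and only flat parameters, assuming the usual AWS.jl `@service` setup:

```julia
using AWS
@service RDS  # assumed standard AWS.jl setup with default credentials and region

# Hedged sketch: list tenant databases captured in manual snapshots of one Oracle CDB
# instance. The instance identifier is a placeholder; substitute one that you own.
resp = RDS.describe_dbsnapshot_tenant_databases(Dict{String,Any}(
    "DBInstanceIdentifier" => "my-oracle-cdb-instance",  # placeholder
    "SnapshotType"         => "manual",
))
println(resp)  # inspect the parsed response for the tenant database entries
```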
-- `"IncludeShared"`: A value that indicates whether to include shared manual DB cluster - snapshots from other Amazon Web Services accounts that this Amazon Web Services account has - been given permission to copy or restore. By default, these snapshots are not included. You - can give an Amazon Web Services account permission to restore a manual DB snapshot from - another Amazon Web Services account by using the ModifyDBSnapshotAttribute API action. This - setting doesn't apply to RDS Custom. +- `"IncludePublic"`: Specifies whether to include manual DB cluster snapshots that are + public and can be copied or restored by any Amazon Web Services account. By default, the + public snapshots are not included. You can share a manual DB snapshot as public by using + the ModifyDBSnapshotAttribute API. This setting doesn't apply to RDS Custom. +- `"IncludeShared"`: Specifies whether to include shared manual DB cluster snapshots from + other Amazon Web Services accounts that this Amazon Web Services account has been given + permission to copy or restore. By default, these snapshots are not included. You can give + an Amazon Web Services account permission to restore a manual DB snapshot from another + Amazon Web Services account by using the ModifyDBSnapshotAttribute API action. This setting + doesn't apply to RDS Custom. - `"Marker"`: An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -4964,15 +5751,16 @@ engine. - `dbparameter_group_family`: The name of the DB parameter group family. Valid Values: aurora-mysql5.7 aurora-mysql8.0 aurora-postgresql10 aurora-postgresql11 aurora-postgresql12 aurora-postgresql13 aurora-postgresql14 custom-oracle-ee-19 - mariadb10.2 mariadb10.3 mariadb10.4 mariadb10.5 mariadb10.6 - mysql5.7 mysql8.0 oracle-ee-19 oracle-ee-cdb-19 oracle-ee-cdb-21 - oracle-se2-19 oracle-se2-cdb-19 oracle-se2-cdb-21 postgres10 postgres11 - postgres12 postgres13 postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 - sqlserver-ee-13.0 sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 - sqlserver-ex-12.0 sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 - sqlserver-se-11.0 sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 - sqlserver-se-15.0 sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 - sqlserver-web-14.0 sqlserver-web-15.0 + custom-oracle-ee-cdb-19 db2-ae db2-se mariadb10.2 mariadb10.3 + mariadb10.4 mariadb10.5 mariadb10.6 mysql5.7 mysql8.0 oracle-ee-19 + oracle-ee-cdb-19 oracle-ee-cdb-21 oracle-se2-19 oracle-se2-cdb-19 + oracle-se2-cdb-21 postgres10 postgres11 postgres12 postgres13 + postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 sqlserver-ee-13.0 + sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 sqlserver-ex-12.0 + sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 sqlserver-se-11.0 + sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 sqlserver-se-15.0 + sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 sqlserver-web-14.0 + sqlserver-web-15.0 # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5026,7 +5814,7 @@ messages\" section of the Amazon RDS User Guide or the Amazon Aurora User Gui Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Filters"`: This parameter isn't currently supported. - `"SourceType"`: The type of source that is generating the events. For RDS Proxy events, - specify db-proxy. 
Valid values: db-instance | db-cluster | db-parameter-group | + specify db-proxy. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy """ function describe_event_categories(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -5195,7 +5983,10 @@ User Guide. This action only applies to Aurora DB clusters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: This parameter isn't currently supported. +- `"Filters"`: A filter that specifies one or more global database clusters to describe. + This parameter is case-sensitive. Currently, the only supported filter is region. If used, + the request returns information about any global cluster with at least one member (primary + or secondary) in the specified Amazon Web Services Regions. - `"GlobalClusterIdentifier"`: The user-supplied DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case-sensitive. Constraints: If supplied, must match an existing @@ -5224,17 +6015,51 @@ function describe_global_clusters( ) end +""" + describe_integrations() + describe_integrations(params::Dict{String,<:Any}) + +Describe one or more zero-ETL integrations with Amazon Redshift. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: A filter that specifies one or more resources to return. +- `"IntegrationIdentifier"`: The unique identifier of the integration. +- `"Marker"`: An optional pagination token provided by a previous DescribeIntegrations + request. If this parameter is specified, the response includes only records beyond the + marker, up to the value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that you can retrieve the remaining results. Default: 100 Constraints: + Minimum 20, maximum 100. +""" +function describe_integrations(; aws_config::AbstractAWSConfig=global_aws_config()) + return rds( + "DescribeIntegrations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_integrations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeIntegrations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_option_group_options(engine_name) describe_option_group_options(engine_name, params::Dict{String,<:Any}) -Describes all available options. +Describes all available options for the specified engine. # Arguments -- `engine_name`: A required parameter. Options available for the given engine name are - described. Valid Values: mariadb mysql oracle-ee oracle-ee-cdb - oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se - sqlserver-ex sqlserver-web +- `engine_name`: The name of the engine to describe options for. Valid Values: db2-ae + db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 + oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex + sqlserver-web # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5282,8 +6107,8 @@ Describes the available option groups. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"EngineName"`: Filters the list of option groups to only include groups associated with - a specific database engine. Valid Values: mariadb mysql oracle-ee +- `"EngineName"`: A filter to only include option groups associated with this database + engine. Valid Values: db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web - `"Filters"`: This parameter isn't currently supported. @@ -5320,39 +6145,38 @@ end describe_orderable_dbinstance_options(engine) describe_orderable_dbinstance_options(engine, params::Dict{String,<:Any}) -Returns a list of orderable DB instance options for the specified DB engine, DB engine -version, and DB instance class. +Describes the orderable DB instance options for a specified DB engine. # Arguments -- `engine`: The name of the engine to retrieve DB instance options for. Valid Values: - aurora-mysql aurora-postgresql custom-oracle-ee mariadb mysql oracle-ee - oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee - sqlserver-se sqlserver-ex sqlserver-web +- `engine`: The name of the database engine to describe DB instance options for. Valid + Values: aurora-mysql aurora-postgresql custom-oracle-ee custom-oracle-ee-cdb + custom-oracle-se2 custom-oracle-se2-cdb db2-ae db2-se mariadb mysql + oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres + sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AvailabilityZoneGroup"`: The Availability Zone group associated with a Local Zone. - Specify this parameter to retrieve available offerings for the Local Zones in the group. - Omit this parameter to show the available offerings in the specified Amazon Web Services - Region. This setting doesn't apply to RDS Custom. -- `"DBInstanceClass"`: The DB instance class filter value. Specify this parameter to show - only the available offerings matching the specified DB instance class. -- `"EngineVersion"`: The engine version filter value. Specify this parameter to show only - the available offerings matching the specified engine version. + Specify this parameter to retrieve available options for the Local Zones in the group. Omit + this parameter to show the available options in the specified Amazon Web Services Region. + This setting doesn't apply to RDS Custom DB instances. +- `"DBInstanceClass"`: A filter to include only the available options for the specified DB + instance class. +- `"EngineVersion"`: A filter to include only the available options for the specified + engine version. - `"Filters"`: This parameter isn't currently supported. -- `"LicenseModel"`: The license model filter value. Specify this parameter to show only the - available offerings matching the specified license model. RDS Custom supports only the BYOL - licensing model. +- `"LicenseModel"`: A filter to include only the available options for the specified + license model. RDS Custom supports only the BYOL licensing model. - `"Marker"`: An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. - `"MaxRecords"`: The maximum number of records to include in the response. 
If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results. Default: 100 Constraints: - Minimum 20, maximum 10000. -- `"Vpc"`: A value that indicates whether to show only VPC or non-VPC offerings. RDS Custom - supports only VPC offerings. RDS Custom supports only VPC offerings. If you describe - non-VPC offerings for RDS Custom, the output shows VPC offerings. + Minimum 20, maximum 1000. +- `"Vpc"`: Specifies whether to show only VPC or non-VPC offerings. RDS Custom supports + only VPC offerings. RDS Custom supports only VPC offerings. If you describe non-VPC + offerings for RDS Custom, the output shows VPC offerings. """ function describe_orderable_dbinstance_options( Engine; aws_config::AbstractAWSConfig=global_aws_config() @@ -5444,8 +6268,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. -- `"MultiAZ"`: A value that indicates whether to show only those reservations that support - Multi-AZ. +- `"MultiAZ"`: Specifies whether to show only those reservations that support Multi-AZ. - `"OfferingType"`: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type. Valid Values: \"Partial Upfront\" | \"All Upfront\" | \"No Upfront\" @@ -5495,8 +6318,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. -- `"MultiAZ"`: A value that indicates whether to show only those reservations that support - Multi-AZ. +- `"MultiAZ"`: Specifies whether to show only those reservations that support Multi-AZ. - `"OfferingType"`: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type. Valid Values: \"Partial Upfront\" | \"All Upfront\" | \"No Upfront\" @@ -5520,7 +6342,48 @@ function describe_reserved_dbinstances_offerings( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return rds( - "DescribeReservedDBInstancesOfferings", + "DescribeReservedDBInstancesOfferings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_source_regions() + describe_source_regions(params::Dict{String,<:Any}) + +Returns a list of the source Amazon Web Services Regions where the current Amazon Web +Services Region can create a read replica, copy a DB snapshot from, or replicate automated +backups from. Use this operation to determine whether cross-Region features are supported +between other Regions and your current Region. This operation supports pagination. To +return information about the Regions that are enabled for your account, or all Regions, use +the EC2 operation DescribeRegions. For more information, see DescribeRegions in the Amazon +EC2 API Reference. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: This parameter isn't currently supported. +- `"Marker"`: An optional pagination token provided by a previous DescribeSourceRegions + request. 
If this parameter is specified, the response includes only records beyond the + marker, up to the value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so you can retrieve the remaining results. Default: 100 Constraints: + Minimum 20, maximum 100. +- `"RegionName"`: The source Amazon Web Services Region name. For example, us-east-1. + Constraints: Must specify a valid Amazon Web Services Region name. +""" +function describe_source_regions(; aws_config::AbstractAWSConfig=global_aws_config()) + return rds( + "DescribeSourceRegions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_source_regions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DescribeSourceRegions", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -5528,40 +6391,43 @@ function describe_reserved_dbinstances_offerings( end """ - describe_source_regions() - describe_source_regions(params::Dict{String,<:Any}) + describe_tenant_databases() + describe_tenant_databases(params::Dict{String,<:Any}) -Returns a list of the source Amazon Web Services Regions where the current Amazon Web -Services Region can create a read replica, copy a DB snapshot from, or replicate automated -backups from. Use this operation to determine whether cross-Region features are supported -between other Regions and your current Region. This operation supports pagination. To -return information about the Regions that are enabled for your account, or all Regions, use -the EC2 operation DescribeRegions. For more information, see DescribeRegions in the Amazon -EC2 API Reference. +Describes the tenant databases in a DB instance that uses the multi-tenant configuration. +Only RDS for Oracle CDB instances are supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: This parameter isn't currently supported. -- `"Marker"`: An optional pagination token provided by a previous DescribeSourceRegions +- `"DBInstanceIdentifier"`: The user-supplied DB instance identifier, which must match the + identifier of an existing instance owned by the Amazon Web Services account. This parameter + isn't case-sensitive. +- `"Filters"`: A filter that specifies one or more database tenants to describe. Supported + filters: tenant-db-name - Tenant database names. The results list only includes + information about the tenant databases that match these tenant DB names. + tenant-database-resource-id - Tenant database resource identifiers. dbi-resource-id - DB + instance resource identifiers. The results list only includes information about the tenants + contained within the DB instances identified by these resource identifiers. +- `"Marker"`: An optional pagination token provided by a previous DescribeTenantDatabases request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. - `"MaxRecords"`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included - in the response so you can retrieve the remaining results. Default: 100 Constraints: - Minimum 20, maximum 100. -- `"RegionName"`: The source Amazon Web Services Region name. For example, us-east-1. 
- Constraints: Must specify a valid Amazon Web Services Region name. + in the response so that you can retrieve the remaining results. +- `"TenantDBName"`: The user-supplied tenant database name, which must match the name of an + existing tenant database on the specified DB instance owned by your Amazon Web Services + account. This parameter isn’t case-sensitive. """ -function describe_source_regions(; aws_config::AbstractAWSConfig=global_aws_config()) +function describe_tenant_databases(; aws_config::AbstractAWSConfig=global_aws_config()) return rds( - "DescribeSourceRegions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "DescribeTenantDatabases"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end -function describe_source_regions( +function describe_tenant_databases( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return rds( - "DescribeSourceRegions", + "DescribeTenantDatabases", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -5609,6 +6475,45 @@ function describe_valid_dbinstance_modifications( ) end +""" + disable_http_endpoint(resource_arn) + disable_http_endpoint(resource_arn, params::Dict{String,<:Any}) + +Disables the HTTP endpoint for the specified DB cluster. Disabling this endpoint disables +RDS Data API. For more information, see Using RDS Data API in the Amazon Aurora User Guide. + This operation applies only to Aurora PostgreSQL Serverless v2 and provisioned DB +clusters. To disable the HTTP endpoint for Aurora Serverless v1 DB clusters, use the +EnableHttpEndpoint parameter of the ModifyDBCluster operation. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the DB cluster. + +""" +function disable_http_endpoint( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "DisableHttpEndpoint", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disable_http_endpoint( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "DisableHttpEndpoint", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ download_dblog_file_portion(dbinstance_identifier, log_file_name) download_dblog_file_portion(dbinstance_identifier, log_file_name, params::Dict{String,<:Any}) @@ -5676,28 +6581,71 @@ function download_dblog_file_portion( ) end +""" + enable_http_endpoint(resource_arn) + enable_http_endpoint(resource_arn, params::Dict{String,<:Any}) + +Enables the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled. +When enabled, this endpoint provides a connectionless web service API (RDS Data API) for +running SQL queries on the Aurora DB cluster. You can also query your database from inside +the RDS console with the RDS query editor. For more information, see Using RDS Data API in +the Amazon Aurora User Guide. This operation applies only to Aurora PostgreSQL Serverless +v2 and provisioned DB clusters. To enable the HTTP endpoint for Aurora Serverless v1 DB +clusters, use the EnableHttpEndpoint parameter of the ModifyDBCluster operation. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the DB cluster. 
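As a hedged illustration of the pair of Data API wrappers in this hunk (not part of the generated patch), with a placeholder cluster ARN and the assumption that the target is an Aurora PostgreSQL Serverless v2 or provisioned DB cluster:

```julia
using AWS
@service RDS  # assumed standard AWS.jl setup with default credentials and region

# Placeholder ARN of an Aurora PostgreSQL DB cluster that supports these operations.
cluster_arn = "arn:aws:rds:us-east-1:111122223333:cluster:my-aurora-cluster"

RDS.enable_http_endpoint(cluster_arn)   # turn on RDS Data API for the cluster
# ... run Data API workloads against the cluster here ...
RDS.disable_http_endpoint(cluster_arn)  # turn it back off when finished
```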
+ +""" +function enable_http_endpoint( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "EnableHttpEndpoint", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function enable_http_endpoint( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "EnableHttpEndpoint", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ failover_dbcluster(dbcluster_identifier) failover_dbcluster(dbcluster_identifier, params::Dict{String,<:Any}) Forces a failover for a DB cluster. For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the -primary DB instance (the cluster writer). For a Multi-AZ DB cluster, failover for a DB -cluster promotes one of the readable standby DB instances (read-only instances) in the DB -cluster to be the primary DB instance (the cluster writer). An Amazon Aurora DB cluster -automatically fails over to an Aurora Replica, if one exists, when the primary DB instance -fails. A Multi-AZ DB cluster automatically fails over to a readable standby DB instance -when the primary DB instance fails. To simulate a failure of a primary instance for -testing, you can force a failover. Because each instance in a DB cluster has its own -endpoint address, make sure to clean up and re-establish any existing connections that use -those endpoint addresses when the failover is complete. For more information on Amazon -Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide. For more -information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS -User Guide. +primary DB instance (the cluster writer). For a Multi-AZ DB cluster, after RDS terminates +the primary DB instance, the internal monitoring system detects that the primary DB +instance is unhealthy and promotes a readable standby (read-only instances) in the DB +cluster to be the primary DB instance (the cluster writer). Failover times are typically +less than 35 seconds. An Amazon Aurora DB cluster automatically fails over to an Aurora +Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB cluster +automatically fails over to a readable standby DB instance when the primary DB instance +fails. To simulate a failure of a primary instance for testing, you can force a failover. +Because each instance in a DB cluster has its own endpoint address, make sure to clean up +and re-establish any existing connections that use those endpoint addresses when the +failover is complete. For more information on Amazon Aurora DB clusters, see What is +Amazon Aurora? in the Amazon Aurora User Guide. For more information on Multi-AZ DB +clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. # Arguments -- `dbcluster_identifier`: A DB cluster identifier to force a failover for. This parameter - isn't case-sensitive. Constraints: Must match the identifier of an existing DBCluster. +- `dbcluster_identifier`: The identifier of the DB cluster to force a failover for. This + parameter isn't case-sensitive. Constraints: Must match the identifier of an existing DB + cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -5739,29 +6687,49 @@ end failover_global_cluster(global_cluster_identifier, target_db_cluster_identifier) failover_global_cluster(global_cluster_identifier, target_db_cluster_identifier, params::Dict{String,<:Any}) -Initiates the failover process for an Aurora global database (GlobalCluster). A failover -for an Aurora global database promotes one of secondary read-only DB clusters to be the -primary DB cluster and demotes the primary DB cluster to being a secondary (read-only) DB -cluster. In other words, the role of the current primary DB cluster and the selected -(target) DB cluster are switched. The selected secondary DB cluster assumes full read/write -capabilities for the Aurora global database. For more information about failing over an -Amazon Aurora global database, see Managed planned failover for Amazon Aurora global -databases in the Amazon Aurora User Guide. This action applies to GlobalCluster (Aurora -global databases) only. Use this action only on healthy Aurora global databases with -running Aurora DB clusters and no Region-wide outages, to test disaster recovery scenarios -or to reconfigure your Aurora global database topology. +Promotes the specified secondary DB cluster to be the primary DB cluster in the global +database cluster to fail over or switch over a global database. Switchover operations were +previously called \"managed planned failovers.\" Although this operation can be used +either to fail over or to switch over a global database cluster, its intended use is for +global database failover. To switch over a global database cluster, we recommend that you +use the SwitchoverGlobalCluster operation instead. How you use this operation depends on +whether you are failing over or switching over your global database cluster: Failing over +- Specify the AllowDataLoss parameter and don't specify the Switchover parameter. +Switching over - Specify the Switchover parameter or omit it, but don't specify the +AllowDataLoss parameter. About failing over and switching over While failing over and +switching over a global database cluster both change the primary DB cluster, you use these +operations for different reasons: Failing over - Use this operation to respond to an +unplanned event, such as a Regional disaster in the primary Region. Failing over can result +in a loss of write transaction data that wasn't replicated to the chosen secondary before +the failover event occurred. However, the recovery process that promotes a DB instance on +the chosen secondary DB cluster to be the primary writer DB instance guarantees that the +data is in a transactionally consistent state. For more information about failing over an +Amazon Aurora global database, see Performing managed failovers for Aurora global databases +in the Amazon Aurora User Guide. Switching over - Use this operation on a healthy global +database cluster for planned events, such as Regional rotation or to fail back to the +original primary DB cluster after a failover operation. With this operation, there is no +data loss. For more information about switching over an Amazon Aurora global database, see +Performing switchovers for Aurora global databases in the Amazon Aurora User Guide. # Arguments -- `global_cluster_identifier`: Identifier of the Aurora global database (GlobalCluster) - that should be failed over. The identifier is the unique key assigned by the user when the - Aurora global database was created.
In other words, it's the name of the Aurora global - database that you want to fail over. Constraints: Must match the identifier of an - existing GlobalCluster (Aurora global database). -- `target_db_cluster_identifier`: Identifier of the secondary Aurora DB cluster that you - want to promote to primary for the Aurora global database (GlobalCluster.) Use the Amazon - Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon - Web Services Region. +- `global_cluster_identifier`: The identifier of the global database cluster (Aurora global + database) this operation should apply to. The identifier is the unique key assigned by the + user when the Aurora global database is created. In other words, it's the name of the + Aurora global database. Constraints: Must match the identifier of an existing global + database cluster. +- `target_db_cluster_identifier`: The identifier of the secondary Aurora DB cluster that + you want to promote to the primary for the global database cluster. Use the Amazon Resource + Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web + Services Region. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowDataLoss"`: Specifies whether to allow data loss for this global database cluster + operation. Allowing data loss triggers a global failover operation. If you don't specify + AllowDataLoss, the global database cluster operation defaults to a switchover. Constraints: + Can't be specified together with the Switchover parameter. +- `"Switchover"`: Specifies whether to switch over this global database cluster. + Constraints: Can't be specified together with the AllowDataLoss parameter. """ function failover_global_cluster( GlobalClusterIdentifier, @@ -5859,7 +6827,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys read/write. When it is locked, it is read-only. You can edit your audit policy only when the activity stream is unlocked or stopped. - `"ResourceArn"`: The Amazon Resource Name (ARN) of the RDS for Oracle or Microsoft SQL - Server DB instance. For example, arn:aws:rds:us-east-1:12345667890:instance:my-orcl-db. + Server DB instance. For example, arn:aws:rds:us-east-1:12345667890:db:my-orcl-db. """ function modify_activity_stream(; aws_config::AbstractAWSConfig=global_aws_config()) return rds( @@ -5902,9 +6870,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CertificateIdentifier"`: The new default certificate identifier to override the current one with. To determine the valid values, use the describe-certificates CLI command or the DescribeCertificates API operation. -- `"RemoveCustomerOverride"`: A value that indicates whether to remove the override for the - default certificate. If the override is removed, the default certificate is the system - default. +- `"RemoveCustomerOverride"`: Specifies whether to remove the override for the default + certificate. If the override is removed, the default certificate is the system default. """ function modify_certificates(; aws_config::AbstractAWSConfig=global_aws_config()) return rds("ModifyCertificates"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -5932,7 +6899,7 @@ Using Amazon Aurora Serverless v1 in the Amazon Aurora User Guide. If you call ModifyCurrentDBClusterCapacity with the default TimeoutAction, connections that prevent Aurora Serverless v1 from finding a scaling point might be dropped. 
For more information about scaling points, see Autoscaling for Aurora Serverless v1 in the Amazon Aurora User -Guide. This action only applies to Aurora Serverless v1 DB clusters. +Guide. This operation only applies to Aurora Serverless v1 DB clusters. # Arguments - `dbcluster_identifier`: The DB cluster identifier for the cluster being modified. This @@ -5996,8 +6963,8 @@ ModifyCustomDbEngineVersion event. For more information, see Modifying CEV stat Amazon RDS User Guide. # Arguments -- `engine`: The DB engine. The only supported values are custom-oracle-ee and - custom-oracle-ee-cdb. +- `engine`: The database engine. RDS Custom for Oracle supports the following values: + custom-oracle-ee custom-oracle-ee-cdb custom-oracle-se2 custom-oracle-se2-cdb - `engine_version`: The custom engine version (CEV) that you want to modify. This option is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of Engine and EngineVersion is unique per customer per Amazon Web Services Region. @@ -6049,7 +7016,7 @@ end modify_dbcluster(dbcluster_identifier) modify_dbcluster(dbcluster_identifier, params::Dict{String,<:Any}) -Modify the settings for an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can +Modifies the settings of an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can change one or more settings by specifying these parameters and the new values in the request. For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, see Multi-AZ @@ -6057,102 +7024,116 @@ DB cluster deployments in the Amazon RDS User Guide. # Arguments - `dbcluster_identifier`: The DB cluster identifier for the cluster being modified. This - parameter isn't case-sensitive. Constraints: This identifier must match the identifier of - an existing DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters + parameter isn't case-sensitive. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Constraints: Must match the identifier of an existing DB cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The amount of storage in gibibytes (GiB) to allocate to each DB - instance in the Multi-AZ DB cluster. Valid for: Multi-AZ DB clusters only -- `"AllowEngineModeChange"`: A value that indicates whether engine mode changes from - serverless to provisioned are allowed. Constraints: You must allow engine mode changes when - specifying a different value for the EngineMode parameter from the DB cluster's current - engine mode. Valid for: Aurora Serverless v1 DB clusters only -- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are - allowed. Constraints: You must allow major version upgrades when specifying a value for the - EngineVersion parameter that is a different major version than the DB cluster's current - version. Valid for: Aurora DB clusters only -- `"ApplyImmediately"`: A value that indicates whether the modifications in this request - and any pending modifications are asynchronously applied as soon as possible, regardless of - the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, + instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only +- `"AllowEngineModeChange"`: Specifies whether engine mode changes from serverless to + provisioned are allowed. 
Valid for Cluster Type: Aurora Serverless v1 DB clusters only + Constraints: You must allow engine mode changes when specifying a different value for the + EngineMode parameter from the DB cluster's current engine mode. +- `"AllowMajorVersionUpgrade"`: Specifies whether major version upgrades are allowed. Valid + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: You must allow + major version upgrades when specifying a value for the EngineVersion parameter that is a + different major version than the DB cluster's current version. +- `"ApplyImmediately"`: Specifies whether the modifications in this request and any pending + modifications are asynchronously applied as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window. Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them. By - default, this parameter is disabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the DB cluster during the maintenance window. By default, minor - engine upgrades are applied automatically. Valid for: Multi-AZ DB clusters only + default, this parameter is disabled. Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters +- `"AutoMinorVersionUpgrade"`: Specifies whether minor engine upgrades are applied + automatically to the DB cluster during the maintenance window. By default, minor engine + upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only +- `"AwsBackupRecoveryPointArn"`: The Amazon Resource Name (ARN) of the recovery point in + Amazon Web Services Backup. - `"BacktrackWindow"`: The target backtrack window, in seconds. To disable backtracking, - set this value to 0. Default: 0 Constraints: If specified, this value must be set to a - number from 0 to 259,200 (72 hours). Valid for: Aurora MySQL DB clusters only + set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 + Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 + hours). - `"BackupRetentionPeriod"`: The number of days for which automated backups are retained. - Specify a minimum value of 1. Default: 1 Constraints: Must be a value from 1 to 35 - Valid for: Aurora DB clusters and Multi-AZ DB clusters + Specify a minimum value of 1. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Default: 1 Constraints: Must be a value from 1 to 35. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB cluster's + server certificate. For more information, see Using SSL/TLS to encrypt a connection to a DB + instance in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters - `"CloudwatchLogsExportConfiguration"`: The configuration setting for the log types to be - enabled for export to CloudWatch Logs for a specific DB cluster. The values in the list - depend on the DB engine being used. RDS for MySQL Possible values are error, general, and - slowquery. RDS for PostgreSQL Possible values are postgresql and upgrade. Aurora MySQL - Possible values are audit, error, general, and slowquery. 
Aurora PostgreSQL Possible - value is postgresql. For more information about exporting CloudWatch Logs for Amazon RDS, - see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For - more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database - Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. Valid for: Aurora DB - clusters and Multi-AZ DB clusters -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB - cluster to snapshots of the DB cluster. The default is not to copy them. Valid for: Aurora - DB clusters and Multi-AZ DB clusters + enabled for export to CloudWatch Logs for a specific DB cluster. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters The following values are valid for each DB + engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - + postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - + postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon + RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. + For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing + Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the DB cluster to + snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"DBClusterInstanceClass"`: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS - User Guide. Valid for: Multi-AZ DB clusters only + User Guide. Valid for Cluster Type: Multi-AZ DB clusters only - `"DBClusterParameterGroupName"`: The name of the DB cluster parameter group to use for - the DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters + the DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"DBInstanceParameterGroupName"`: The name of the DB parameter group to apply to all instances of the DB cluster. When you apply a parameter group using the DBInstanceParameterGroupName parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. - Default: The existing name setting Constraints: The DB parameter group must be in the - same DB parameter group family as this DB cluster. The DBInstanceParameterGroupName - parameter is valid in combination with the AllowMajorVersionUpgrade parameter for a major - version upgrade only. Valid for: Aurora DB clusters only -- `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB + Valid for Cluster Type: Aurora DB clusters only Default: The existing name setting + Constraints: The DB parameter group must be in the same DB parameter group family as this + DB cluster. The DBInstanceParameterGroupName parameter is valid in combination with the + AllowMajorVersionUpgrade parameter for a major version upgrade only. 
+- `"DeletionProtection"`: Specifies whether the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. By default, deletion + protection isn't enabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"Domain"`: The Active Directory directory ID to move the DB cluster to. Specify none to remove the cluster from its current domain. The domain must be created prior to this operation. For more information, see Kerberos Authentication in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters only -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. Valid for: Aurora DB clusters only -- `"EnableGlobalWriteForwarding"`: A value that indicates whether to enable this DB cluster - to forward write operations to the primary cluster of an Aurora global database - (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that - are secondary clusters in an Aurora global database. You can set this value only on Aurora - DB clusters that are members of an Aurora global database. With this parameter enabled, a - secondary cluster can forward writes to the current primary cluster and the resulting - changes are replicated back to this cluster. For the primary DB cluster of an Aurora global - database, this value is used immediately if the primary is demoted by the - FailoverGlobalCluster API operation, but it does nothing until then. Valid for: Aurora DB + Guide. Valid for Cluster Type: Aurora DB clusters only +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. Valid for Cluster Type: Aurora DB clusters only +- `"EnableGlobalWriteForwarding"`: Specifies whether to enable this DB cluster to forward + write operations to the primary cluster of a global cluster (Aurora global database). By + default, write operations are not allowed on Aurora DB clusters that are secondary clusters + in an Aurora global database. You can set this value only on Aurora DB clusters that are + members of an Aurora global database. With this parameter enabled, a secondary cluster can + forward writes to the current primary cluster, and the resulting changes are replicated + back to this cluster. For the primary DB cluster of an Aurora global database, this value + is used immediately if the primary is demoted by a global cluster API operation, but it + does nothing until then. Valid for Cluster Type: Aurora DB clusters only +- `"EnableHttpEndpoint"`: Specifies whether to enable the HTTP endpoint for an Aurora + Serverless v1 DB cluster. By default, the HTTP endpoint isn't enabled. When enabled, the + HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL + queries on the Aurora Serverless v1 DB cluster. You can also query your database from + inside the RDS console with the RDS query editor. For more information, see Using RDS Data + API in the Amazon Aurora User Guide. This parameter applies only to Aurora Serverless v1 + DB clusters. To enable or disable the HTTP endpoint for an Aurora PostgreSQL Serverless v2 + or provisioned DB cluster, use the EnableHttpEndpoint and DisableHttpEndpoint operations. + Valid for Cluster Type: Aurora DB clusters only +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. 
For more information, see IAM Database Authentication in the Amazon + Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"EnableLimitlessDatabase"`: Specifies whether to enable Aurora Limitless Database. You + must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only -- `"EnableHttpEndpoint"`: A value that indicates whether to enable the HTTP endpoint for an - Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. When enabled, - the HTTP endpoint provides a connectionless web service API for running SQL queries on the - Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS - console with the query editor. For more information, see Using the Data API for Aurora - Serverless v1 in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication in - the Amazon Aurora User Guide. Valid for: Aurora DB clusters only -- `"EnablePerformanceInsights"`: A value that indicates whether to turn on Performance - Insights for the DB cluster. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. Valid for: Multi-AZ DB clusters only +- `"EnableLocalWriteForwarding"`: Specifies whether read replicas can forward write + operations to the writer DB instance in the DB cluster. By default, write operations aren't + allowed on reader DB instances. Valid for: Aurora DB clusters only +- `"EnablePerformanceInsights"`: Specifies whether to turn on Performance Insights for the + DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. Valid for Cluster Type: Multi-AZ DB clusters only - `"EngineMode"`: The DB engine mode of the DB cluster, either provisioned or serverless. The DB engine mode can be modified only from serverless to provisioned. For more - information, see CreateDBCluster. Valid for: Aurora DB clusters only + information, see CreateDBCluster. Valid for Cluster Type: Aurora DB clusters only - `"EngineVersion"`: The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled. If the cluster that you're @@ -6166,28 +7147,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for MySQL, use the following command: aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\" To list all of the available engine versions for RDS for PostgreSQL, use the following command: aws rds describe-db-engine-versions - --engine postgres --query \"DBEngineVersions[].EngineVersion\" Valid for: Aurora DB - clusters and Multi-AZ DB clusters + --engine postgres --query \"DBEngineVersions[].EngineVersion\" Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. - Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster. 
- Valid for: Multi-AZ DB clusters only -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. If the DB cluster doesn't manage the - master user password with Amazon Web Services Secrets Manager, you can turn on this - management. In this case, you can't specify MasterUserPassword. If the DB cluster already - manages the master user password with Amazon Web Services Secrets Manager, and you specify - that the master user password is not managed with Amazon Web Services Secrets Manager, then - you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new - password for the master user specified by MasterUserPassword. For more information, see - Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide - and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"MasterUserPassword"`: The new password for the master database user. This password can - contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must - contain from 8 to 41 characters. Can't be specified if ManageMasterUserPassword is turned - on. Valid for: Aurora DB clusters and Multi-AZ DB clusters + Valid for Cluster Type: Multi-AZ DB clusters only Constraints: Must be a multiple between + .5 and 50 of the storage amount for the DB cluster. +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. If the DB cluster doesn't manage the master user + password with Amazon Web Services Secrets Manager, you can turn on this management. In this + case, you can't specify MasterUserPassword. If the DB cluster already manages the master + user password with Amazon Web Services Secrets Manager, and you specify that the master + user password is not managed with Amazon Web Services Secrets Manager, then you must + specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password + for the master user specified by MasterUserPassword. For more information, see Password + management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and + Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User + Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters +- `"MasterUserPassword"`: The new password for the master database user. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 8 to 41 + characters. Can contain any printable ASCII character except \"/\", \"\"\", or \"@\". + Can't be specified if ManageMasterUserPassword is turned on. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The DB cluster @@ -6203,81 +7184,82 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon - Web Services Region. Valid for: Aurora DB clusters and Multi-AZ DB clusters + Web Services Region. 
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring - metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, also set - MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 Valid - for: Multi-AZ DB clusters only + metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a + value other than 0. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | + 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid - for: Multi-AZ DB clusters only -- `"NetworkType"`: The network type of the DB cluster. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB cluster. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters only + for Cluster Type: Multi-AZ DB clusters only +- `"NetworkType"`: The network type of the DB cluster. The network type is determined by + the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB + clusters only Valid Values: IPV4 | DUAL - `"NewDBClusterIdentifier"`: The new DB cluster identifier for the DB cluster when - renaming a DB cluster. This value is stored as a lowercase string. Constraints: Must - contain from 1 to 63 letters, numbers, or hyphens The first character must be a letter - Can't end with a hyphen or contain two consecutive hyphens Example: my-cluster2 Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"OptionGroupName"`: A value that indicates that the DB cluster should be associated with - the specified option group. DB clusters are associated with a default option group that - can't be modified. + renaming a DB cluster. This value is stored as a lowercase string. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 + letters, numbers, or hyphens. The first character must be a letter. Can't end with a + hyphen or contain two consecutive hyphens. Example: my-cluster2 +- `"OptionGroupName"`: The option group to associate the DB cluster with. DB clusters are + associated with a default option group that can't be modified. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. 
Your Amazon Web Services account has a - different default KMS key for each Amazon Web Services Region. Valid for: Multi-AZ DB - clusters only -- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. Valid for: + different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only -- `"Port"`: The port number on which the DB cluster accepts connections. Constraints: Value - must be 1150-65535 Default: The same port as the original DB cluster. Valid for: Aurora DB - clusters only +- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights + data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, + where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * + 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that + isn't valid, such as 94, Amazon RDS issues an error. +- `"Port"`: The port number on which the DB cluster accepts connections. Valid for Cluster + Type: Aurora DB clusters only Valid Values: 1150-65535 Default: The same port as the + original DB cluster. - `"PreferredBackupWindow"`: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the - Amazon Aurora User Guide. Constraints: Must be in the format hh24:mi-hh24:mi. Must be - in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance - window. Must be at least 30 minutes. Valid for: Aurora DB clusters and Multi-AZ DB - clusters + Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal + Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must + be at least 30 minutes. - `"PreferredMaintenanceWindow"`: The weekly time range during which system maintenance can - occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is - a 30-minute window selected at random from an 8-hour block of time for each Amazon Web - Services Region, occurring on a random day of the week. To see the time blocks available, - see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. - Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. Constraints: Minimum 30-minute window. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"RotateMasterUserPassword"`: A value that indicates whether to rotate the secret managed - by Amazon Web Services Secrets Manager for the master user password. This setting is valid - only if the master user password is managed by RDS in Amazon Web Services Secrets Manager - for the DB cluster. The secret value contains the updated password. 
For more information, - see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User - Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora - User Guide. Constraints: You must apply the change immediately when rotating the master - user password. Valid for: Aurora DB clusters and Multi-AZ DB clusters + occur, in Universal Coordinated Time (UTC). Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour + block of time for each Amazon Web Services Region, occurring on a random day of the week. + To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance + Window in the Amazon Aurora User Guide. Constraints: Must be in the format + ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun. + Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes. +- `"RotateMasterUserPassword"`: Specifies whether to rotate the secret managed by Amazon + Web Services Secrets Manager for the master user password. This setting is valid only if + the master user password is managed by RDS in Amazon Web Services Secrets Manager for the + DB cluster. The secret value contains the updated password. For more information, see + Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide + and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User + Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: + You must apply the change immediately when rotating the master user password. - `"ScalingConfiguration"`: The scaling properties of the DB cluster. You can only modify - scaling properties for DB clusters in serverless DB engine mode. Valid for: Aurora DB - clusters only + scaling properties for DB clusters in serverless DB engine mode. Valid for Cluster Type: + Aurora DB clusters only - `"ServerlessV2ScalingConfiguration"`: -- `"StorageType"`: Specifies the storage type to be associated with the DB cluster. When - specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid - values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: - aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and - Multi-AZ DB clusters -- `"VpcSecurityGroupIds"`: A list of VPC security groups that the DB cluster will belong - to. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB + clusters. For information on storage types for Multi-AZ DB clusters, see Settings for + creating Multi-AZ DB clusters. When specified for a Multi-AZ DB cluster, a value for the + Iops parameter is required. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Valid Values: Aurora DB clusters - aurora | aurora-iopt1 Multi-AZ DB clusters + - io1 | io2 | gp3 Default: Aurora DB clusters - aurora Multi-AZ DB clusters - io1 +- `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this DB + cluster. 
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters """ function modify_dbcluster( DBClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -6312,7 +7294,7 @@ end modify_dbcluster_endpoint(dbcluster_endpoint_identifier) modify_dbcluster_endpoint(dbcluster_endpoint_identifier, params::Dict{String,<:Any}) -Modifies the properties of an endpoint in an Amazon Aurora DB cluster. This action only +Modifies the properties of an endpoint in an Amazon Aurora DB cluster. This operation only applies to Aurora DB clusters. # Arguments @@ -6368,7 +7350,7 @@ submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. of 20 parameters can be modified in a single request. After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows -Amazon RDS to fully complete the create action before the parameter group is used as the +Amazon RDS to fully complete the create operation before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter @@ -6533,43 +7515,46 @@ modifications you can make to your DB instance, call DescribeValidDBInstanceModi before you call ModifyDBInstance. # Arguments -- `dbinstance_identifier`: The DB instance identifier. This value is stored as a lowercase - string. Constraints: Must match the identifier of an existing DBInstance. +- `dbinstance_identifier`: The identifier of DB instance to modify. This value is stored as + a lowercase string. Constraints: Must match the identifier of an existing DB instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The new amount of storage in gibibytes (GiB) to allocate for the DB - instance. For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least - 10% greater than the current value. Values that are not at least 10% greater than the - existing value are rounded up so that they are 10% greater than the current value. For the - valid values for allocated storage for each engine, see CreateDBInstance. -- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are - allowed. Changing this parameter doesn't result in an outage and the change is - asynchronously applied as soon as possible. This setting doesn't apply to RDS Custom. - Constraints: Major version upgrades must be allowed when specifying a value for the - EngineVersion parameter that is a different major version than the DB instance's current + instance. For RDS for Db2, MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL, + the value supplied must be at least 10% greater than the current value. Values that are not + at least 10% greater than the existing value are rounded up so that they are 10% greater + than the current value. For the valid values for allocated storage for each engine, see + CreateDBInstance. Constraints: When you increase the allocated storage for a DB instance + that uses Provisioned IOPS (gp3, io1, or io2 storage type), you must also specify the Iops + parameter. You can use the current value for Iops. 
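To illustrate how the generated wrapper above is meant to be used, here is a minimal sketch of calling modify_dbcluster through AWS.jl's @service macro; the cluster identifier and every parameter value are hypothetical and chosen only to show the positional-argument-plus-params-Dict shape described in the docstring, and the call assumes valid AWS credentials are already configured.

```julia
using AWS
@service RDS

# Hypothetical cluster identifier and settings, for illustration only.
# Optional keys go in a params Dict, matching the docstring above.
params = Dict{String,Any}(
    "ApplyImmediately"      => true,            # apply outside the maintenance window
    "CopyTagsToSnapshot"    => true,
    "DeletionProtection"    => true,
    "PreferredBackupWindow" => "07:00-07:30",   # hh24:mi-hh24:mi, UTC, at least 30 minutes
)
RDS.modify_dbcluster("my-aurora-cluster", params)
```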
+- `"AllowMajorVersionUpgrade"`: Specifies whether major version upgrades are allowed. + Changing this parameter doesn't result in an outage and the change is asynchronously + applied as soon as possible. This setting doesn't apply to RDS Custom DB instances. + Constraints: Major version upgrades must be allowed when specifying a value for the + EngineVersion parameter that's a different major version than the DB instance's current version. -- `"ApplyImmediately"`: A value that indicates whether the modifications in this request - and any pending modifications are asynchronously applied as soon as possible, regardless of - the PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is +- `"ApplyImmediately"`: Specifies whether the modifications in this request and any pending + modifications are asynchronously applied as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is disabled. If this parameter is disabled, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance in the Amazon RDS User Guide to see the impact of enabling or disabling ApplyImmediately for each modified parameter and to determine when the changes are applied. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor version upgrades are - applied automatically to the DB instance during the maintenance window. An outage occurs - when all the following conditions are met: The automatic upgrade is enabled for the - maintenance window. A newer minor version is available. RDS has enabled automatic - patching for the engine version. If any of the preceding conditions isn't met, RDS - applies the change as soon as possible and doesn't cause an outage. For an RDS Custom DB - instance, set AutoMinorVersionUpgrade to false. Otherwise, the operation returns an error. -- `"AutomationMode"`: The automation mode of the RDS Custom DB instance: full or all - paused. If full, the DB instance automates monitoring and instance recovery. If all paused, - the instance pauses automation for the duration set by ResumeFullAutomationModeMinutes. +- `"AutoMinorVersionUpgrade"`: Specifies whether minor version upgrades are applied + automatically to the DB instance during the maintenance window. An outage occurs when all + the following conditions are met: The automatic upgrade is enabled for the maintenance + window. A newer minor version is available. RDS has enabled automatic patching for the + engine version. If any of the preceding conditions isn't met, Amazon RDS applies the + change as soon as possible and doesn't cause an outage. For an RDS Custom DB instance, + don't enable this setting. Otherwise, the operation returns an error. +- `"AutomationMode"`: The automation mode of the RDS Custom DB instance. If full, the DB + instance automates monitoring and instance recovery. If all paused, the instance pauses + automation for the duration set by ResumeFullAutomationModeMinutes. - `"AwsBackupRecoveryPointArn"`: The Amazon Resource Name (ARN) of the recovery point in - Amazon Web Services Backup. This setting doesn't apply to RDS Custom. + Amazon Web Services Backup. This setting doesn't apply to RDS Custom DB instances. - `"BackupRetentionPeriod"`: The number of days to retain automated backups. 
Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Enabling and disabling backups can result in a brief I/O suspension @@ -6577,38 +7562,36 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon - as possible. Amazon Aurora Not applicable. The retention period for automated backups is - managed by the DB cluster. For more information, see ModifyDBCluster. Default: Uses - existing setting Constraints: It must be a value from 0 to 35. It can't be set to 0 if - the DB instance is a source to read replicas. It can't be set to 0 for an RDS Custom for - Oracle DB instance. It can be specified for a MySQL read replica only if the source is - running MySQL 5.6 or later. It can be specified for a PostgreSQL read replica only if the - source is running PostgreSQL 9.3.5. -- `"CACertificateIdentifier"`: Specifies the CA certificate identifier to use for the DB - instance’s server certificate. This setting doesn't apply to RDS Custom. For more + as possible. This setting doesn't apply to Amazon Aurora DB instances. The retention period + for automated backups is managed by the DB cluster. For more information, see + ModifyDBCluster. Default: Uses existing setting Constraints: Must be a value from 0 to + 35. Can't be set to 0 if the DB instance is a source to read replicas. Can't be set to + 0 for an RDS Custom for Oracle DB instance. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. -- `"CertificateRotationRestart"`: A value that indicates whether the DB instance is - restarted when you rotate your SSL/TLS certificate. By default, the DB instance is - restarted when you rotate your SSL/TLS certificate. The certificate is not updated until - the DB instance is restarted. Set this parameter only if you are not using SSL/TLS to - connect to the DB instance. If you are using SSL/TLS to connect to the DB instance, follow - the appropriate instructions for your DB engine to rotate your SSL/TLS certificate: For - more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating - Your SSL/TLS Certificate. in the Amazon RDS User Guide. For more information about - rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS - Certificate in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom. -- `"CloudwatchLogsExportConfiguration"`: The configuration setting for the log types to be - enabled for export to CloudWatch Logs for a specific DB instance. A change to the +- `"CertificateRotationRestart"`: Specifies whether the DB instance is restarted when you + rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate + your SSL/TLS certificate. The certificate is not updated until the DB instance is + restarted. Set this parameter only if you are not using SSL/TLS to connect to the DB + instance. 
If you are using SSL/TLS to connect to the DB instance, follow the appropriate + instructions for your DB engine to rotate your SSL/TLS certificate: For more information + about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS + Certificate. in the Amazon RDS User Guide. For more information about rotating your + SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the + Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. +- `"CloudwatchLogsExportConfiguration"`: The log types to be enabled for export to + CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't - apply to RDS Custom. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB - instance to snapshots of the DB instance. By default, tags are not copied. Amazon Aurora - Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value - for an Aurora DB instance has no effect on the DB cluster setting. For more information, - see ModifyDBCluster. + apply to RDS Custom DB instances. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the DB instance to + snapshots of the DB instance. By default, tags aren't copied. This setting doesn't apply to + Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting + this value for an Aurora DB instance has no effect on the DB cluster setting. For more + information, see ModifyDBCluster. - `"DBInstanceClass"`: The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for @@ -6617,65 +7600,90 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server. If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless you specify ApplyImmediately in your request. - Default: Uses existing setting + Default: Uses existing setting Constraints: If you are modifying the DB instance class + and upgrading the engine version at the same time, the currently running engine version + must be supported on the specified DB instance class. Otherwise, the operation returns an + error. In this case, first run the operation to upgrade the engine version, and then run it + again to modify the DB instance class. - `"DBParameterGroupName"`: The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are - applied immediately without a reboot. This setting doesn't apply to RDS Custom. Default: - Uses existing setting Constraints: The DB parameter group must be in the same DB parameter + applied immediately without a reboot. 
This setting doesn't apply to RDS Custom DB + instances. Default: Uses existing setting Constraints: Must be in the same DB parameter group family as the DB instance. - `"DBPortNumber"`: The port number on which the database accepts connections. The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance. If you change the DBPortNumber value, your database restarts regardless of the value of the ApplyImmediately parameter. This setting doesn't - apply to RDS Custom. MySQL Default: 3306 Valid values: 1150-65535 MariaDB Default: - 3306 Valid values: 1150-65535 PostgreSQL Default: 5432 Valid values: 1150-65535 Type: - Integer Oracle Default: 1521 Valid values: 1150-65535 SQL Server Default: 1433 Valid - values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156. Amazon - Aurora Default: 3306 Valid values: 1150-65535 + apply to RDS Custom DB instances. Valid Values: 1150-65535 Default: Amazon Aurora - 3306 + RDS for Db2 - 50000 RDS for MariaDB - 3306 RDS for Microsoft SQL Server - 1433 + RDS for MySQL - 3306 RDS for Oracle - 1521 RDS for PostgreSQL - 5432 Constraints: + For RDS for Microsoft SQL Server, the value can't be 1234, 1434, 3260, 3343, 3389, 47001, + or 49152-49156. - `"DBSecurityGroups"`: A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied - as soon as possible. This setting doesn't apply to RDS Custom. Constraints: If supplied, - must match existing DBSecurityGroups. + as soon as possible. This setting doesn't apply to RDS Custom DB instances. Constraints: + If supplied, must match existing DB security groups. - `"DBSubnetGroupName"`: The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. Changing the subnet group causes an outage during the change. The change is applied during the next maintenance - window, unless you enable ApplyImmediately. This parameter doesn't apply to RDS Custom. - Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: + window, unless you enable ApplyImmediately. This setting doesn't apply to RDS Custom DB + instances. Constraints: If supplied, must match existing DB subnet group. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. +- `"DedicatedLogVolume"`: Indicates whether the DB instance has a dedicated log volume + (DLV) enabled. +- `"DeletionProtection"`: Specifies whether the DB instance has deletion protection + enabled. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. For more information, see Deleting a DB Instance. This + setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion + protection for the DB cluster. For more information, see ModifyDBCluster. DB instances in a + DB cluster can be deleted even when deletion protection is enabled for the DB cluster. 
+- `"DisableDomain"`: Specifies whether to remove the DB instance from the Active Directory + domain. - `"Domain"`: The Active Directory directory ID to move the DB instance to. Specify none to remove the instance from its current domain. You must create the domain before this - operation. Currently, you can create only MySQL, Microsoft SQL Server, Oracle, and + operation. Currently, you can create only Db2, MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. For more information, see Kerberos - Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB + instances. +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain - `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the - Directory Service. This setting doesn't apply to RDS Custom. -- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP - address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external - connectivity to resources in your Outpost subnets through your on-premises network. For - some use cases, a CoIP can provide lower latency for connections to the DB instance from - outside of its virtual private cloud (VPC) on your local network. For more information - about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the - Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in - the Amazon Web Services Outposts User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. This setting doesn't apply to Amazon Aurora. Mapping Amazon - Web Services IAM accounts to database accounts is managed by the DB cluster. For more + Directory Service. This setting doesn't apply to RDS Custom DB instances. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain +- `"EnableCustomerOwnedIp"`: Specifies whether to enable a customer-owned IP address (CoIP) + for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to + resources in your Outpost subnets through your on-premises network. For some use cases, a + CoIP can provide lower latency for connections to the DB instance from outside of its + virtual private cloud (VPC) on your local network. 
For more information about RDS on + Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS + User Guide. For more information about CoIPs, see Customer-owned IP addresses in the Amazon + Web Services Outposts User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. This setting doesn't apply to Amazon Aurora. Mapping Amazon Web + Services IAM accounts to database accounts is managed by the DB cluster. For more information about IAM database authentication, see IAM Database Authentication for MySQL - and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"EnablePerformanceInsights"`: A value that indicates whether to enable Performance - Insights for the DB instance. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB + instances. +- `"EnablePerformanceInsights"`: Specifies whether to enable Performance Insights for the + DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. This setting doesn't apply to RDS Custom DB instances. - `"Engine"`: The target Oracle DB engine when you convert a non-CDB to a CDB. This intermediate step is necessary to upgrade an Oracle Database 19c non-CDB to an Oracle Database 21c CDB. Note the following requirements: Make sure that you specify @@ -6693,13 +7701,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. If you specify only - a major version, Amazon RDS will update the DB instance to the default minor version if the + a major version, Amazon RDS updates the DB instance to the default minor version if the current minor version is lower. For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions. If the instance that you're modifying - is acting as a read replica, the engine version that you specify must be the same or later + is acting as a read replica, the engine version that you specify must be the same or higher than the version that the source DB instance or cluster is running. In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the - PATCH_DB_FAILURE lifecycle. + PATCH_DB_FAILURE lifecycle. Constraints: If you are upgrading the engine version and + modifying the DB instance class at the same time, the currently running engine version must + be supported on the specified DB instance class. Otherwise, the operation returns an error. + In this case, first run the operation to upgrade the engine version, and then run it again + to modify the DB instance class. - `"Iops"`: The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for @@ -6715,39 +7727,45 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating - a DB snapshot of the instance. Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the - value supplied must be at least 10% greater than the current value. Values that are not at - least 10% greater than the existing value are rounded up so that they are 10% greater than - the current value. Default: Uses existing setting + a DB snapshot of the instance. Constraints: For RDS for MariaDB, RDS for MySQL, RDS for + Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the existing value are rounded + up so that they are 10% greater than the current value. When you increase the Provisioned + IOPS, you must also specify the AllocatedStorage parameter. You can use the current value + for AllocatedStorage. Default: Uses existing setting - `"LicenseModel"`: The license model for the DB instance. This setting doesn't apply to - RDS Custom. Valid values: license-included | bring-your-own-license | - general-public-license -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. If the DB instance doesn't manage the - master user password with Amazon Web Services Secrets Manager, you can turn on this - management. In this case, you can't specify MasterUserPassword. If the DB instance already - manages the master user password with Amazon Web Services Secrets Manager, and you specify - that the master user password is not managed with Amazon Web Services Secrets Manager, then - you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new + Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - + bring-your-own-license RDS for MariaDB - general-public-license RDS for Microsoft SQL + Server - license-included RDS for MySQL - general-public-license RDS for Oracle - + bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. If the DB instance doesn't manage the master user + password with Amazon Web Services Secrets Manager, you can turn on this management. In this + case, you can't specify MasterUserPassword. If the DB instance already manages the master + user password with Amazon Web Services Secrets Manager, and you specify that the master + user password is not managed with Amazon Web Services Secrets Manager, then you must + specify MasterUserPassword. In this case, Amazon RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. -- `"MasterUserPassword"`: The new password for the master user. The password can include - any printable ASCII character except \"/\", \"\"\", or \"@\". Changing this parameter +- `"MasterUserPassword"`: The new password for the master user. 
Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword - element exists in the PendingModifiedValues element of the operation response. This setting - doesn't apply to RDS Custom. Amazon Aurora Not applicable. The password for the master - user is managed by the DB cluster. For more information, see ModifyDBCluster. Default: Uses - existing setting Constraints: Can't be specified if ManageMasterUserPassword is turned on. - MariaDB Constraints: Must contain from 8 to 41 characters. Microsoft SQL Server - Constraints: Must contain from 8 to 128 characters. MySQL Constraints: Must contain from - 8 to 41 characters. Oracle Constraints: Must contain from 8 to 30 characters. PostgreSQL - Constraints: Must contain from 8 to 128 characters. Amazon RDS API operations never - return the password, so this action provides a way to regain access to a primary instance - user if the password is lost. This includes restoring privileges that might have been - accidentally revoked. + element exists in the PendingModifiedValues element of the operation response. Amazon RDS + API operations never return the password, so this operation provides a way to regain access + to a primary instance user if the password is lost. This includes restoring privileges that + might have been accidentally revoked. This setting doesn't apply to the following DB + instances: Amazon Aurora (The password for the master user is managed by the DB cluster. + For more information, see ModifyDBCluster.) RDS Custom Default: Uses existing setting + Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include + any printable ASCII character except \"/\", \"\"\", or \"@\". For RDS for Oracle, can't + include the \"&\" (ampersand) or the \"'\" (single quotes) character. Length + Constraints: RDS for Db2 - Must contain from 8 to 255 characters. RDS for MariaDB - + Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must contain from 8 + to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. RDS for Oracle + - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 + characters. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The DB instance @@ -6768,94 +7786,102 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to - RDS Custom. + RDS Custom DB instances. - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring - metrics are collected for the DB instance. To disable collecting Enhanced Monitoring - metrics, specify 0, which is the default. If MonitoringRoleArn is specified, set - MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom. Valid - Values: 0, 1, 5, 10, 15, 30, 60 + metrics are collected for the DB instance. To disable collection of Enhanced Monitoring + metrics, specify 0. 
If MonitoringRoleArn is specified, set MonitoringInterval to a value + other than 0. This setting doesn't apply to RDS Custom DB instances. Valid Values: 0 | 1 | + 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. This - setting doesn't apply to RDS Custom. -- `"MultiAZ"`: A value that indicates whether the DB instance is a Multi-AZ deployment. - Changing this parameter doesn't result in an outage. The change is applied during the next - maintenance window unless the ApplyImmediately parameter is enabled for this request. This - setting doesn't apply to RDS Custom. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB instance. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon RDS User - Guide. -- `"NewDBInstanceIdentifier"`: The new DB instance identifier for the DB instance when - renaming a DB instance. When you change the DB instance identifier, an instance reboot - occurs immediately if you enable ApplyImmediately, or will occur during the next - maintenance window if you disable Apply Immediately. This value is stored as a lowercase - string. This setting doesn't apply to RDS Custom. Constraints: Must contain from 1 to 63 - letters, numbers, or hyphens. The first character must be a letter. Can't end with a - hyphen or contain two consecutive hyphens. Example: mydbinstance -- `"OptionGroupName"`: A value that indicates the DB instance should be associated with the - specified option group. Changing this parameter doesn't result in an outage, with one - exception. If the parameter change results in an option group that enables OEM, it can - cause a brief period, lasting less than a second, during which new connections are rejected - but existing connections aren't interrupted. The change is applied during the next - maintenance window unless the ApplyImmediately parameter is enabled for this request. - Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be - removed from an option group, and that option group can't be removed from a DB instance - after it is associated with a DB instance. This setting doesn't apply to RDS Custom. + setting doesn't apply to RDS Custom DB instances. +- `"MultiAZ"`: Specifies whether the DB instance is a Multi-AZ deployment. Changing this + parameter doesn't result in an outage. The change is applied during the next maintenance + window unless the ApplyImmediately parameter is enabled for this request. This setting + doesn't apply to RDS Custom DB instances. +- `"MultiTenant"`: Specifies whether the to convert your DB instance from the single-tenant + configuration to the multi-tenant configuration. This parameter is supported only for RDS + for Oracle CDB instances. During the conversion, RDS creates an initial tenant database and + associates the DB name, master user name, character set, and national character set + metadata with this database. 
The tags associated with the instance also propagate to the + initial tenant database. You can add more tenant databases to your DB instance by using the + CreateTenantDatabase operation. The conversion to the multi-tenant configuration is + permanent and irreversible, so you can't later convert back to the single-tenant + configuration. When you specify this parameter, you must also specify ApplyImmediately. +- `"NetworkType"`: The network type of the DB instance. The network type is determined by + the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon RDS User Guide. Valid Values: IPV4 | DUAL +- `"NewDBInstanceIdentifier"`: The new identifier for the DB instance when renaming a DB + instance. When you change the DB instance identifier, an instance reboot occurs immediately + if you enable ApplyImmediately, or will occur during the next maintenance window if you + disable ApplyImmediately. This value is stored as a lowercase string. This setting doesn't + apply to RDS Custom DB instances. Constraints: Must contain from 1 to 63 letters, + numbers, or hyphens. The first character must be a letter. Can't end with a hyphen or + contain two consecutive hyphens. Example: mydbinstance +- `"OptionGroupName"`: The option group to associate the DB instance with. Changing this + parameter doesn't result in an outage, with one exception. If the parameter change results + in an option group that enables OEM, it can cause a brief period, lasting less than a + second, during which new connections are rejected but existing connections aren't + interrupted. The change is applied during the next maintenance window unless the + ApplyImmediately parameter is enabled for this request. Permanent options, such as the TDE + option for Oracle Advanced Security TDE, can't be removed from an option group, and that + option group can't be removed from a DB instance after it is associated with a DB instance. + This setting doesn't apply to RDS Custom DB instances. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the - key ARN, key ID, alias ARN, or alias name for the KMS key. If you do not specify a value - for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a - default KMS key for your Amazon Web Services account. Your Amazon Web Services account has - a different default KMS key for each Amazon Web Services Region. This setting doesn't apply - to RDS Custom. + key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for + PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default + KMS key for your Amazon Web Services account. Your Amazon Web Services account has a + different default KMS key for each Amazon Web Services Region. This setting doesn't apply + to RDS Custom DB instances. - `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. 
This setting - doesn't apply to RDS Custom. + data. This setting doesn't apply to RDS Custom DB instances. Valid Values: 7 month * + 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 + months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention + period that isn't valid, such as 94, Amazon RDS returns an error. - `"PreferredBackupWindow"`: The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more - information, see Backup window in the Amazon RDS User Guide. Amazon Aurora Not - applicable. The daily time range for creating automated backups is managed by the DB - cluster. For more information, see ModifyDBCluster. Constraints: Must be in the format - hh24:mi-hh24:mi Must be in Universal Time Coordinated (UTC) Must not conflict with the - preferred maintenance window Must be at least 30 minutes -- `"PreferredMaintenanceWindow"`: The weekly time range (in UTC) during which system - maintenance can occur, which might result in an outage. Changing this parameter doesn't - result in an outage, except in the following situation, and the change is asynchronously - applied as soon as possible. If there are pending actions that cause a reboot, and the - maintenance window is changed to include the current time, then changing this parameter - will cause a reboot of the DB instance. If moving this window to the current time, there - must be at least 30 minutes between the current time and end of the window to ensure - pending changes are applied. For more information, see Amazon RDS Maintenance Window in the - Amazon RDS User Guide. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi - Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes + information, see Backup window in the Amazon RDS User Guide. This setting doesn't apply to + Amazon Aurora DB instances. The daily time range for creating automated backups is managed + by the DB cluster. For more information, see ModifyDBCluster. Constraints: Must be in the + format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict + with the preferred maintenance window. Must be at least 30 minutes. +- `"PreferredMaintenanceWindow"`: The weekly time range during which system maintenance can + occur, which might result in an outage. Changing this parameter doesn't result in an + outage, except in the following situation, and the change is asynchronously applied as soon + as possible. If there are pending actions that cause a reboot, and the maintenance window + is changed to include the current time, then changing this parameter causes a reboot of the + DB instance. If you change this window to the current time, there must be at least 30 + minutes between the current time and end of the window to ensure pending changes are + applied. For more information, see Amazon RDS Maintenance Window in the Amazon RDS User + Guide. Default: Uses existing setting Constraints: Must be in the format + ddd:hh24:mi-ddd:hh24:mi. The day values must be mon | tue | wed | thu | fri | sat | sun. + Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred + backup window. 
Must be at least 30 minutes. - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the - DB instance class of the DB instance. This setting doesn't apply to RDS Custom. -- `"PromotionTier"`: A value that specifies the order in which an Aurora Replica is - promoted to the primary instance after a failure of the existing primary instance. For more - information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. - This setting doesn't apply to RDS Custom. Default: 1 Valid Values: 0 - 15 -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access isn't permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. PubliclyAccessible only applies to - DB instances in a VPC. The DB instance must be part of a public subnet and - PubliclyAccessible must be enabled for it to be publicly accessible. Changes to the - PubliclyAccessible parameter are applied immediately regardless of the value of the - ApplyImmediately parameter. + DB instance class of the DB instance. This setting doesn't apply to RDS Custom DB instances. +- `"PromotionTier"`: The order of priority in which an Aurora Replica is promoted to the + primary instance after a failure of the existing primary instance. For more information, + see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting + doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB cluster's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB + cluster is ultimately controlled by the security group it uses. That public access isn't + permitted if the security group assigned to the DB cluster doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. PubliclyAccessible only applies to DB instances in a + VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled + for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied + immediately regardless of the value of the ApplyImmediately parameter. - `"ReplicaMode"`: A value that sets the open mode of a replica database to either mounted or read-only. Currently, this parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for @@ -6863,46 +7889,47 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. 
For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide. This setting - doesn't apply to RDS Custom. + doesn't apply to RDS Custom DB instances. - `"ResumeFullAutomationModeMinutes"`: The number of minutes to pause the automation. When - the time period ends, RDS Custom resumes full automation. The minimum value is 60 - (default). The maximum value is 1,440. -- `"RotateMasterUserPassword"`: A value that indicates whether to rotate the secret managed - by Amazon Web Services Secrets Manager for the master user password. This setting is valid - only if the master user password is managed by RDS in Amazon Web Services Secrets Manager - for the DB cluster. The secret value contains the updated password. For more information, - see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User - Guide. Constraints: You must apply the change immediately when rotating the master user + the time period ends, RDS Custom resumes full automation. Default: 60 Constraints: Must + be at least 60. Must be no more than 1,440. +- `"RotateMasterUserPassword"`: Specifies whether to rotate the secret managed by Amazon + Web Services Secrets Manager for the master user password. This setting is valid only if + the master user password is managed by RDS in Amazon Web Services Secrets Manager for the + DB cluster. The secret value contains the updated password. For more information, see + Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. + Constraints: You must apply the change immediately when rotating the master user password. -- `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This - setting applies only to the gp3 storage type. This setting doesn't apply to RDS Custom or - Amazon Aurora. -- `"StorageType"`: Specifies the storage type to be associated with the DB instance. If you - specify Provisioned IOPS (io1), you must also include a value for the Iops parameter. If - you choose to migrate your DB instance from using standard storage to using Provisioned - IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. - The duration of the migration depends on several factors such as database load, storage - size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and - the number of prior scale storage operations. Typical migration times are under 24 hours, - but the process can take up to several days in some cases. During the migration, the DB - instance is available for use, but might experience performance degradation. While the - migration takes place, nightly backups for the instance are suspended. No other Amazon RDS - operations can take place for the instance, including modifying the instance, rebooting the - instance, deleting the instance, creating a read replica for the instance, and creating a - DB snapshot of the instance. Valid values: gp2 | gp3 | io1 | standard Default: io1 if the - Iops parameter is specified, otherwise gp2 +- `"StorageThroughput"`: The storage throughput value for the DB instance. This setting + applies only to the gp3 storage type. This setting doesn't apply to Amazon Aurora or RDS + Custom DB instances. +- `"StorageType"`: The storage type to associate with the DB instance. If you specify io1, + io2, or gp3 you must also include a value for the Iops parameter. 
If you choose to migrate + your DB instance from using standard storage to using Provisioned IOPS, or from using + Provisioned IOPS to using standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage size, storage type + (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of + prior scale storage operations. Typical migration times are under 24 hours, but the process + can take up to several days in some cases. During the migration, the DB instance is + available for use, but might experience performance degradation. While the migration takes + place, nightly backups for the instance are suspended. No other Amazon RDS operations can + take place for the instance, including modifying the instance, rebooting the instance, + deleting the instance, creating a read replica for the instance, and creating a DB snapshot + of the instance. Valid Values: gp2 | gp3 | io1 | io2 | standard Default: io1, if the Iops + parameter is specified. Otherwise, gp2. - `"TdeCredentialArn"`: The ARN from the key store with which to associate the instance for - TDE encryption. This setting doesn't apply to RDS Custom. + TDE encryption. This setting doesn't apply to RDS Custom DB instances. - `"TdeCredentialPassword"`: The password for the given ARN from the key store in order to - access the device. This setting doesn't apply to RDS Custom. -- `"UseDefaultProcessorFeatures"`: A value that indicates whether the DB instance class of - the DB instance uses its default processor features. This setting doesn't apply to RDS - Custom. -- `"VpcSecurityGroupIds"`: A list of Amazon EC2 VPC security groups to authorize on this DB - instance. This change is asynchronously applied as soon as possible. This setting doesn't - apply to RDS Custom. Amazon Aurora Not applicable. The associated list of EC2 VPC - security groups is managed by the DB cluster. For more information, see ModifyDBCluster. - Constraints: If supplied, must match existing VpcSecurityGroupIds. + access the device. This setting doesn't apply to RDS Custom DB instances. +- `"UseDefaultProcessorFeatures"`: Specifies whether the DB instance class of the DB + instance uses its default processor features. This setting doesn't apply to RDS Custom DB + instances. +- `"VpcSecurityGroupIds"`: A list of Amazon EC2 VPC security groups to associate with this + DB instance. This change is asynchronously applied as soon as possible. This setting + doesn't apply to the following DB instances: Amazon Aurora (The associated list of EC2 + VPC security groups is managed by the DB cluster. For more information, see + ModifyDBCluster.) RDS Custom Constraints: If supplied, must match existing VPC + security group IDs. """ function modify_dbinstance( DBInstanceIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -6942,12 +7969,12 @@ a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maxim parameters can be modified in a single request. After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete -the modify action before the parameter group is used as the default for a new DB instance. 
-This is especially important for parameters that are critical when creating the default -database for a DB instance, such as the character set for the default database defined by -the character_set_database parameter. You can use the Parameter Groups option of the Amazon -RDS console or the DescribeDBParameters command to verify that your DB parameter group has -been created or modified. +the modify operation before the parameter group is used as the default for a new DB +instance. This is especially important for parameters that are critical when creating the +default database for a DB instance, such as the character set for the default database +defined by the character_set_database parameter. You can use the Parameter Groups option of +the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter +group has been created or modified. # Arguments - `dbparameter_group_name`: The name of the DB parameter group. Constraints: If supplied, @@ -7168,13 +8195,100 @@ function modify_dbproxy_target_group( ) end +""" + modify_dbrecommendation(recommendation_id) + modify_dbrecommendation(recommendation_id, params::Dict{String,<:Any}) + +Updates the recommendation status and recommended action status for the specified +recommendation. + +# Arguments +- `recommendation_id`: The identifier of the recommendation to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Locale"`: The language of the modified recommendation. +- `"RecommendedActionUpdates"`: The list of recommended action status to update. You can + update multiple recommended actions at one time. +- `"Status"`: The recommendation status to update. Valid values: active dismissed +""" +function modify_dbrecommendation( + RecommendationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "ModifyDBRecommendation", + Dict{String,Any}("RecommendationId" => RecommendationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_dbrecommendation( + RecommendationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "ModifyDBRecommendation", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("RecommendationId" => RecommendationId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_dbshard_group(dbshard_group_identifier) + modify_dbshard_group(dbshard_group_identifier, params::Dict{String,<:Any}) + +Modifies the settings of an Aurora Limitless Database DB shard group. You can change one or +more settings by specifying these parameters and the new values in the request. + +# Arguments +- `dbshard_group_identifier`: The name of the DB shard group to modify. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxACU"`: The maximum capacity of the DB shard group in Aurora capacity units (ACUs). 
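A minimal usage sketch for the two wrappers documented above (editorial illustration, not part of the generated definitions): the identifiers and values below are placeholders, and the calls assume credentials are already available through the default `global_aws_config()`. Required arguments are positional; anything listed under "Valid keys" travels in the `params` dictionary.

    # Hypothetical identifier; dismiss a Performance Insights recommendation.
    modify_dbrecommendation("rec-0123456789abcdef0", Dict{String,Any}("Status" => "dismissed"))

    # Hypothetical shard group name and ACU ceiling; raise MaxACU on a Limitless Database shard group.
    modify_dbshard_group("my-shard-group", Dict{String,Any}("MaxACU" => 768))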
+""" +function modify_dbshard_group( + DBShardGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "ModifyDBShardGroup", + Dict{String,Any}("DBShardGroupIdentifier" => DBShardGroupIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_dbshard_group( + DBShardGroupIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "ModifyDBShardGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DBShardGroupIdentifier" => DBShardGroupIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_dbsnapshot(dbsnapshot_identifier) modify_dbsnapshot(dbsnapshot_identifier, params::Dict{String,<:Any}) Updates a manual DB snapshot with a new engine version. The snapshot can be encrypted or unencrypted, but not shared or public. Amazon RDS supports upgrading DB snapshots for -MySQL, PostgreSQL, and Oracle. This command doesn't apply to RDS Custom. +MySQL, PostgreSQL, and Oracle. This operation doesn't apply to RDS Custom or RDS for Db2. # Arguments - `dbsnapshot_identifier`: The identifier of the DB snapshot to modify. @@ -7183,11 +8297,14 @@ MySQL, PostgreSQL, and Oracle. This command doesn't apply to RDS Custom. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"EngineVersion"`: The engine version to upgrade the DB snapshot to. The following are the database engines and engine versions that are available when you upgrade a DB snapshot. - MySQL 5.5.46 (supported for 5.1 DB snapshots) Oracle 12.1.0.2.v8 (supported for - 12.1.0.1 DB snapshots) 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots) - 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots) PostgreSQL For the list of engine - versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB - Engine for Amazon RDS. + MySQL For the list of engine versions that are available for upgrading a DB snapshot, see + Upgrading a MySQL DB snapshot engine version in the Amazon RDS User Guide. Oracle + 19.0.0.0.ru-2022-01.rur-2022-01.r1 (supported for 12.2.0.1 DB snapshots) + 19.0.0.0.ru-2022-07.rur-2022-07.r1 (supported for 12.1.0.2 DB snapshots) 12.1.0.2.v8 + (supported for 12.1.0.1 DB snapshots) 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots) + 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots) PostgreSQL For the list of engine + versions that are available for upgrading a DB snapshot, see Upgrading a PostgreSQL DB + snapshot engine version in the Amazon RDS User Guide. - `"OptionGroupName"`: The option group to identify with the upgraded DB snapshot. You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more @@ -7366,7 +8483,7 @@ Amazon RDS User Guide or by using the DescribeEventCategories operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Enabled"`: A value that indicates whether to activate the subscription. +- `"Enabled"`: Specifies whether to activate the subscription. - `"EventCategories"`: A list of event categories for a source type (SourceType) that you want to subscribe to. You can see a list of the categories for a given source type in Events in the Amazon RDS User Guide or by using the DescribeEventCategories operation. 
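As a hedged illustration of the snapshot engine-version upgrade described above: the snapshot identifier is a placeholder, and the target version is one of the Oracle versions listed in the ModifyDBSnapshot docstring; optional parameters again go through the `params` dictionary.

    # Sketch only: upgrade a manual Oracle DB snapshot to a listed target engine version.
    modify_dbsnapshot(
        "my-oracle-snapshot",
        Dict{String,Any}("EngineVersion" => "19.0.0.0.ru-2022-01.rur-2022-01.r1"),
    )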
@@ -7375,8 +8492,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SourceType"`: The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all - events are returned. Valid values: db-instance | db-cluster | db-parameter-group | - db-security-group | db-snapshot | db-cluster-snapshot | db-proxy + events are returned. Valid Values: db-instance | db-cluster | db-parameter-group | + db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | + custom-engine-version | blue-green-deployment """ function modify_event_subscription( SubscriptionName; aws_config::AbstractAWSConfig=global_aws_config() @@ -7406,53 +8524,99 @@ function modify_event_subscription( end """ - modify_global_cluster() - modify_global_cluster(params::Dict{String,<:Any}) + modify_global_cluster() + modify_global_cluster(params::Dict{String,<:Any}) + +Modifies a setting for an Amazon Aurora global database cluster. You can change one or more +database configuration parameters by specifying these parameters and the new values in the +request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon +Aurora User Guide. This operation only applies to Aurora global database clusters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowMajorVersionUpgrade"`: Specifies whether to allow major version upgrades. + Constraints: Must be enabled if you specify a value for the EngineVersion parameter that's + a different major version than the global cluster's current version. If you upgrade the + major version of a global database, the cluster and DB instance parameter groups are set to + the default parameter groups for the new version. Apply any custom parameter groups after + completing the upgrade. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the global + database cluster. The global database cluster can't be deleted when deletion protection is + enabled. +- `"EngineVersion"`: The version number of the database engine to which you want to + upgrade. To list all of the available engine versions for aurora-mysql (for MySQL-based + Aurora global databases), use the following command: aws rds describe-db-engine-versions + --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' + To list all of the available engine versions for aurora-postgresql (for PostgreSQL-based + Aurora global databases), use the following command: aws rds describe-db-engine-versions + --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == + `true`].[EngineVersion]' +- `"GlobalClusterIdentifier"`: The cluster identifier for the global cluster to modify. + This parameter isn't case-sensitive. Constraints: Must match the identifier of an + existing global database cluster. +- `"NewGlobalClusterIdentifier"`: The new cluster identifier for the global database + cluster. This value is stored as a lowercase string. Constraints: Must contain from 1 to + 63 letters, numbers, or hyphens. The first character must be a letter. Can't end with a + hyphen or contain two consecutive hyphens. 
Example: my-cluster2 +""" +function modify_global_cluster(; aws_config::AbstractAWSConfig=global_aws_config()) + return rds( + "ModifyGlobalCluster"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function modify_global_cluster( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "ModifyGlobalCluster", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + modify_integration(integration_identifier) + modify_integration(integration_identifier, params::Dict{String,<:Any}) -Modify a setting for an Amazon Aurora global cluster. You can change one or more database -configuration parameters by specifying these parameters and the new values in the request. -For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora -User Guide. This action only applies to Aurora DB clusters. +Modifies a zero-ETL integration with Amazon Redshift. Currently, you can only modify +integrations that have Aurora MySQL source DB clusters. Integrations with Aurora PostgreSQL +and RDS sources currently don't support modifying the integration. + +# Arguments +- `integration_identifier`: The unique identifier of the integration to modify. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are - allowed. Constraints: You must allow major version upgrades when specifying a value for the - EngineVersion parameter that is a different major version than the DB cluster's current - version. If you upgrade the major version of a global database, the cluster and DB instance - parameter groups are set to the default parameter groups for the new version. Apply any - custom parameter groups after completing the upgrade. -- `"DeletionProtection"`: Indicates if the global database cluster has deletion protection - enabled. The global database cluster can't be deleted when deletion protection is enabled. -- `"EngineVersion"`: The version number of the database engine to which you want to - upgrade. Changing this parameter results in an outage. The change is applied during the - next maintenance window unless ApplyImmediately is enabled. To list all of the available - engine versions for aurora-mysql (for MySQL-based Aurora global databases), use the - following command: aws rds describe-db-engine-versions --engine aurora-mysql --query - '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' To list all of the available - engine versions for aurora-postgresql (for PostgreSQL-based Aurora global databases), use - the following command: aws rds describe-db-engine-versions --engine aurora-postgresql - --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' -- `"GlobalClusterIdentifier"`: The DB cluster identifier for the global cluster being - modified. This parameter isn't case-sensitive. Constraints: Must match the identifier of - an existing global database cluster. -- `"NewGlobalClusterIdentifier"`: The new cluster identifier for the global database - cluster when modifying a global database cluster. This value is stored as a lowercase - string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens The first - character must be a letter Can't end with a hyphen or contain two consecutive hyphens - Example: my-cluster2 +- `"DataFilter"`: A new data filter for the integration. 
For more information, see Data + filtering for Aurora zero-ETL integrations with Amazon Redshift. +- `"Description"`: A new description for the integration. +- `"IntegrationName"`: A new name for the integration. """ -function modify_global_cluster(; aws_config::AbstractAWSConfig=global_aws_config()) +function modify_integration( + IntegrationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) return rds( - "ModifyGlobalCluster"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "ModifyIntegration", + Dict{String,Any}("IntegrationIdentifier" => IntegrationIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function modify_global_cluster( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function modify_integration( + IntegrationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return rds( - "ModifyGlobalCluster", - params; + "ModifyIntegration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IntegrationIdentifier" => IntegrationIdentifier), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -7472,8 +8636,8 @@ Modifies an existing option group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ApplyImmediately"`: A value that indicates whether to apply the change immediately or - during the next maintenance window for each instance associated with the option group. +- `"ApplyImmediately"`: Specifies whether to apply the change immediately or during the + next maintenance window for each instance associated with the option group. - `"OptionsToInclude"`: Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration. - `"OptionsToRemove"`: Options in this list are removed from the option group. @@ -7505,6 +8669,70 @@ function modify_option_group( ) end +""" + modify_tenant_database(dbinstance_identifier, tenant_dbname) + modify_tenant_database(dbinstance_identifier, tenant_dbname, params::Dict{String,<:Any}) + +Modifies an existing tenant database in a DB instance. You can change the tenant database +name or the master user password. This operation is supported only for RDS for Oracle CDB +instances using the multi-tenant configuration. + +# Arguments +- `dbinstance_identifier`: The identifier of the DB instance that contains the tenant + database that you are modifying. This parameter isn't case-sensitive. Constraints: Must + match the identifier of an existing DB instance. +- `tenant_dbname`: The user-supplied name of the tenant database that you want to modify. + This parameter isn’t case-sensitive. Constraints: Must match the identifier of an + existing tenant database. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MasterUserPassword"`: The new password for the master user of the specified tenant + database in your DB instance. Amazon RDS operations never return the password, so this + action provides a way to regain access to a tenant database user if the password is lost. + This includes restoring privileges that might have been accidentally revoked. Constraints: + Can include any printable ASCII character except /, \" (double quote), @, & + (ampersand), and ' (single quote). Length constraints: Must contain between 8 and 30 + characters. 
+- `"NewTenantDBName"`: The new name of the tenant database when renaming a tenant database. + This parameter isn’t case-sensitive. Constraints: Can't be the string null or any other + reserved word. Can't be longer than 8 characters. +""" +function modify_tenant_database( + DBInstanceIdentifier, TenantDBName; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "ModifyTenantDatabase", + Dict{String,Any}( + "DBInstanceIdentifier" => DBInstanceIdentifier, "TenantDBName" => TenantDBName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_tenant_database( + DBInstanceIdentifier, + TenantDBName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "ModifyTenantDatabase", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DBInstanceIdentifier" => DBInstanceIdentifier, + "TenantDBName" => TenantDBName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ promote_read_replica(dbinstance_identifier) promote_read_replica(dbinstance_identifier, params::Dict{String,<:Any}) @@ -7720,9 +8948,8 @@ cluster, you can reboot the DB cluster with the RebootDBCluster operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ForceFailover"`: A value that indicates whether the reboot is conducted through a - Multi-AZ failover. Constraint: You can't enable force failover if the instance isn't - configured for Multi-AZ. +- `"ForceFailover"`: Specifies whether the reboot is conducted through a Multi-AZ failover. + Constraint: You can't enable force failover if the instance isn't configured for Multi-AZ. """ function reboot_dbinstance( DBInstanceIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) @@ -7753,6 +8980,47 @@ function reboot_dbinstance( ) end +""" + reboot_dbshard_group(dbshard_group_identifier) + reboot_dbshard_group(dbshard_group_identifier, params::Dict{String,<:Any}) + +You might need to reboot your DB shard group, usually for maintenance reasons. For example, +if you make certain modifications, reboot the DB shard group for the changes to take +effect. This operation applies only to Aurora Limitless Database DB shard groups. + +# Arguments +- `dbshard_group_identifier`: The name of the DB shard group to reboot. + +""" +function reboot_dbshard_group( + DBShardGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return rds( + "RebootDBShardGroup", + Dict{String,Any}("DBShardGroupIdentifier" => DBShardGroupIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reboot_dbshard_group( + DBShardGroupIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "RebootDBShardGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DBShardGroupIdentifier" => DBShardGroupIdentifier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ register_dbproxy_targets(dbproxy_name) register_dbproxy_targets(dbproxy_name, params::Dict{String,<:Any}) @@ -7800,8 +9068,8 @@ end Detaches an Aurora secondary cluster from an Aurora global database cluster. The cluster becomes a standalone cluster with read-write capability instead of being read-only and -receiving data from a primary cluster in a different Region. 
This operation only applies +to Aurora DB clusters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -8051,9 +9319,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Parameters"`: A list of parameter names in the DB cluster parameter group to reset to the default values. You can't use this parameter if the ResetAllParameters parameter is enabled. -- `"ResetAllParameters"`: A value that indicates whether to reset all parameters in the DB - cluster parameter group to their default values. You can't use this parameter if there is a - list of parameter names specified for the Parameters parameter. +- `"ResetAllParameters"`: Specifies whether to reset all parameters in the DB cluster + parameter group to their default values. You can't use this parameter if there is a list of + parameter names specified for the Parameters parameter. """ function reset_dbcluster_parameter_group( DBClusterParameterGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -8113,9 +9381,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots. Oracle Valid Values (for Apply method): pending-reboot -- `"ResetAllParameters"`: A value that indicates whether to reset all parameters in the DB - parameter group to default values. By default, all parameters in the DB parameter group are - reset to default values. +- `"ResetAllParameters"`: Specifies whether to reset all parameters in the DB parameter + group to default values. By default, all parameters in the DB parameter group are reset to + default values. """ function reset_dbparameter_group( DBParameterGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -8153,13 +9421,13 @@ end Creates an Amazon Aurora DB cluster from MySQL data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data from MySQL by Using an -Amazon S3 Bucket in the Amazon Aurora User Guide. This action only restores the DB +Amazon S3 Bucket in the Amazon Aurora User Guide. This operation only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance -action to create DB instances for the restored DB cluster, specifying the identifier of the -restored DB cluster in DBClusterIdentifier. You can create DB instances only after the -RestoreDBClusterFromS3 action has completed and the DB cluster is available. For more +operation to create DB instances for the restored DB cluster, specifying the identifier of +the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the +RestoreDBClusterFromS3 operation has completed and the DB cluster is available. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide. -This action only applies to Aurora DB clusters. The source DB engine must be MySQL. +This operation only applies to Aurora DB clusters. The source DB engine must be MySQL. # Arguments - `dbcluster_identifier`: The name of the DB cluster to create from the source data in the @@ -8177,9 +9445,9 @@ This action only applies to Aurora DB clusters. 
The source DB engine must be MyS Identity and Access Management (IAM) role that authorizes Amazon RDS to access the Amazon S3 bucket on your behalf. - `source_engine`: The identifier for the database engine that was backed up to create the - files stored in the Amazon S3 bucket. Valid values: mysql + files stored in the Amazon S3 bucket. Valid Values: mysql - `source_engine_version`: The version of the database that the backup files were created - from. MySQL versions 5.5, 5.6, and 5.7 are supported. Example: 5.6.40, 5.7.28 + from. MySQL versions 5.7 and 8.0 are supported. Example: 5.7.40, 8.0.28 # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -8194,8 +9462,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Must be a value from 1 to 35 - `"CharacterSetName"`: A value that indicates that the restored DB cluster should be associated with the specified CharacterSet. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the restored - DB cluster to snapshots of the restored DB cluster. The default is not to copy them. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the restored DB cluster + to snapshots of the restored DB cluster. The default is not to copy them. - `"DBClusterParameterGroupName"`: The name of the DB cluster parameter group to associate with the restored DB cluster. If this argument is omitted, the default parameter group for the engine version is used. Constraints: If supplied, must match the name of an existing @@ -8204,9 +9472,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup - `"DatabaseName"`: The database name for the restored DB cluster. -- `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the DB + cluster. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. - `"Domain"`: Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For @@ -8218,15 +9486,29 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Aurora MySQL Possible values are audit, error, general, and slowquery. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication in - the Amazon Aurora User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon + Aurora User Guide. 
+- `"EngineLifecycleSupport"`: The life cycle type for this DB cluster. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon + RDS Extended Support. At the end of standard support, you can avoid charges for Extended + Support by setting the value to open-source-rds-extended-support-disabled. In this case, + RDS automatically upgrades your restored DB cluster to a higher engine version, if the + major engine version is past its end of standard support date. You can use this setting to + enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can + run the selected major engine version on your DB cluster past the end of standard support + for that engine version. For more information, see the following sections: Amazon Aurora + (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide + Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for + Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: + open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: + open-source-rds-extended-support - `"EngineVersion"`: The version number of the database engine to use. To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command: aws rds describe-db-engine-versions --engine aurora-mysql --query - \"DBEngineVersions[].EngineVersion\" Aurora MySQL Examples: 5.7.mysql_aurora.2.07.1, - 8.0.mysql_aurora.3.02.0 + \"DBEngineVersions[].EngineVersion\" Aurora MySQL Examples: 5.7.mysql_aurora.2.12.0, + 8.0.mysql_aurora.3.04.0 - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key @@ -8234,12 +9516,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value for the KmsKeyId parameter, then Amazon RDS will use your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. For more information, see Password - management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and - Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User - Guide. Constraints: Can't manage the master user password with Amazon Web Services - Secrets Manager if MasterUserPassword is specified. +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. For more information, see Password management with + Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management + with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide. Constraints: + Can't manage the master user password with Amazon Web Services Secrets Manager if + MasterUserPassword is specified. - `"MasterUserPassword"`: The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must contain from 8 to 41 characters. 
Can't be specified if ManageMasterUserPassword is turned @@ -8255,7 +9537,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. -- `"NetworkType"`: The network type of the DB cluster. Valid values: IPV4 DUAL +- `"NetworkType"`: The network type of the DB cluster. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User @@ -8283,9 +9565,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the Amazon Aurora DB cluster. If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket. - `"ServerlessV2ScalingConfiguration"`: -- `"StorageEncrypted"`: A value that indicates whether the restored DB cluster is encrypted. +- `"StorageEncrypted"`: Specifies whether the restored DB cluster is encrypted. - `"StorageType"`: Specifies the storage type to be associated with the DB cluster. Valid - values: aurora, aurora-iopt1 Default: aurora Valid for: Aurora DB clusters only + Values: aurora, aurora-iopt1 Default: aurora Valid for: Aurora DB clusters only - `"Tags"`: - `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with the restored DB cluster. @@ -8355,10 +9637,10 @@ end Creates a new DB cluster from a DB snapshot or DB cluster snapshot. The target DB cluster is created from the source snapshot with a default configuration. If you don't specify a security group, the new DB cluster is associated with the default security group. This -action only restores the DB cluster, not the DB instances for that DB cluster. You must -invoke the CreateDBInstance action to create DB instances for the restored DB cluster, +operation only restores the DB cluster, not the DB instances for that DB cluster. You must +invoke the CreateDBInstance operation to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create -DB instances only after the RestoreDBClusterFromSnapshot action has completed and the DB +DB instances only after the RestoreDBClusterFromSnapshot operation has completed and the DB cluster is available. For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. @@ -8386,9 +9668,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys set this value to 0. Currently, Backtrack is only supported for Aurora MySQL DB clusters. Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours). Valid for: Aurora DB clusters only -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the restored - DB cluster to snapshots of the restored DB cluster. The default is not to copy them. 
Valid - for: Aurora DB clusters and Multi-AZ DB clusters +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the restored DB cluster + to snapshots of the restored DB cluster. The default is not to copy them. Valid for: Aurora + DB clusters and Multi-AZ DB clusters - `"DBClusterInstanceClass"`: The compute and memory capacity of the each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full @@ -8405,17 +9687,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys mydbsubnetgroup Valid for: Aurora DB clusters and Multi-AZ DB clusters - `"DatabaseName"`: The database name for the restored DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB - clusters -- `"Domain"`: Specify the Active Directory directory ID to restore the DB cluster in. The - domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL - Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. - For more information, see Kerberos Authentication in the Amazon RDS User Guide. Valid for: - Aurora DB clusters only -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. Valid for: Aurora DB clusters only +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the DB + cluster. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"Domain"`: The Active Directory directory ID to restore the DB cluster in. The domain + must be created prior to this operation. Currently, only MySQL, Microsoft SQL Server, + Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more + information, see Kerberos Authentication in the Amazon RDS User Guide. Valid for: Aurora + DB clusters only +- `"DomainIAMRoleName"`: The name of the IAM role to be used when making API calls to the + Directory Service. Valid for: Aurora DB clusters only - `"EnableCloudwatchLogsExports"`: The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used. RDS for MySQL Possible values are error, general, and slowquery. RDS for @@ -8426,10 +9707,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication in - the Amazon Aurora User Guide. Valid for: Aurora DB clusters only +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. 
For more information, see IAM Database Authentication in the Amazon + Aurora User Guide. Valid for: Aurora DB clusters only +- `"EngineLifecycleSupport"`: The life cycle type for this DB cluster. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon + RDS Extended Support. At the end of standard support, you can avoid charges for Extended + Support by setting the value to open-source-rds-extended-support-disabled. In this case, + RDS automatically upgrades your restored DB cluster to a higher engine version, if the + major engine version is past its end of standard support date. You can use this setting to + enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can + run the selected major engine version on your DB cluster past the end of standard support + for that engine version. For more information, see the following sections: Amazon Aurora + (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide + Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for + Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: + open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: + open-source-rds-extended-support - `"EngineMode"`: The DB engine mode of the DB cluster, either provisioned or serverless. For more information, see CreateDBCluster. Valid for: Aurora DB clusters only - `"EngineVersion"`: The version of the database engine to use for the new DB cluster. If @@ -8464,7 +9759,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cluster snapshot. If the DB snapshot or DB cluster snapshot in SnapshotIdentifier isn't encrypted, then the restored DB cluster isn't encrypted. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"NetworkType"`: The network type of the DB cluster. Valid values: IPV4 DUAL +- `"NetworkType"`: The network type of the DB cluster. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User @@ -8474,16 +9769,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Port"`: The port number on which the new DB cluster accepts connections. Constraints: This value must be 1150-65535 Default: The same port as the original DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"PubliclyAccessible"`: A value that indicates whether the DB cluster is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access is not permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with - a DNS name that resolves to a private IP address. Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. 
If DBSubnetGroupName isn't specified, - and PubliclyAccessible isn't specified, the following applies: If the default VPC in the +- `"PubliclyAccessible"`: Specifies whether the DB cluster is publicly accessible. When the + DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the + private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to + the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is + ultimately controlled by the security group it uses. That public access is not permitted if + the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't + publicly accessible, it is an internal DB cluster with a DNS name that resolves to a + private IP address. Default: The default behavior varies depending on whether + DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and + PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't @@ -8491,12 +9786,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"RdsCustomClusterConfiguration"`: Reserved for future use. - `"ScalingConfiguration"`: For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster. Valid for: Aurora DB clusters only - `"ServerlessV2ScalingConfiguration"`: - `"StorageType"`: Specifies the storage type to be associated with the DB cluster. When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid - values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: + Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters - `"Tags"`: The tags to be assigned to the restored DB cluster. Valid for: Aurora DB @@ -8547,63 +9843,59 @@ function restore_dbcluster_from_snapshot( end """ - restore_dbcluster_to_point_in_time(dbcluster_identifier, source_dbcluster_identifier) - restore_dbcluster_to_point_in_time(dbcluster_identifier, source_dbcluster_identifier, params::Dict{String,<:Any}) + restore_dbcluster_to_point_in_time(dbcluster_identifier) + restore_dbcluster_to_point_in_time(dbcluster_identifier, params::Dict{String,<:Any}) Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group. For Aurora, -this action only restores the DB cluster, not the DB instances for that DB cluster. You -must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, -specifying the identifier of the restored DB cluster in DBClusterIdentifier. 
You can create -DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB -cluster is available. For more information on Amazon Aurora DB clusters, see What is -Amazon Aurora? in the Amazon Aurora User Guide. For more information on Multi-AZ DB -clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. +this operation only restores the DB cluster, not the DB instances for that DB cluster. You +must invoke the CreateDBInstance operation to create DB instances for the restored DB +cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You +can create DB instances only after the RestoreDBClusterToPointInTime operation has +completed and the DB cluster is available. For more information on Amazon Aurora DB +clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide. For more information +on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. # Arguments - `dbcluster_identifier`: The name of the new DB cluster to be created. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens First character must be a letter Can't end with a hyphen or contain two consecutive hyphens Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `source_dbcluster_identifier`: The identifier of the source DB cluster from which to - restore. Constraints: Must match the identifier of an existing DBCluster. Valid for: - Aurora DB clusters and Multi-AZ DB clusters # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BacktrackWindow"`: The target backtrack window, in seconds. To disable backtracking, set this value to 0. Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours). Valid for: Aurora MySQL DB clusters only -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the restored - DB cluster to snapshots of the restored DB cluster. The default is not to copy them. Valid - for: Aurora DB clusters and Multi-AZ DB clusters +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the restored DB cluster + to snapshots of the restored DB cluster. The default is not to copy them. Valid for: Aurora + DB clusters and Multi-AZ DB clusters - `"DBClusterInstanceClass"`: The compute and memory capacity of the each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance class in the - Amazon RDS User Guide. Valid for: Multi-AZ DB clusters only -- `"DBClusterParameterGroupName"`: The name of the DB cluster parameter group to associate - with this DB cluster. If this argument is omitted, the default DB cluster parameter group - for the specified engine is used. Constraints: If supplied, must match the name of an - existing DB cluster parameter group. Must be 1 to 255 letters, numbers, or hyphens. - First character must be a letter. Can't end with a hyphen or contain two consecutive - hyphens. Valid for: Aurora DB clusters and Multi-AZ DB clusters + Amazon RDS User Guide. Valid for: Multi-AZ DB clusters only +- `"DBClusterParameterGroupName"`: The name of the custom DB cluster parameter group to + associate with this DB cluster. 
If the DBClusterParameterGroupName parameter is omitted, + the default DB cluster parameter group for the specified engine is used. Constraints: If + supplied, must match the name of an existing DB cluster parameter group. Must be 1 to 255 + letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen + or contain two consecutive hyphens. Valid for: Aurora DB clusters and Multi-AZ DB clusters - `"DBSubnetGroupName"`: The DB subnet group name to use for the new DB cluster. Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB - clusters -- `"Domain"`: Specify the Active Directory directory ID to restore the DB cluster in. The - domain must be created prior to this operation. For Amazon Aurora DB clusters, Amazon RDS - can use Kerberos Authentication to authenticate users that connect to the DB cluster. For - more information, see Kerberos Authentication in the Amazon Aurora User Guide. Valid for: - Aurora DB clusters only -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. Valid for: Aurora DB clusters only +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the DB + cluster. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"Domain"`: The Active Directory directory ID to restore the DB cluster in. The domain + must be created prior to this operation. For Amazon Aurora DB clusters, Amazon RDS can use + Kerberos Authentication to authenticate users that connect to the DB cluster. For more + information, see Kerberos Authentication in the Amazon Aurora User Guide. Valid for: Aurora + DB clusters only +- `"DomainIAMRoleName"`: The name of the IAM role to be used when making API calls to the + Directory Service. Valid for: Aurora DB clusters only - `"EnableCloudwatchLogsExports"`: The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. RDS for MySQL Possible values are error, general, and slowquery. RDS for PostgreSQL Possible @@ -8613,10 +9905,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication in - the Amazon Aurora User Guide. Valid for: Aurora DB clusters only +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. 
For more information, see IAM Database Authentication in the Amazon + Aurora User Guide. Valid for: Aurora DB clusters only +- `"EngineLifecycleSupport"`: The life cycle type for this DB cluster. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon + RDS Extended Support. At the end of standard support, you can avoid charges for Extended + Support by setting the value to open-source-rds-extended-support-disabled. In this case, + RDS automatically upgrades your restored DB cluster to a higher engine version, if the + major engine version is past its end of standard support date. You can use this setting to + enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can + run the selected major engine version on your DB cluster past the end of standard support + for that engine version. For more information, see the following sections: Amazon Aurora + (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide + Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for + Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: + open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: + open-source-rds-extended-support - `"EngineMode"`: The engine mode of the new cluster. Specify provisioned or serverless, depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 @@ -8640,7 +9946,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cluster. If the DB cluster isn't encrypted, then the restored DB cluster isn't encrypted. If DBClusterIdentifier refers to a DB cluster that isn't encrypted, then the restore request is rejected. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"NetworkType"`: The network type of the DB cluster. Valid values: IPV4 DUAL +- `"NetworkType"`: The network type of the DB cluster. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User @@ -8650,16 +9956,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Port"`: The port number on which the new DB cluster accepts connections. Constraints: A value from 1150-65535. Default: The default port for the engine. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"PubliclyAccessible"`: A value that indicates whether the DB cluster is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access is not permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with - a DNS name that resolves to a private IP address. Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. 
If DBSubnetGroupName isn't specified, - and PubliclyAccessible isn't specified, the following applies: If the default VPC in the +- `"PubliclyAccessible"`: Specifies whether the DB cluster is publicly accessible. When the + DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the + private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to + the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is + ultimately controlled by the security group it uses. That public access is not permitted if + the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't + publicly accessible, it is an internal DB cluster with a DNS name that resolves to a + private IP address. Default: The default behavior varies depending on whether + DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and + PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't @@ -8667,6 +9973,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: Multi-AZ DB clusters only +- `"RdsCustomClusterConfiguration"`: Reserved for future use. - `"RestoreToTime"`: The date and time to restore the DB cluster to. Valid Values: Value must be a time in Universal Coordinated Time (UTC) format Constraints: Must be before the latest restorable time for the DB instance Must be specified if UseLatestRestorableTime @@ -8682,37 +9989,36 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ScalingConfiguration"`: For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster. Valid for: Aurora DB clusters only - `"ServerlessV2ScalingConfiguration"`: +- `"SourceDBClusterIdentifier"`: The identifier of the source DB cluster from which to + restore. Constraints: Must match the identifier of an existing DBCluster. Valid for: + Aurora DB clusters and Multi-AZ DB clusters +- `"SourceDbClusterResourceId"`: The resource ID of the source DB cluster from which to + restore. - `"StorageType"`: Specifies the storage type to be associated with the DB cluster. When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid - values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: + Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters - `"Tags"`: -- `"UseLatestRestorableTime"`: A value that indicates whether to restore the DB cluster to - the latest restorable backup time. By default, the DB cluster isn't restored to the latest - restorable backup time. Constraints: Can't be specified if RestoreToTime parameter is - provided. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"UseLatestRestorableTime"`: Specifies whether to restore the DB cluster to the latest + restorable backup time. By default, the DB cluster isn't restored to the latest restorable + backup time. 
Constraints: Can't be specified if RestoreToTime parameter is provided. Valid + for: Aurora DB clusters and Multi-AZ DB clusters - `"VpcSecurityGroupIds"`: A list of VPC security groups that the new DB cluster belongs to. Valid for: Aurora DB clusters and Multi-AZ DB clusters """ function restore_dbcluster_to_point_in_time( - DBClusterIdentifier, - SourceDBClusterIdentifier; - aws_config::AbstractAWSConfig=global_aws_config(), + DBClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() ) return rds( "RestoreDBClusterToPointInTime", - Dict{String,Any}( - "DBClusterIdentifier" => DBClusterIdentifier, - "SourceDBClusterIdentifier" => SourceDBClusterIdentifier, - ); + Dict{String,Any}("DBClusterIdentifier" => DBClusterIdentifier); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function restore_dbcluster_to_point_in_time( DBClusterIdentifier, - SourceDBClusterIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -8721,10 +10027,7 @@ function restore_dbcluster_to_point_in_time( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "DBClusterIdentifier" => DBClusterIdentifier, - "SourceDBClusterIdentifier" => SourceDBClusterIdentifier, - ), + Dict{String,Any}("DBClusterIdentifier" => DBClusterIdentifier), params, ), ); @@ -8744,20 +10047,20 @@ created as a Single-AZ deployment, except when the instance is a SQL Server inst has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before -you call the RestoreDBInstanceFromDBSnapshot action. RDS doesn't allow two DB instances +you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the -DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result -is that you replace the original DB instance with the DB instance created from the +DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The +result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. # Arguments -- `dbinstance_identifier`: Name of the DB instance to create from the DB snapshot. This +- `dbinstance_identifier`: The name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive. Constraints: Must contain from 1 to 63 numbers, letters, - or hyphens First character must be a letter Can't end with a hyphen or contain two - consecutive hyphens Example: my-snapshot-id + or hyphens. First character must be a letter. Can't end with a hyphen or contain two + consecutive hyphens. Example: my-snapshot-id # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -8765,9 +10068,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. 
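With this update, SourceDBClusterIdentifier is no longer a positional argument of restore_dbcluster_to_point_in_time; the source cluster is passed through the optional params dictionary instead (or by resource ID via the new SourceDbClusterResourceId key). A minimal usage sketch, not part of the generated diff, assuming the package's high-level @service interface and placeholder cluster identifiers:

    using AWS: @service
    @service RDS

    # Restore a new cluster from an existing one at its latest restorable time.
    # Requires valid AWS credentials; both identifiers below are hypothetical.
    RDS.restore_dbcluster_to_point_in_time(
        "my-restored-cluster",
        Dict(
            "SourceDBClusterIdentifier" => "my-source-cluster",
            "UseLatestRestorableTime" => true,
        ),
    )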
You can also allocate additional storage for future growth. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor version upgrades are - applied automatically to the DB instance during the maintenance window. If you restore an - RDS Custom DB instance, you must disable this parameter. +- `"AutoMinorVersionUpgrade"`: Specifies whether to automatically apply minor version + upgrades to the DB instance during the maintenance window. If you restore an RDS Custom DB + instance, you must disable this parameter. - `"AvailabilityZone"`: The Availability Zone (AZ) where the DB instance will be created. Default: A random, system-chosen Availability Zone. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. Example: us-east-1a @@ -8775,13 +10078,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the restored DB instance. Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region. For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the restored - DB instance to snapshots of the DB instance. In most cases, tags aren't copied by default. - However, when you restore a DB instance from a DB snapshot, RDS checks whether you specify - new tags. If yes, the new tags are added to the restored DB instance. If there are no new - tags, RDS looks for the tags from the source DB instance for the DB snapshot, and then adds - those tags to the restored DB instance. For more information, see Copying tags to DB - instance snapshots in the Amazon RDS User Guide. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more + information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS + User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora + User Guide. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the restored DB instance + to snapshots of the DB instance. In most cases, tags aren't copied by default. However, + when you restore a DB instance from a DB snapshot, RDS checks whether you specify new tags. + If yes, the new tags are added to the restored DB instance. If there are no new tags, RDS + looks for the tags from the source DB instance for the DB snapshot, and then adds those + tags to the restored DB instance. For more information, see Copying tags to DB instance + snapshots in the Amazon RDS User Guide. - `"CustomIamInstanceProfile"`: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role @@ -8789,70 +10097,97 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom. -- `"DBClusterSnapshotIdentifier"`: The identifier for the RDS for MySQL Multi-AZ DB cluster - snapshot to restore from. For more information on Multi-AZ DB clusters, see Multi-AZ DB - cluster deployments in the Amazon RDS User Guide. 
Constraints: Must match the identifier - of an existing Multi-AZ DB cluster snapshot. Can't be specified when DBSnapshotIdentifier - is specified. Must be specified when DBSnapshotIdentifier isn't specified. If you are +- `"DBClusterSnapshotIdentifier"`: The identifier for the Multi-AZ DB cluster snapshot to + restore from. For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster + deployments in the Amazon RDS User Guide. Constraints: Must match the identifier of an + existing Multi-AZ DB cluster snapshot. Can't be specified when DBSnapshotIdentifier is + specified. Must be specified when DBSnapshotIdentifier isn't specified. If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the DBClusterSnapshotIdentifier must be the ARN of the shared snapshot. Can't be the - identifier of an Aurora DB cluster snapshot. Can't be the identifier of an RDS for - PostgreSQL Multi-AZ DB cluster snapshot. + identifier of an Aurora DB cluster snapshot. - `"DBInstanceClass"`: The compute and memory capacity of the Amazon RDS DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: The same DBInstanceClass as the original DB instance. -- `"DBName"`: The database name for the restored DB instance. This parameter doesn't apply - to the MySQL, PostgreSQL, or MariaDB engines. It also doesn't apply to RDS Custom DB - instances. +- `"DBName"`: The name of the database for the restored DB instance. This parameter only + applies to RDS for Oracle and RDS for SQL Server DB instances. It doesn't apply to the + other engines or to RDS Custom DB instances. - `"DBParameterGroupName"`: The name of the DB parameter group to associate with this DB instance. If you don't specify a value for DBParameterGroupName, then RDS uses the default DBParameterGroup for the specified DB engine. This setting doesn't apply to RDS Custom. - Constraints: If supplied, must match the name of an existing DBParameterGroup. Must be - 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with - a hyphen or contain two consecutive hyphens. + Constraints: If supplied, must match the name of an existing DB parameter group. Must + be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end + with a hyphen or contain two consecutive hyphens. - `"DBSnapshotIdentifier"`: The identifier for the DB snapshot to restore from. - Constraints: Must match the identifier of an existing DBSnapshot. Can't be specified + Constraints: Must match the identifier of an existing DB snapshot. Can't be specified when DBClusterSnapshotIdentifier is specified. Must be specified when DBClusterSnapshotIdentifier isn't specified. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. -- `"DBSubnetGroupName"`: The DB subnet group name to use for the new instance. Constraints: - If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. 
-- `"Domain"`: Specify the Active Directory directory ID to restore the DB instance in. The - domain/ must be created prior to this operation. Currently, you can create only MySQL, +- `"DBSubnetGroupName"`: The name of the DB subnet group to use for the new instance. + Constraints: If supplied, must match the name of an existing DB subnet group. Example: + mydbsubnetgroup +- `"DedicatedLogVolume"`: Specifies whether to enable a dedicated log volume (DLV) for the + DB instance. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the DB + instance. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. For more information, see Deleting a DB Instance. +- `"Domain"`: The Active Directory directory ID to restore the DB instance in. The domain/ + must be created prior to this operation. Currently, you can create only Db2, MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. This setting doesn't apply to RDS Custom. -- `"EnableCloudwatchLogsExports"`: The list of logs that the restored DB instance is to - export to CloudWatch Logs. The values in the list depend on the DB engine being used. For - more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS - User Guide. This setting doesn't apply to RDS Custom. -- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP - address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external - connectivity to resources in your Outpost subnets through your on-premises network. For - some use cases, a CoIP can provide lower latency for connections to the DB instance from - outside of its virtual private cloud (VPC) on your local network. This setting doesn't - apply to RDS Custom. For more information about RDS on Outposts, see Working with Amazon - RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. For more information - about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping is disabled. For more information about IAM database authentication, see - IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. Constraints: Can't be longer than 64 characters. Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. 
Example: + mymanagedADtest.mymanagedAD.mydomain +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. This setting doesn't apply to RDS Custom DB instances. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain +- `"EnableCloudwatchLogsExports"`: The list of logs for the restored DB instance to export + to CloudWatch Logs. The values in the list depend on the DB engine. For more information, + see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. This + setting doesn't apply to RDS Custom. +- `"EnableCustomerOwnedIp"`: Specifies whether to enable a customer-owned IP address (CoIP) + for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to + resources in your Outpost subnets through your on-premises network. For some use cases, a + CoIP can provide lower latency for connections to the DB instance from outside of its + virtual private cloud (VPC) on your local network. This setting doesn't apply to RDS + Custom. For more information about RDS on Outposts, see Working with Amazon RDS on Amazon + Web Services Outposts in the Amazon RDS User Guide. For more information about CoIPs, see + Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping is disabled. For more information about IAM database authentication, see IAM + Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. - `"Engine"`: The database engine to use for the new instance. This setting doesn't apply to RDS Custom. Default: The same as source Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 - snapshot. Valid Values: mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 - oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex - sqlserver-web + snapshot. Valid Values: db2-ae db2-se mariadb mysql oracle-ee + oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee + sqlserver-se sqlserver-ex sqlserver-web +- `"EngineLifecycleSupport"`: The life cycle type for this DB instance. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB instance into + Amazon RDS Extended Support. At the end of standard support, you can avoid charges for + Extended Support by setting the value to open-source-rds-extended-support-disabled. In this + case, RDS automatically upgrades your restored DB instance to a higher engine version, if + the major engine version is past its end of standard support date. You can use this + setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended + Support, you can run the selected major engine version on your DB instance past the end of + standard support for that engine version. For more information, see Using Amazon RDS + Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL + and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by + the DB cluster. 
Valid Values: open-source-rds-extended-support | + open-source-rds-extended-support-disabled Default: open-source-rds-extended-support - `"Iops"`: Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter isn't specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS @@ -8861,13 +10196,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be an integer greater than 1000. -- `"LicenseModel"`: License model information for the restored DB instance. This setting - doesn't apply to RDS Custom. Default: Same as source. Valid values: license-included | - bring-your-own-license | general-public-license -- `"MultiAZ"`: A value that indicates whether the DB instance is a Multi-AZ deployment. - This setting doesn't apply to RDS Custom. Constraint: You can't specify the - AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL +- `"LicenseModel"`: License model information for the restored DB instance. License models + for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model + requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace + model requires an Amazon Web Services Marketplace subscription. For more information, see + RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to + Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - + bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license + RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license + RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - + postgresql-license Default: Same as the source. +- `"MultiAZ"`: Specifies whether the DB instance is a Multi-AZ deployment. This setting + doesn't apply to RDS Custom. Constraint: You can't specify the AvailabilityZone parameter + if the DB instance is a Multi-AZ deployment. +- `"NetworkType"`: The network type of the DB instance. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User @@ -8880,29 +10222,27 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys port as the original DB instance Constraints: Value must be 1150-65535 - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. This setting doesn't apply to RDS Custom. -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB instance's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. - Access to the DB instance is ultimately controlled by the security group it uses. That - public access is not permitted if the security group assigned to the DB instance doesn't - permit it. 
When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. For more information, see - CreateDBInstance. +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB instance's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB instance's VPC. Access to the DB + instance is ultimately controlled by the security group it uses. That public access is not + permitted if the security group assigned to the DB instance doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. For more information, see CreateDBInstance. - `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This setting doesn't apply to RDS Custom or Amazon Aurora. - `"StorageType"`: Specifies the storage type to be associated with the DB instance. Valid - values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a - value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise - gp2 + Values: gp2 | gp3 | io1 | io2 | standard If you specify io1, io2, or gp3, you must also + include a value for the Iops parameter. Default: io1 if the Iops parameter is specified, + otherwise gp2 - `"Tags"`: - `"TdeCredentialArn"`: The ARN from the key store with which to associate the instance for TDE encryption. This setting doesn't apply to RDS Custom. - `"TdeCredentialPassword"`: The password for the given ARN from the key store in order to access the device. This setting doesn't apply to RDS Custom. -- `"UseDefaultProcessorFeatures"`: A value that indicates whether the DB instance class of - the DB instance uses its default processor features. This setting doesn't apply to RDS - Custom. +- `"UseDefaultProcessorFeatures"`: Specifies whether the DB instance class of the DB + instance uses its default processor features. This setting doesn't apply to RDS Custom. - `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this DB instance. Default: The default EC2 VPC security group for the DB subnet group's VPC. """ @@ -8943,7 +10283,7 @@ Amazon Relational Database Service (Amazon RDS) supports importing MySQL databas backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS -MySQL DB Instance in the Amazon RDS User Guide. This command doesn't apply to RDS Custom. +MySQL DB Instance in the Amazon RDS User Guide. This operation doesn't apply to RDS Custom. # Arguments - `dbinstance_class`: The compute and memory capacity of the DB instance, for example @@ -8960,7 +10300,9 @@ MySQL DB Instance in the Amazon RDS User Guide. This command doesn't apply to R - `s3_bucket_name`: The name of your Amazon S3 bucket that contains your database backup file. - `s3_ingestion_role_arn`: An Amazon Web Services Identity and Access Management (IAM) role - to allow Amazon RDS to access your Amazon S3 bucket. + with a trust policy and a permissions policy that allows Amazon RDS to access your Amazon + S3 bucket. 
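restore_dbinstance_from_dbsnapshot likewise takes only the new instance identifier positionally; the snapshot to restore from is named in the params dictionary, as DBSnapshotIdentifier or, for a Multi-AZ DB cluster snapshot, DBClusterSnapshotIdentifier. A hedged sketch, not part of the generated diff, assuming the @service interface and placeholder identifiers:

    using AWS: @service
    @service RDS

    # Restore a new DB instance from a manual DB snapshot.
    # Requires valid AWS credentials; identifiers and instance class are placeholders.
    RDS.restore_dbinstance_from_dbsnapshot(
        "my-restored-instance",
        Dict(
            "DBSnapshotIdentifier" => "my-db-snapshot",
            "DBInstanceClass" => "db.m4.large",
        ),
    )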
For information about this role, see Creating an IAM role manually in the + Amazon RDS User Guide. - `source_engine`: The name of the engine of your source database. Valid Values: mysql - `source_engine_version`: The version of the database that the backup files were created from. MySQL versions 5.6 and 5.7 are supported. Example: 5.6.40 @@ -8971,9 +10313,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the DB instance during the maintenance window. By default, minor - engine upgrades are not applied automatically. +- `"AutoMinorVersionUpgrade"`: Specifies whether to automatically apply minor engine + upgrades to the DB instance during the maintenance window. By default, minor engine + upgrades are not applied automatically. - `"AvailabilityZone"`: The Availability Zone that the DB instance is created in. For information about Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones in the Amazon RDS User Guide. Default: A random, system-chosen @@ -8984,8 +10326,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"BackupRetentionPeriod"`: The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. For more information, see CreateDBInstance. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB - instance to snapshots of the DB instance. By default, tags are not copied. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more + information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS + User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora + User Guide. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the DB instance to + snapshots of the DB instance. By default, tags are not copied. - `"DBName"`: The name of the database to create when the DB instance is created. Follow the naming rules specified in CreateDBInstance. - `"DBParameterGroupName"`: The name of the DB parameter group to associate with this DB @@ -8995,21 +10342,35 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Default: The default DB security group for the database engine. - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB instance. Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. +- `"DedicatedLogVolume"`: Specifies whether to enable a dedicated log volume (DLV) for the + DB instance. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the DB + instance. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. 
For more information, see Deleting a DB Instance. - `"EnableCloudwatchLogsExports"`: The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information about IAM database authentication, see - IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. -- `"EnablePerformanceInsights"`: A value that indicates whether to enable Performance - Insights for the DB instance. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information about IAM database authentication, see IAM + Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. +- `"EnablePerformanceInsights"`: Specifies whether to enable Performance Insights for the + DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. +- `"EngineLifecycleSupport"`: The life cycle type for this DB instance. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB instance into + Amazon RDS Extended Support. At the end of standard support, you can avoid charges for + Extended Support by setting the value to open-source-rds-extended-support-disabled. In this + case, RDS automatically upgrades your restored DB instance to a higher engine version, if + the major engine version is past its end of standard support date. You can use this + setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended + Support, you can run the selected major engine version on your DB instance past the end of + standard support for that engine version. For more information, see Using Amazon RDS + Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL + and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by + the DB cluster. Valid Values: open-source-rds-extended-support | + open-source-rds-extended-support-disabled Default: open-source-rds-extended-support - `"EngineVersion"`: The version number of the database engine to use. Choose the latest minor version of your database engine. For information about engine versions, see CreateDBInstance, or call DescribeDBEngineVersions. @@ -9024,18 +10385,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. - `"LicenseModel"`: The license model for this DB instance. Use general-public-license. -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. For more information, see Password - management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. 
- Constraints: Can't manage the master user password with Amazon Web Services Secrets - Manager if MasterUserPassword is specified. -- `"MasterUserPassword"`: The password for the master user. The password can include any - printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Can't be specified - if ManageMasterUserPassword is turned on. MariaDB Constraints: Must contain from 8 to 41 - characters. Microsoft SQL Server Constraints: Must contain from 8 to 128 characters. - MySQL Constraints: Must contain from 8 to 41 characters. Oracle Constraints: Must - contain from 8 to 30 characters. PostgreSQL Constraints: Must contain from 8 to 128 - characters. +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. For more information, see Password management with + Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't + manage the master user password with Amazon Web Services Secrets Manager if + MasterUserPassword is specified. +- `"MasterUserPassword"`: The password for the master user. Constraints: Can't be + specified if ManageMasterUserPassword is turned on. Can include any printable ASCII + character except \"/\", \"\"\", or \"@\". For RDS for Oracle, can't include the \"&\" + (ampersand) or the \"'\" (single quotes) character. Length Constraints: RDS for Db2 - + Must contain from 8 to 128 characters. RDS for MariaDB - Must contain from 8 to 41 + characters. RDS for Microsoft SQL Server - Must contain from 8 to 128 characters. RDS + for MySQL - Must contain from 8 to 41 characters. RDS for Oracle - Must contain from 8 to + 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 characters. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web @@ -9065,9 +10427,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. -- `"MultiAZ"`: A value that indicates whether the DB instance is a Multi-AZ deployment. If - the DB instance is a Multi-AZ deployment, you can't set the AvailabilityZone parameter. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL +- `"MultiAZ"`: Specifies whether the DB instance is a Multi-AZ deployment. If the DB + instance is a Multi-AZ deployment, you can't set the AvailabilityZone parameter. +- `"NetworkType"`: The network type of the DB instance. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User @@ -9100,28 +10462,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Must be at least 30 minutes. - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. 
When the DB instance is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB instance's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. - Access to the DB instance is ultimately controlled by the security group it uses. That - public access is not permitted if the security group assigned to the DB instance doesn't - permit it. When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. For more information, see - CreateDBInstance. +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB instance's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB instance's VPC. Access to the DB + instance is ultimately controlled by the security group it uses. That public access is not + permitted if the security group assigned to the DB instance doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. For more information, see CreateDBInstance. - `"S3Prefix"`: The prefix of your Amazon S3 bucket. -- `"StorageEncrypted"`: A value that indicates whether the new DB instance is encrypted or - not. +- `"StorageEncrypted"`: Specifies whether the new DB instance is encrypted or not. - `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This setting doesn't apply to RDS Custom or Amazon Aurora. - `"StorageType"`: Specifies the storage type to be associated with the DB instance. Valid - values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a - value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise - gp2 + Values: gp2 | gp3 | io1 | io2 | standard If you specify io1, io2, or gp3, you must also + include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; + otherwise gp2 - `"Tags"`: A list of tags to associate with this DB instance. For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide. -- `"UseDefaultProcessorFeatures"`: A value that indicates whether the DB instance class of - the DB instance uses its default processor features. +- `"UseDefaultProcessorFeatures"`: Specifies whether the DB instance class of the DB + instance uses its default processor features. - `"VpcSecurityGroupIds"`: A list of VPC security groups to associate with this DB instance. """ function restore_dbinstance_from_s3( @@ -9194,13 +10554,13 @@ Availability Zone, with the default security group, the default subnet group, an default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and -not a single-AZ deployment. This command doesn't apply to Aurora MySQL and Aurora +not a single-AZ deployment. This operation doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterToPointInTime. # Arguments -- `target_dbinstance_identifier`: The name of the new DB instance to be created. 
- Constraints: Must contain from 1 to 63 letters, numbers, or hyphens First character - must be a letter Can't end with a hyphen or contain two consecutive hyphens +- `target_dbinstance_identifier`: The name of the new DB instance to create. Constraints: + Must contain from 1 to 63 letters, numbers, or hyphens. First character must be a letter. + Can't end with a hyphen or contain two consecutive hyphens. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -9208,18 +10568,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor version upgrades are - applied automatically to the DB instance during the maintenance window. This setting - doesn't apply to RDS Custom. +- `"AutoMinorVersionUpgrade"`: Specifies whether minor version upgrades are applied + automatically to the DB instance during the maintenance window. This setting doesn't apply + to RDS Custom. - `"AvailabilityZone"`: The Availability Zone (AZ) where the DB instance will be created. - Default: A random, system-chosen Availability Zone. Constraint: You can't specify the - AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. Example: us-east-1a -- `"BackupTarget"`: Specifies where automated backups and manual snapshots are stored for - the restored DB instance. Possible values are outposts (Amazon Web Services Outposts) and - region (Amazon Web Services Region). The default is region. For more information, see - Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the restored - DB instance to snapshots of the DB instance. By default, tags are not copied. + Default: A random, system-chosen Availability Zone. Constraints: You can't specify the + AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. Example: + us-east-1a +- `"BackupTarget"`: The location for storing automated backups and manual snapshots for the + restored DB instance. Valid Values: outposts (Amazon Web Services Outposts) region + (Amazon Web Services Region) Default: region For more information, see Working with + Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more + information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS + User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora + User Guide. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the restored DB instance + to snapshots of the DB instance. By default, tags are not copied. - `"CustomIamInstanceProfile"`: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role @@ -9231,114 +10597,147 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys example db.m4.large. 
Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: - The same DBInstanceClass as the original DB instance. -- `"DBName"`: The database name for the restored DB instance. This parameter isn't - supported for the MySQL or MariaDB engines. It also doesn't apply to RDS Custom. + The same DB instance class as the original DB instance. +- `"DBName"`: The database name for the restored DB instance. This parameter doesn't apply + to the following DB instances: RDS Custom RDS for Db2 RDS for MariaDB RDS for MySQL + - `"DBParameterGroupName"`: The name of the DB parameter group to associate with this DB instance. If you do not specify a value for DBParameterGroupName, then the default DBParameterGroup for the specified DB engine is used. This setting doesn't apply to RDS - Custom. Constraints: If supplied, must match the name of an existing DBParameterGroup. - Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't + Custom. Constraints: If supplied, must match the name of an existing DB parameter group. + Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. - `"DBSubnetGroupName"`: The DB subnet group name to use for the new instance. Constraints: - If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. -- `"Domain"`: Specify the Active Directory directory ID to restore the DB instance in. - Create the domain before running this command. Currently, you can create only the MySQL, - Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. - This setting doesn't apply to RDS Custom. For more information, see Kerberos - Authentication in the Amazon RDS User Guide. -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. This setting doesn't apply to RDS Custom. + If supplied, must match the name of an existing DB subnet group. Example: + mydbsubnetgroup +- `"DedicatedLogVolume"`: Specifies whether to enable a dedicated log volume (DLV) for the + DB instance. +- `"DeletionProtection"`: Specifies whether the DB instance has deletion protection + enabled. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. For more information, see Deleting a DB Instance. +- `"Domain"`: The Active Directory directory ID to restore the DB instance in. Create the + domain before running this command. Currently, you can create only the MySQL, Microsoft SQL + Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. This setting + doesn't apply to RDS Custom. For more information, see Kerberos Authentication in the + Amazon RDS User Guide. +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. Constraints: Can't be longer than 64 characters. 
Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. This setting doesn't apply to RDS Custom DB instances. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain - `"EnableCloudwatchLogsExports"`: The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP - address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external - connectivity to resources in your Outpost subnets through your on-premises network. For - some use cases, a CoIP can provide lower latency for connections to the DB instance from - outside of its virtual private cloud (VPC) on your local network. This setting doesn't - apply to RDS Custom. For more information about RDS on Outposts, see Working with Amazon - RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. For more information - about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. This setting doesn't apply to RDS Custom. For more - information about IAM database authentication, see IAM Database Authentication for MySQL - and PostgreSQL in the Amazon RDS User Guide. +- `"EnableCustomerOwnedIp"`: Specifies whether to enable a customer-owned IP address (CoIP) + for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to + resources in your Outpost subnets through your on-premises network. For some use cases, a + CoIP can provide lower latency for connections to the DB instance from outside of its + virtual private cloud (VPC) on your local network. This setting doesn't apply to RDS + Custom. For more information about RDS on Outposts, see Working with Amazon RDS on Amazon + Web Services Outposts in the Amazon RDS User Guide. For more information about CoIPs, see + Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. This setting doesn't apply to RDS Custom. 
For more information about + IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in + the Amazon RDS User Guide. - `"Engine"`: The database engine to use for the new instance. This setting doesn't apply - to RDS Custom. Default: The same as source Constraint: Must be compatible with the engine - of the source Valid Values: mariadb mysql oracle-ee oracle-ee-cdb - oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se - sqlserver-ex sqlserver-web -- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be - initially allocated for the DB instance. Constraints: Must be an integer greater than 1000. - SQL Server Setting the IOPS value for the SQL Server database engine isn't supported. -- `"LicenseModel"`: License model information for the restored DB instance. This setting - doesn't apply to RDS Custom. Default: Same as source. Valid values: license-included | - bring-your-own-license | general-public-license + to RDS Custom. Valid Values: db2-ae db2-se mariadb mysql oracle-ee + oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee + sqlserver-se sqlserver-ex sqlserver-web Default: The same as source Constraints: + Must be compatible with the engine of the source. +- `"EngineLifecycleSupport"`: The life cycle type for this DB instance. By default, this + value is set to open-source-rds-extended-support, which enrolls your DB instance into + Amazon RDS Extended Support. At the end of standard support, you can avoid charges for + Extended Support by setting the value to open-source-rds-extended-support-disabled. In this + case, RDS automatically upgrades your restored DB instance to a higher engine version, if + the major engine version is past its end of standard support date. You can use this + setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended + Support, you can run the selected major engine version on your DB instance past the end of + standard support for that engine version. For more information, see Using Amazon RDS + Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL + and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by + the DB cluster. Valid Values: open-source-rds-extended-support | + open-source-rds-extended-support-disabled Default: open-source-rds-extended-support +- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to + initially allocate for the DB instance. This setting doesn't apply to SQL Server. + Constraints: Must be an integer greater than 1000. +- `"LicenseModel"`: The license model information for the restored DB instance. License + models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) + model requires a custom parameter group. The Db2 license through Amazon Web Services + Marketplace model requires an Amazon Web Services Marketplace subscription. For more + information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting + doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - + bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license + RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license + RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - + postgresql-license Default: Same as the source. 
- `"MaxAllocatedStorage"`: The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"MultiAZ"`: A value that indicates whether the DB instance is a Multi-AZ deployment. - This setting doesn't apply to RDS Custom. Constraint: You can't specify the - AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB instance. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon RDS User - Guide. -- `"OptionGroupName"`: The name of the option group to be used for the restored DB - instance. Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't - be removed from an option group, and that option group can't be removed from a DB instance +- `"MultiAZ"`: Secifies whether the DB instance is a Multi-AZ deployment. This setting + doesn't apply to RDS Custom. Constraints: You can't specify the AvailabilityZone + parameter if the DB instance is a Multi-AZ deployment. +- `"NetworkType"`: The network type of the DB instance. The network type is determined by + the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon RDS User Guide. Valid Values: IPV4 DUAL +- `"OptionGroupName"`: The name of the option group to use for the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be + removed from an option group, and that option group can't be removed from a DB instance after it is associated with a DB instance This setting doesn't apply to RDS Custom. -- `"Port"`: The port number on which the database accepts connections. Constraints: Value - must be 1150-65535 Default: The same port as the original DB instance. +- `"Port"`: The port number on which the database accepts connections. Default: The same + port as the original DB instance. Constraints: The value must be 1150-65535. - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. This setting doesn't apply to RDS Custom. -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access isn't permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. For more information, see - CreateDBInstance. -- `"RestoreTime"`: The date and time to restore from. 
Valid Values: Value must be a time in - Universal Coordinated Time (UTC) format Constraints: Must be before the latest restorable - time for the DB instance Can't be specified if the UseLatestRestorableTime parameter is - enabled Example: 2009-09-07T23:45:00Z +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB cluster's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB + cluster is ultimately controlled by the security group it uses. That public access isn't + permitted if the security group assigned to the DB cluster doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. For more information, see CreateDBInstance. +- `"RestoreTime"`: The date and time to restore from. Constraints: Must be a time in + Universal Coordinated Time (UTC) format. Must be before the latest restorable time for + the DB instance. Can't be specified if the UseLatestRestorableTime parameter is enabled. + Example: 2009-09-07T23:45:00Z - `"SourceDBInstanceAutomatedBackupsArn"`: The Amazon Resource Name (ARN) of the replicated automated backups from which to restore, for example, - arn:aws:rds:useast-1:123456789012:auto-backup:ab-L2IJCEXJP7XQ7HOJ4SIEXAMPLE. This setting + arn:aws:rds:us-east-1:123456789012:auto-backup:ab-L2IJCEXJP7XQ7HOJ4SIEXAMPLE. This setting doesn't apply to RDS Custom. - `"SourceDBInstanceIdentifier"`: The identifier of the source DB instance from which to restore. Constraints: Must match the identifier of an existing DB instance. - `"SourceDbiResourceId"`: The resource ID of the source DB instance from which to restore. -- `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This - setting doesn't apply to RDS Custom or Amazon Aurora. -- `"StorageType"`: Specifies the storage type to be associated with the DB instance. Valid - values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a - value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise - gp2 +- `"StorageThroughput"`: The storage throughput value for the DB instance. This setting + doesn't apply to RDS Custom or Amazon Aurora. +- `"StorageType"`: The storage type to associate with the DB instance. Valid Values: gp2 | + gp3 | io1 | io2 | standard Default: io1, if the Iops parameter is specified. Otherwise, + gp2. Constraints: If you specify io1, io2, or gp3, you must also include a value for the + Iops parameter. - `"Tags"`: - `"TdeCredentialArn"`: The ARN from the key store with which to associate the instance for TDE encryption. This setting doesn't apply to RDS Custom. - `"TdeCredentialPassword"`: The password for the given ARN from the key store in order to access the device. This setting doesn't apply to RDS Custom. -- `"UseDefaultProcessorFeatures"`: A value that indicates whether the DB instance class of - the DB instance uses its default processor features. This setting doesn't apply to RDS - Custom. -- `"UseLatestRestorableTime"`: A value that indicates whether the DB instance is restored - from the latest backup time. By default, the DB instance isn't restored from the latest - backup time. Constraints: Can't be specified if the RestoreTime parameter is provided. 
+- `"UseDefaultProcessorFeatures"`: Specifies whether the DB instance class of the DB + instance uses its default processor features. This setting doesn't apply to RDS Custom. +- `"UseLatestRestorableTime"`: Specifies whether the DB instance is restored from the + latest backup time. By default, the DB instance isn't restored from the latest backup time. + Constraints: Can't be specified if the RestoreTime parameter is provided. - `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this DB instance. Default: The default EC2 VPC security group for the DB subnet group's VPC. """ @@ -9503,9 +10902,9 @@ end start_dbcluster(dbcluster_identifier, params::Dict{String,<:Any}) Starts an Amazon Aurora DB cluster that was stopped using the Amazon Web Services console, -the stop-db-cluster CLI command, or the StopDBCluster action. For more information, see -Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide. This action only -applies to Aurora DB clusters. +the stop-db-cluster CLI command, or the StopDBCluster operation. For more information, see +Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide. This operation +only applies to Aurora DB clusters. # Arguments - `dbcluster_identifier`: The DB cluster identifier of the Amazon Aurora DB cluster to be @@ -9546,8 +10945,8 @@ end start_dbinstance(dbinstance_identifier, params::Dict{String,<:Any}) Starts an Amazon RDS DB instance that was stopped using the Amazon Web Services console, -the stop-db-instance CLI command, or the StopDBInstance action. For more information, see -Starting an Amazon RDS DB instance That Was Previously Stopped in the Amazon RDS User +the stop-db-instance CLI command, or the StopDBInstance operation. For more information, +see Starting an Amazon RDS DB instance That Was Previously Stopped in the Amazon RDS User Guide. This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL. For Aurora DB clusters, use StartDBCluster instead. @@ -9651,12 +11050,12 @@ end start_export_task(export_task_identifier, iam_role_arn, kms_key_id, s3_bucket_name, source_arn, params::Dict{String,<:Any}) Starts an export of DB snapshot or DB cluster data to Amazon S3. The provided IAM role must -have access to the S3 bucket. You can't export snapshot data from RDS Custom DB instances. -You can't export cluster data from Multi-AZ DB clusters. For more information on exporting -DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS User Guide -or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User Guide. For -more information on exporting DB cluster data, see Exporting DB cluster data to Amazon S3 -in the Amazon Aurora User Guide. +have access to the S3 bucket. You can't export snapshot data from Db2 or RDS Custom DB +instances. You can't export cluster data from Multi-AZ DB clusters. For more information on +exporting DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS +User Guide or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User +Guide. For more information on exporting DB cluster data, see Exporting DB cluster data to +Amazon S3 in the Amazon Aurora User Guide. # Arguments - `export_task_identifier`: A unique identifier for the export task. This ID isn't an @@ -9684,13 +11083,13 @@ in the Amazon Aurora User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"ExportOnly"`: The data to be exported from the snapshot or cluster. If this parameter - is not provided, all of the data is exported. Valid values are the following: database - - Export all the data from a specified database. database.table table-name - Export a - table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for - MariaDB, and Aurora MySQL. database.schema schema-name - Export a database schema of the - snapshot or cluster. This format is valid only for RDS for PostgreSQL and Aurora - PostgreSQL. database.schema.table table-name - Export a table of the database schema. - This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. + isn't provided, all of the data is exported. Valid Values: database - Export all the + data from a specified database. database.table table-name - Export a table of the + snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and + Aurora MySQL. database.schema schema-name - Export a database schema of the snapshot or + cluster. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. + database.schema.table table-name - Export a table of the database schema. This format is + valid only for RDS for PostgreSQL and Aurora PostgreSQL. - `"S3Prefix"`: The Amazon S3 bucket prefix to use as the file name and path of the exported data. """ @@ -9749,7 +11148,7 @@ end stop_activity_stream(resource_arn, params::Dict{String,<:Any}) Stops a database activity stream that was started using the Amazon Web Services console, -the start-activity-stream CLI command, or the StartActivityStream action. For more +the start-activity-stream CLI command, or the StartActivityStream operation. For more information, see Monitoring Amazon Aurora with Database Activity Streams in the Amazon Aurora User Guide or Monitoring Amazon RDS with Database Activity Streams in the Amazon RDS User Guide. @@ -9796,7 +11195,7 @@ Stops an Amazon Aurora DB cluster. When you stop a DB cluster, Aurora retains th cluster's metadata, including its endpoints and DB parameter groups. Aurora also retains the transaction logs so you can do a point-in-time restore if necessary. For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide. -This action only applies to Aurora DB clusters. +This operation only applies to Aurora DB clusters. # Arguments - `dbcluster_identifier`: The DB cluster identifier of the Amazon Aurora DB cluster to be @@ -9930,18 +11329,18 @@ end Switches over a blue/green deployment. Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment. For more information, see Using Amazon -RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using +RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. # Arguments -- `blue_green_deployment_identifier`: The blue/green deployment identifier. Constraints: - Must match an existing blue/green deployment identifier. +- `blue_green_deployment_identifier`: The unique identifier of the blue/green deployment. + Constraints: Must match an existing blue/green deployment identifier. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
- `"SwitchoverTimeout"`: The amount of time, in seconds, for the switchover to complete.
-  The default is 300. If the switchover takes longer than the specified duration, then any
-  changes are rolled back, and no changes are made to the environments.
+  Default: 300 If the switchover takes longer than the specified duration, then any changes
+  are rolled back, and no changes are made to the environments.
 """
 function switchover_blue_green_deployment(
     BlueGreenDeploymentIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
 )
@@ -9974,6 +11373,71 @@ function switchover_blue_green_deployment(
     )
 end
 
+"""
+    switchover_global_cluster(global_cluster_identifier, target_db_cluster_identifier)
+    switchover_global_cluster(global_cluster_identifier, target_db_cluster_identifier, params::Dict{String,<:Any})
+
+Switches over the specified secondary DB cluster to be the new primary DB cluster in the
+global database cluster. Switchover operations were previously called \"managed planned
+failovers.\" Aurora promotes the specified secondary cluster to assume full read/write
+capabilities and demotes the current primary cluster to a secondary (read-only) cluster,
+maintaining the original replication topology. All secondary clusters are synchronized with
+the primary at the beginning of the process so the new primary continues operations for the
+Aurora global database without losing any data. Your database is unavailable for a short
+time while the primary and selected secondary clusters are assuming their new roles. For
+more information about switching over an Aurora global database, see Performing switchovers
+for Amazon Aurora global databases in the Amazon Aurora User Guide. This operation is
+intended for controlled environments, for operations such as \"regional rotation\" or to
+fall back to the original primary after a global database failover.
+
+# Arguments
+- `global_cluster_identifier`: The identifier of the global database cluster to switch
+  over. This parameter isn't case-sensitive. Constraints: Must match the identifier of an
+  existing global database cluster (Aurora global database).
+- `target_db_cluster_identifier`: The identifier of the secondary Aurora DB cluster to
+  promote to the new primary for the global database cluster. Use the Amazon Resource Name
+  (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web Services
+  Region.
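An illustrative call of the new operation (the cluster name, account, and Region below are hypothetical, and the snippet assumes the usual `@service` loading pattern from the AWS.jl README rather than anything defined in this patch):

```julia
using AWS: @service
@service RDS

# Promote a hypothetical secondary cluster (referenced by its ARN) to primary;
# the current primary is demoted to a read-only secondary.
RDS.switchover_global_cluster(
    "my-global-cluster",
    "arn:aws:rds:us-west-2:123456789012:cluster:my-secondary-cluster",
)
```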
+ +""" +function switchover_global_cluster( + GlobalClusterIdentifier, + TargetDbClusterIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "SwitchoverGlobalCluster", + Dict{String,Any}( + "GlobalClusterIdentifier" => GlobalClusterIdentifier, + "TargetDbClusterIdentifier" => TargetDbClusterIdentifier, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function switchover_global_cluster( + GlobalClusterIdentifier, + TargetDbClusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rds( + "SwitchoverGlobalCluster", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "GlobalClusterIdentifier" => GlobalClusterIdentifier, + "TargetDbClusterIdentifier" => TargetDbClusterIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ switchover_read_replica(dbinstance_identifier) switchover_read_replica(dbinstance_identifier, params::Dict{String,<:Any}) diff --git a/src/services/rds_data.jl b/src/services/rds_data.jl index 6f807fc27a..d2748a2cce 100644 --- a/src/services/rds_data.jl +++ b/src/services/rds_data.jl @@ -190,8 +190,9 @@ end execute_sql(aws_secret_store_arn, db_cluster_or_instance_arn, sql_statements) execute_sql(aws_secret_store_arn, db_cluster_or_instance_arn, sql_statements, params::Dict{String,<:Any}) -Runs one or more SQL statements. This operation is deprecated. Use the -BatchExecuteStatement or ExecuteStatement operation. +Runs one or more SQL statements. This operation isn't supported for Aurora PostgreSQL +Serverless v2 and provisioned DB clusters, and for Aurora Serverless v1 DB clusters, the +operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation. # Arguments - `aws_secret_store_arn`: The Amazon Resource Name (ARN) of the secret that enables access diff --git a/src/services/redshift.jl b/src/services/redshift.jl index c873961ab7..ad2fa4ed8f 100644 --- a/src/services/redshift.jl +++ b/src/services/redshift.jl @@ -129,13 +129,14 @@ association, the consumer can consume the datashare. # Arguments - `data_share_arn`: The Amazon Resource Name (ARN) of the datashare that the consumer is to - use with the account or the namespace. + use. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowWrites"`: If set to true, allows write operations for a datashare. - `"AssociateEntireAccount"`: A value that specifies whether the datashare is associated with the entire account. -- `"ConsumerArn"`: The Amazon Resource Name (ARN) of the consumer that is associated with +- `"ConsumerArn"`: The Amazon Resource Name (ARN) of the consumer namespace associated with the datashare. - `"ConsumerRegion"`: From a datashare consumer account, associates a datashare with all existing and future namespaces in the specified Amazon Web Services Region. @@ -237,9 +238,12 @@ producer account must have the correct access permissions. - `consumer_identifier`: The identifier of the data consumer that is authorized to access the datashare. This identifier is an Amazon Web Services account ID or a keyword, such as ADX. -- `data_share_arn`: The Amazon Resource Name (ARN) of the datashare that producers are to - authorize sharing for. +- `data_share_arn`: The Amazon Resource Name (ARN) of the datashare namespace that + producers are to authorize sharing for. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AllowWrites"`: If set to true, allows write operations for a datashare. """ function authorize_data_share( ConsumerIdentifier, DataShareArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -328,8 +332,11 @@ Amazon Redshift Cluster Management Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"SnapshotArn"`: The Amazon Resource Name (ARN) of the snapshot to authorize access to. - `"SnapshotClusterIdentifier"`: The identifier of the cluster the snapshot was created - from. This parameter is required if your IAM user or role has a policy containing a - snapshot resource element that specifies anything other than * for the cluster name. + from. If the snapshot to access doesn't exist and the associated IAM policy doesn't + allow access to all (*) snapshots - This parameter is required. Otherwise, permissions + aren't available to check if the snapshot exists. If the snapshot to access exists - + This parameter isn't required. Redshift can retrieve the cluster identifier and use it to + validate snapshot authorization. - `"SnapshotIdentifier"`: The identifier of the snapshot the account is authorized to restore. """ @@ -513,9 +520,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys If the value is -1, the manual snapshot is retained indefinitely. The value must be either -1 or an integer between 1 and 3,653. The default value is -1. - `"SourceSnapshotClusterIdentifier"`: The identifier of the cluster the source snapshot - was created from. This parameter is required if your IAM user or role has a policy - containing a snapshot resource element that specifies anything other than * for the cluster - name. Constraints: Must be the identifier for a valid cluster. + was created from. This parameter is required if your IAM user has a policy containing a + snapshot resource element that specifies anything other than * for the cluster name. + Constraints: Must be the identifier for a valid cluster. """ function copy_cluster_snapshot( SourceSnapshotIdentifier, @@ -606,8 +613,8 @@ function create_authentication_profile( end """ - create_cluster(cluster_identifier, master_user_password, master_username, node_type) - create_cluster(cluster_identifier, master_user_password, master_username, node_type, params::Dict{String,<:Any}) + create_cluster(cluster_identifier, master_username, node_type) + create_cluster(cluster_identifier, master_username, node_type, params::Dict{String,<:Any}) Creates a new cluster with the specified parameters. To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group @@ -623,22 +630,16 @@ Redshift Cluster Management Guide. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Must be unique for all clusters within an Amazon Web Services account. Example: myexamplecluster -- `master_user_password`: The password associated with the admin user for the cluster that - is being created. Constraints: Must be between 8 and 64 characters in length. Must - contain at least one uppercase letter. Must contain at least one lowercase letter. Must - contain one number. Can be any printable ASCII character (ASCII code 33-126) except ' - (single quote), \" (double quote), , /, or @. -- `master_username`: The user name associated with the admin user for the cluster that is - being created. Constraints: Must be 1 - 128 alphanumeric characters or hyphens. The user - name can't be PUBLIC. 
Must contain only lowercase letters, numbers, underscore, plus - sign, period (dot), at symbol (@), or hyphen. The first character must be a letter. - Must not contain a colon (:) or a slash (/). Cannot be a reserved word. A list of +- `master_username`: The user name associated with the admin user account for the cluster + that is being created. Constraints: Must be 1 - 128 alphanumeric characters or hyphens. + The user name can't be PUBLIC. Must contain only lowercase letters, numbers, underscore, + plus sign, period (dot), at symbol (@), or hyphen. The first character must be a letter. + Must not contain a colon (:) or a slash (/). Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide. - `node_type`: The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid - Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | - ra3.xlplus | ra3.4xlarge | ra3.16xlarge + Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -710,6 +711,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys their Amazon Resource Name (ARN) format. The maximum number of IAM roles that you can associate is subject to a quota. For more information, go to Quotas and limits in the Amazon Redshift Cluster Management Guide. +- `"IpAddressType"`: The IP address types that the cluster supports. Possible values are + ipv4 and dualstack. - `"KmsKeyId"`: The Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster. - `"LoadSampleData"`: A flag that specifies whether to load sample data once the cluster is @@ -717,10 +720,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MaintenanceTrackName"`: An optional parameter for the name of the maintenance track for the cluster. If you don't provide a maintenance track name, the cluster is assigned to the current track. +- `"ManageMasterPassword"`: If true, Amazon Redshift uses Secrets Manager to manage this + cluster's admin credentials. You can't use MasterUserPassword if ManageMasterPassword is + true. If ManageMasterPassword is false or not set, Amazon Redshift uses MasterUserPassword + for the admin user account's password. - `"ManualSnapshotRetentionPeriod"`: The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. The value must be either -1 or an integer between 1 and 3,653. +- `"MasterPasswordSecretKmsKeyId"`: The ID of the Key Management Service (KMS) key used to + encrypt and store the cluster's admin credentials secret. You can only use this parameter + if ManageMasterPassword is true. +- `"MasterUserPassword"`: The password associated with the admin user account for the + cluster that is being created. You can't use MasterUserPassword if ManageMasterPassword is + true. Constraints: Must be between 8 and 64 characters in length. Must contain at least + one uppercase letter. Must contain at least one lowercase letter. Must contain one + number. Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), + \" (double quote), , /, or @. 
+- `"MultiAZ"`: If true, Amazon Redshift will deploy the cluster in two Availability Zones + (AZ). - `"NumberOfNodes"`: The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift @@ -731,7 +749,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Port"`: The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439 - Valid Values: 1150-65535 + Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or + 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you + change the port to these ranges.) For clusters with dc2 nodes - Select a port within the + range 1150-65535. - `"PreferredMaintenanceWindow"`: The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random @@ -739,6 +760,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. - `"PubliclyAccessible"`: If true, the cluster can be accessed from a public network. +- `"RedshiftIdcApplicationArn"`: The Amazon resource name (ARN) of the Amazon Redshift IAM + Identity Center application. - `"SnapshotScheduleIdentifier"`: A unique identifier for the snapshot schedule. - `"Tags"`: A list of tag instances. - `"VpcSecurityGroupIds"`: A list of Virtual Private Cloud (VPC) security groups to be @@ -747,7 +770,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys """ function create_cluster( ClusterIdentifier, - MasterUserPassword, MasterUsername, NodeType; aws_config::AbstractAWSConfig=global_aws_config(), @@ -756,7 +778,6 @@ function create_cluster( "CreateCluster", Dict{String,Any}( "ClusterIdentifier" => ClusterIdentifier, - "MasterUserPassword" => MasterUserPassword, "MasterUsername" => MasterUsername, "NodeType" => NodeType, ); @@ -766,7 +787,6 @@ function create_cluster( end function create_cluster( ClusterIdentifier, - MasterUserPassword, MasterUsername, NodeType, params::AbstractDict{String}; @@ -779,7 +799,6 @@ function create_cluster( _merge, Dict{String,Any}( "ClusterIdentifier" => ClusterIdentifier, - "MasterUserPassword" => MasterUserPassword, "MasterUsername" => MasterUsername, "NodeType" => NodeType, ), @@ -1044,6 +1063,63 @@ function create_cluster_subnet_group( ) end +""" + create_custom_domain_association(cluster_identifier, custom_domain_certificate_arn, custom_domain_name) + create_custom_domain_association(cluster_identifier, custom_domain_certificate_arn, custom_domain_name, params::Dict{String,<:Any}) + +Used to create a custom domain name for a cluster. Properties include the custom domain +name, the cluster the custom domain is associated with, and the certificate Amazon Resource +Name (ARN). + +# Arguments +- `cluster_identifier`: The cluster identifier that the custom domain is associated with. 
+- `custom_domain_certificate_arn`: The certificate Amazon Resource Name (ARN) for the + custom domain name association. +- `custom_domain_name`: The custom domain name for a custom domain association. + +""" +function create_custom_domain_association( + ClusterIdentifier, + CustomDomainCertificateArn, + CustomDomainName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "CreateCustomDomainAssociation", + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, + "CustomDomainCertificateArn" => CustomDomainCertificateArn, + "CustomDomainName" => CustomDomainName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_custom_domain_association( + ClusterIdentifier, + CustomDomainCertificateArn, + CustomDomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "CreateCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, + "CustomDomainCertificateArn" => CustomDomainCertificateArn, + "CustomDomainName" => CustomDomainName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_endpoint_access(endpoint_name, subnet_group_name) create_endpoint_access(endpoint_name, subnet_group_name, params::Dict{String,<:Any}) @@ -1322,6 +1398,78 @@ function create_hsm_configuration( ) end +""" + create_redshift_idc_application(iam_role_arn, idc_display_name, idc_instance_arn, redshift_idc_application_name) + create_redshift_idc_application(iam_role_arn, idc_display_name, idc_instance_arn, redshift_idc_application_name, params::Dict{String,<:Any}) + +Creates an Amazon Redshift application for use with IAM Identity Center. + +# Arguments +- `iam_role_arn`: The IAM role ARN for the Amazon Redshift IAM Identity Center application + instance. It has the required permissions to be assumed and invoke the IDC Identity Center + API. +- `idc_display_name`: The display name for the Amazon Redshift IAM Identity Center + application instance. It appears in the console. +- `idc_instance_arn`: The Amazon resource name (ARN) of the IAM Identity Center instance + where Amazon Redshift creates a new managed application. +- `redshift_idc_application_name`: The name of the Redshift application in IAM Identity + Center. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AuthorizedTokenIssuerList"`: The token issuer list for the Amazon Redshift IAM Identity + Center application instance. +- `"IdentityNamespace"`: The namespace for the Amazon Redshift IAM Identity Center + application instance. It determines which managed application verifies the connection token. +- `"ServiceIntegrations"`: A collection of service integrations for the Redshift IAM + Identity Center application. 
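A sketch of how the new `create_redshift_idc_application` definition might be invoked. The role ARN, Identity Center instance ARN, display name, application name, and the optional `IdentityNamespace` value are all hypothetical, and the `@service` loading pattern is assumed from the AWS.jl README:

```julia
using AWS: @service
@service Redshift

# Register a hypothetical Amazon Redshift application with IAM Identity Center.
Redshift.create_redshift_idc_application(
    "arn:aws:iam::123456789012:role/RedshiftIdcRole",  # IamRoleArn
    "Analytics IdC app",                               # IdcDisplayName
    "arn:aws:sso:::instance/ssoins-0123456789abcdef",  # IdcInstanceArn
    "analytics-idc-app",                               # RedshiftIdcApplicationName
    Dict{String,Any}("IdentityNamespace" => "MYCO"),   # optional parameters
)
```

The positional arguments follow the generated function signature; optional keys ride in the trailing `params` dictionary as with the other definitions in this file.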
+""" +function create_redshift_idc_application( + IamRoleArn, + IdcDisplayName, + IdcInstanceArn, + RedshiftIdcApplicationName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "CreateRedshiftIdcApplication", + Dict{String,Any}( + "IamRoleArn" => IamRoleArn, + "IdcDisplayName" => IdcDisplayName, + "IdcInstanceArn" => IdcInstanceArn, + "RedshiftIdcApplicationName" => RedshiftIdcApplicationName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_redshift_idc_application( + IamRoleArn, + IdcDisplayName, + IdcInstanceArn, + RedshiftIdcApplicationName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "CreateRedshiftIdcApplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IamRoleArn" => IamRoleArn, + "IdcDisplayName" => IdcDisplayName, + "IdcInstanceArn" => IdcInstanceArn, + "RedshiftIdcApplicationName" => RedshiftIdcApplicationName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_scheduled_action(iam_role, schedule, scheduled_action_name, target_action) create_scheduled_action(iam_role, schedule, scheduled_action_name, target_action, params::Dict{String,<:Any}) @@ -1617,8 +1765,8 @@ From a datashare producer account, removes authorization from the specified data - `consumer_identifier`: The identifier of the data consumer that is to have authorization removed from the datashare. This identifier is an Amazon Web Services account ID or a keyword, such as ADX. -- `data_share_arn`: The Amazon Resource Name (ARN) of the datashare to remove authorization - from. +- `data_share_arn`: The namespace Amazon Resource Name (ARN) of the datashare to remove + authorization from. """ function deauthorize_data_share( @@ -1862,7 +2010,7 @@ authorizations before you can delete the snapshot. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"SnapshotClusterIdentifier"`: The unique identifier of the cluster the snapshot was - created from. This parameter is required if your IAM user or role has a policy containing a + created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name. Constraints: Must be the name of valid cluster. """ @@ -1932,6 +2080,53 @@ function delete_cluster_subnet_group( ) end +""" + delete_custom_domain_association(cluster_identifier, custom_domain_name) + delete_custom_domain_association(cluster_identifier, custom_domain_name, params::Dict{String,<:Any}) + +Contains information about deleting a custom domain association for a cluster. + +# Arguments +- `cluster_identifier`: The identifier of the cluster to delete a custom domain association + for. +- `custom_domain_name`: The custom domain name for the custom domain association. 
+ +""" +function delete_custom_domain_association( + ClusterIdentifier, CustomDomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DeleteCustomDomainAssociation", + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, "CustomDomainName" => CustomDomainName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_custom_domain_association( + ClusterIdentifier, + CustomDomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "DeleteCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, + "CustomDomainName" => CustomDomainName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_endpoint_access(endpoint_name) delete_endpoint_access(endpoint_name, params::Dict{String,<:Any}) @@ -2152,6 +2347,82 @@ function delete_partner( ) end +""" + delete_redshift_idc_application(redshift_idc_application_arn) + delete_redshift_idc_application(redshift_idc_application_arn, params::Dict{String,<:Any}) + +Deletes an Amazon Redshift IAM Identity Center application. + +# Arguments +- `redshift_idc_application_arn`: The ARN for a deleted Amazon Redshift IAM Identity Center + application. + +""" +function delete_redshift_idc_application( + RedshiftIdcApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DeleteRedshiftIdcApplication", + Dict{String,Any}("RedshiftIdcApplicationArn" => RedshiftIdcApplicationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_redshift_idc_application( + RedshiftIdcApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "DeleteRedshiftIdcApplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("RedshiftIdcApplicationArn" => RedshiftIdcApplicationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_resource_policy(resource_arn) + delete_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Deletes the resource policy for a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource of which its resource + policy is deleted. + +""" +function delete_resource_policy( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DeleteResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "DeleteResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_scheduled_action(scheduled_action_name) delete_scheduled_action(scheduled_action_name, params::Dict{String,<:Any}) @@ -2669,7 +2940,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum - 20, maximum 500. 
+ 20, maximum 100. - `"OwnerAccount"`: The Amazon Web Services account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your Amazon Web Services account, or do not @@ -2897,6 +3168,40 @@ function describe_clusters( ) end +""" + describe_custom_domain_associations() + describe_custom_domain_associations(params::Dict{String,<:Any}) + +Contains information about custom domain associations for a cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CustomDomainCertificateArn"`: The certificate Amazon Resource Name (ARN) for the custom + domain association. +- `"CustomDomainName"`: The custom domain name for the custom domain association. +- `"Marker"`: The marker for the custom domain association. +- `"MaxRecords"`: The maximum records setting for the associated custom domain. +""" +function describe_custom_domain_associations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DescribeCustomDomainAssociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_custom_domain_associations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DescribeCustomDomainAssociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_data_shares() describe_data_shares(params::Dict{String,<:Any}) @@ -2905,7 +3210,7 @@ Shows the status of any inbound or outbound datashares available in the specifie # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DataShareArn"`: The identifier of the datashare to describe details of. +- `"DataShareArn"`: The Amazon resource name (ARN) of the datashare to describe details of. - `"Marker"`: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataShares request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of the @@ -2938,8 +3243,8 @@ account identifier. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ConsumerArn"`: The Amazon Resource Name (ARN) of the consumer that returns in the list - of datashares. +- `"ConsumerArn"`: The Amazon Resource Name (ARN) of the consumer namespace that returns in + the list of datashares. - `"Marker"`: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDataSharesForConsumer request exceed the value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of @@ -2991,8 +3296,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. -- `"ProducerArn"`: The Amazon Resource Name (ARN) of the producer that returns in the list - of datashares. +- `"ProducerArn"`: The Amazon Resource Name (ARN) of the producer namespace that returns in + the list of datashares. - `"Status"`: An identifier giving the status of a datashare in the producer. 
If this field is specified, Amazon Redshift returns the list of datashares that have the specified status. """ @@ -3406,6 +3711,45 @@ function describe_hsm_configurations( ) end +""" + describe_inbound_integrations() + describe_inbound_integrations(params::Dict{String,<:Any}) + +Returns a list of inbound integrations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IntegrationArn"`: The Amazon Resource Name (ARN) of the inbound integration. +- `"Marker"`: An optional parameter that specifies the starting point to return a set of + response records. When the results of a DescribeInboundIntegrations request exceed the + value specified in MaxRecords, Amazon Web Services returns a value in the Marker field of + the response. You can retrieve the next set of response records by providing the returned + marker value in the Marker parameter and retrying the request. +- `"MaxRecords"`: The maximum number of response records to return in each call. If the + number of remaining response records exceeds the specified MaxRecords value, a value is + returned in a marker field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. Default: 100 Constraints: minimum + 20, maximum 100. +- `"TargetArn"`: The Amazon Resource Name (ARN) of the target of an inbound integration. +""" +function describe_inbound_integrations(; aws_config::AbstractAWSConfig=global_aws_config()) + return redshift( + "DescribeInboundIntegrations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_inbound_integrations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DescribeInboundIntegrations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_logging_status(cluster_identifier) describe_logging_status(cluster_identifier, params::Dict{String,<:Any}) @@ -3608,6 +3952,46 @@ function describe_partners( ) end +""" + describe_redshift_idc_applications() + describe_redshift_idc_applications(params::Dict{String,<:Any}) + +Lists the Amazon Redshift IAM Identity Center applications. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: A value that indicates the starting point for the next set of response + records in a subsequent request. If a value is returned in a response, you can retrieve the + next set of records by providing this returned marker value in the Marker parameter and + retrying the command. If the Marker field is empty, all response records have been + retrieved for the request. +- `"MaxRecords"`: The maximum number of response records to return in each call. If the + number of remaining response records exceeds the specified MaxRecords value, a value is + returned in a marker field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. +- `"RedshiftIdcApplicationArn"`: The ARN for the Redshift application that integrates with + IAM Identity Center. 
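The new describe call follows the usual Marker/MaxRecords paging convention. A minimal sketch with illustrative values; threading the returned marker back in, as suggested in the trailing comment, is an assumption about caller code, not something defined in this file:

```julia
using AWS: @service
@service Redshift

# List IAM Identity Center applications, 20 records per page (illustrative value).
resp = Redshift.describe_redshift_idc_applications(Dict{String,Any}("MaxRecords" => 20))
# A subsequent page would pass the returned marker back in, for example:
# Redshift.describe_redshift_idc_applications(Dict{String,Any}("Marker" => marker))
```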
+""" +function describe_redshift_idc_applications(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DescribeRedshiftIdcApplications"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_redshift_idc_applications( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DescribeRedshiftIdcApplications", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_reserved_node_exchange_status() describe_reserved_node_exchange_status(params::Dict{String,<:Any}) @@ -4175,8 +4559,8 @@ From a datashare consumer account, remove association for the specified datashar # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ConsumerArn"`: The Amazon Resource Name (ARN) of the consumer that association for the - datashare is removed from. +- `"ConsumerArn"`: The Amazon Resource Name (ARN) of the consumer namespace that + association for the datashare is removed from. - `"ConsumerRegion"`: From a datashare consumer account, removes association of a datashare from all the existing and future namespaces in the specified Amazon Web Services Region. - `"DisassociateEntireAccount"`: A value that specifies whether association for the @@ -4323,8 +4707,47 @@ function enable_snapshot_copy( end """ - get_cluster_credentials(cluster_identifier, db_user) - get_cluster_credentials(cluster_identifier, db_user, params::Dict{String,<:Any}) + failover_primary_compute(cluster_identifier) + failover_primary_compute(cluster_identifier, params::Dict{String,<:Any}) + +Fails over the primary compute unit of the specified Multi-AZ cluster to another +Availability Zone. + +# Arguments +- `cluster_identifier`: The unique identifier of the cluster for which the primary compute + unit will be failed over to another Availability Zone. + +""" +function failover_primary_compute( + ClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "FailoverPrimaryCompute", + Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function failover_primary_compute( + ClusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "FailoverPrimaryCompute", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_cluster_credentials(db_user) + get_cluster_credentials(db_user, params::Dict{String,<:Any}) Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with @@ -4344,8 +4767,6 @@ specified, the IAM policy must allow access to the resource dbname for the speci database name. # Arguments -- `cluster_identifier`: The unique identifier of the cluster that contains the database for - which you are requesting credentials. This parameter is case sensitive. - `db_user`: The name of a database user. If a user name matching DbUser exists in the database, the temporary user credentials have the same permissions as the existing user. If DbUser doesn't exist in the database and Autocreate is True, a new user is created using @@ -4363,6 +4784,9 @@ database name. 
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoCreate"`: Create a database user with the name specified for the user named in DbUser if one does not exist. +- `"ClusterIdentifier"`: The unique identifier of the cluster that contains the database + for which you are requesting credentials. This parameter is case sensitive. +- `"CustomDomainName"`: The custom domain name for the cluster credentials. - `"DbGroups"`: A list of the names of existing database groups that the user named in DbUser will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC. Database group name @@ -4381,41 +4805,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DurationSeconds"`: The number of seconds until the returned temporary password expires. Constraint: minimum 900, maximum 3600. Default: 900 """ -function get_cluster_credentials( - ClusterIdentifier, DbUser; aws_config::AbstractAWSConfig=global_aws_config() -) +function get_cluster_credentials(DbUser; aws_config::AbstractAWSConfig=global_aws_config()) return redshift( "GetClusterCredentials", - Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier, "DbUser" => DbUser); + Dict{String,Any}("DbUser" => DbUser); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_cluster_credentials( - ClusterIdentifier, - DbUser, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + DbUser, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return redshift( "GetClusterCredentials", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ClusterIdentifier" => ClusterIdentifier, "DbUser" => DbUser - ), - params, - ), - ); + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("DbUser" => DbUser), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - get_cluster_credentials_with_iam(cluster_identifier) - get_cluster_credentials_with_iam(cluster_identifier, params::Dict{String,<:Any}) + get_cluster_credentials_with_iam() + get_cluster_credentials_with_iam(params::Dict{String,<:Any}) Returns a database user name and temporary password with temporary authorization to log in to an Amazon Redshift database. The database user is mapped 1:1 to the source Identity and @@ -4426,12 +4837,11 @@ operation must have an IAM policy attached that allows access to all necessary a resources. For more information about permissions, see Using identity-based policies (IAM policies) in the Amazon Redshift Cluster Management Guide. -# Arguments -- `cluster_identifier`: The unique identifier of the cluster that contains the database for - which you are requesting credentials. - # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClusterIdentifier"`: The unique identifier of the cluster that contains the database + for which you are requesting credentials. +- `"CustomDomainName"`: The custom domain name for the IAM message cluster credentials. - `"DbName"`: The name of the database for which you are requesting credentials. If the database name is specified, the IAM policy must allow access to the resource dbname for the specified database name. If the database name is not specified, access to all databases is @@ -4439,28 +4849,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"DurationSeconds"`: The number of seconds until the returned temporary password expires. Range: 900-3600. Default: 900. """ -function get_cluster_credentials_with_iam( - ClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +function get_cluster_credentials_with_iam(; + aws_config::AbstractAWSConfig=global_aws_config() ) return redshift( - "GetClusterCredentialsWithIAM", - Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier); + "GetClusterCredentialsWithIAM"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_cluster_credentials_with_iam( - ClusterIdentifier, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return redshift( "GetClusterCredentialsWithIAM", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier), params - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -4562,6 +4965,83 @@ function get_reserved_node_exchange_offerings( ) end +""" + get_resource_policy(resource_arn) + get_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Get the resource policy for a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource of which its resource + policy is fetched. + +""" +function get_resource_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()) + return redshift( + "GetResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "GetResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_recommendations() + list_recommendations(params::Dict{String,<:Any}) + +List the Amazon Redshift Advisor recommendations for one or multiple Amazon Redshift +clusters in an Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClusterIdentifier"`: The unique identifier of the Amazon Redshift cluster for which the + list of Advisor recommendations is returned. If the neither the cluster identifier and the + cluster namespace ARN parameters are specified, then recommendations for all clusters in + the account are returned. +- `"Marker"`: A value that indicates the starting point for the next set of response + records in a subsequent request. If a value is returned in a response, you can retrieve the + next set of records by providing this returned marker value in the Marker parameter and + retrying the command. If the Marker field is empty, all response records have been + retrieved for the request. +- `"MaxRecords"`: The maximum number of response records to return in each call. If the + number of remaining response records exceeds the specified MaxRecords value, a value is + returned in a marker field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. +- `"NamespaceArn"`: The Amazon Redshift cluster namespace Amazon Resource Name (ARN) for + which the list of Advisor recommendations is returned. 
If the neither the cluster + identifier and the cluster namespace ARN parameters are specified, then recommendations for + all clusters in the account are returned. +""" +function list_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return redshift( + "ListRecommendations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "ListRecommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_aqua_configuration(cluster_identifier) modify_aqua_configuration(cluster_identifier, params::Dict{String,<:Any}) @@ -4725,6 +5205,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM. - `"HsmConfigurationIdentifier"`: Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM. +- `"IpAddressType"`: The IP address types that the cluster supports. Possible values are + ipv4 and dualstack. - `"KmsKeyId"`: The Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster. - `"MaintenanceTrackName"`: The name for the maintenance track that you want to assign for @@ -4732,20 +5214,30 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys PendingModifiedValues for the cluster until the next maintenance window. When the maintenance track changes, the cluster is switched to the latest cluster release available for the maintenance track. At this point, the maintenance track name is applied. +- `"ManageMasterPassword"`: If true, Amazon Redshift uses Secrets Manager to manage this + cluster's admin credentials. You can't use MasterUserPassword if ManageMasterPassword is + true. If ManageMasterPassword is false or not set, Amazon Redshift uses MasterUserPassword + for the admin user account's password. - `"ManualSnapshotRetentionPeriod"`: The default for number of days that a newly created manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely. This value doesn't retroactively change the retention periods of existing manual snapshots. The value must be either -1 or an integer between 1 and 3,653. The default value is -1. +- `"MasterPasswordSecretKmsKeyId"`: The ID of the Key Management Service (KMS) key used to + encrypt and store the cluster's admin credentials secret. You can only use this parameter + if ManageMasterPassword is true. - `"MasterUserPassword"`: The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the - PendingModifiedValues element of the operation response. Operations never return the - password, so this operation provides a way to regain access to the admin user for a cluster - if the password is lost. Default: Uses existing setting. Constraints: Must be between 8 - and 64 characters in length. Must contain at least one uppercase letter. Must contain - at least one lowercase letter. Must contain one number. Can be any printable ASCII - character (ASCII code 33-126) except ' (single quote), \" (double quote), , /, or @. + PendingModifiedValues element of the operation response. 
You can't use MasterUserPassword + if ManageMasterPassword is true. Operations never return the password, so this operation + provides a way to regain access to the admin user account for a cluster if the password is + lost. Default: Uses existing setting. Constraints: Must be between 8 and 64 characters + in length. Must contain at least one uppercase letter. Must contain at least one + lowercase letter. Must contain one number. Can be any printable ASCII character (ASCII + code 33-126) except ' (single quote), \" (double quote), , /, or @. +- `"MultiAZ"`: If true and the cluster is currently only deployed in a single Availability + Zone, the cluster will be modified to be deployed in two Availability Zones. - `"NewClusterIdentifier"`: The new identifier for the cluster. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic characters must be lowercase. First character must be a letter. Cannot end with a hyphen or contain two consecutive @@ -4754,13 +5246,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NodeType"`: The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter. For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide. - Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge - | ra3.xlplus | ra3.4xlarge | ra3.16xlarge + Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge - `"NumberOfNodes"`: The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter. For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide. Valid Values: Integer greater than 0. -- `"Port"`: The option to change the port of an Amazon Redshift cluster. +- `"Port"`: The option to change the port of an Amazon Redshift cluster. Valid Values: + For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If + you have an existing cluster with ra3 nodes, it isn't required that you change the port to + these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535. - `"PreferredMaintenanceWindow"`: The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage. This maintenance window change is made immediately. If the new @@ -5152,6 +5646,62 @@ function modify_cluster_subnet_group( ) end +""" + modify_custom_domain_association(cluster_identifier, custom_domain_certificate_arn, custom_domain_name) + modify_custom_domain_association(cluster_identifier, custom_domain_certificate_arn, custom_domain_name, params::Dict{String,<:Any}) + +Contains information for changing a custom domain association. + +# Arguments +- `cluster_identifier`: The identifier of the cluster to change a custom domain association + for. +- `custom_domain_certificate_arn`: The certificate Amazon Resource Name (ARN) for the + changed custom domain association. +- `custom_domain_name`: The custom domain name for a changed custom domain association. 
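+
+# Example
+A minimal sketch of pointing an existing custom domain association at a new ACM
+certificate. The cluster identifier, domain name, and certificate ARN below are
+placeholders, and the example assumes the service module has been loaded with
+AWS.jl's `@service` macro and that credentials are configured:
+
+```julia
+using AWS
+@service Redshift
+
+Redshift.modify_custom_domain_association(
+    \"my-cluster\",             # cluster_identifier (placeholder)
+    \"arn:aws:acm:...\",        # custom_domain_certificate_arn (placeholder ACM ARN)
+    \"analytics.example.com\",  # custom_domain_name (placeholder)
+)
+```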
+ +""" +function modify_custom_domain_association( + ClusterIdentifier, + CustomDomainCertificateArn, + CustomDomainName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "ModifyCustomDomainAssociation", + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, + "CustomDomainCertificateArn" => CustomDomainCertificateArn, + "CustomDomainName" => CustomDomainName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_custom_domain_association( + ClusterIdentifier, + CustomDomainCertificateArn, + CustomDomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "ModifyCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, + "CustomDomainCertificateArn" => CustomDomainCertificateArn, + "CustomDomainName" => CustomDomainName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_endpoint_access(endpoint_name) modify_endpoint_access(endpoint_name, params::Dict{String,<:Any}) @@ -5251,6 +5801,60 @@ function modify_event_subscription( ) end +""" + modify_redshift_idc_application(redshift_idc_application_arn) + modify_redshift_idc_application(redshift_idc_application_arn, params::Dict{String,<:Any}) + +Changes an existing Amazon Redshift IAM Identity Center application. + +# Arguments +- `redshift_idc_application_arn`: The ARN for the Redshift application that integrates with + IAM Identity Center. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AuthorizedTokenIssuerList"`: The authorized token issuer list for the Amazon Redshift + IAM Identity Center application to change. +- `"IamRoleArn"`: The IAM role ARN associated with the Amazon Redshift IAM Identity Center + application to change. It has the required permissions to be assumed and invoke the IDC + Identity Center API. +- `"IdcDisplayName"`: The display name for the Amazon Redshift IAM Identity Center + application to change. It appears on the console. +- `"IdentityNamespace"`: The namespace for the Amazon Redshift IAM Identity Center + application to change. It determines which managed application verifies the connection + token. +- `"ServiceIntegrations"`: A collection of service integrations associated with the + application. 
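+
+# Example
+A sketch of changing only the display name of an existing application; any of the
+optional keys listed above can be added to the same dictionary. The application ARN
+is a placeholder, and the example assumes the service module has been loaded with
+AWS.jl's `@service` macro and that credentials are configured:
+
+```julia
+using AWS
+@service Redshift
+
+# Placeholder for the ARN returned when the application was created.
+app_arn = \"arn:aws:redshift:...\"
+
+Redshift.modify_redshift_idc_application(
+    app_arn,
+    Dict(\"IdcDisplayName\" => \"Analytics warehouse\"),
+)
+```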
+""" +function modify_redshift_idc_application( + RedshiftIdcApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "ModifyRedshiftIdcApplication", + Dict{String,Any}("RedshiftIdcApplicationArn" => RedshiftIdcApplicationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_redshift_idc_application( + RedshiftIdcApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "ModifyRedshiftIdcApplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("RedshiftIdcApplicationArn" => RedshiftIdcApplicationArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_scheduled_action(scheduled_action_name) modify_scheduled_action(scheduled_action_name, params::Dict{String,<:Any}) @@ -5552,6 +6156,48 @@ function purchase_reserved_node_offering( ) end +""" + put_resource_policy(policy, resource_arn) + put_resource_policy(policy, resource_arn, params::Dict{String,<:Any}) + +Updates the resource policy for a specified resource. + +# Arguments +- `policy`: The content of the resource policy being updated. +- `resource_arn`: The Amazon Resource Name (ARN) of the resource of which its resource + policy is updated. + +""" +function put_resource_policy( + Policy, ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "PutResourcePolicy", + Dict{String,Any}("Policy" => Policy, "ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_resource_policy( + Policy, + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "PutResourcePolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Policy" => Policy, "ResourceArn" => ResourceArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ reboot_cluster(cluster_identifier) reboot_cluster(cluster_identifier, params::Dict{String,<:Any}) @@ -5681,10 +6327,9 @@ Changes the size of the cluster. You can change the cluster's type, or change th type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You -can only resize clusters of the following types: dc1.large (if your cluster is in a VPC) - dc1.8xlarge (if your cluster is in a VPC) dc2.large dc2.8xlarge ds2.xlarge -ds2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add -must match the node type for the cluster. +can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.xlplus +ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for +the cluster. # Arguments - `cluster_identifier`: The unique identifier for the cluster to resize. @@ -5802,6 +6447,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys their Amazon Resource Name (ARN) format. The maximum number of IAM roles that you can associate is subject to a quota. For more information, go to Quotas and limits in the Amazon Redshift Cluster Management Guide. +- `"IpAddressType"`: The IP address type for the cluster. Possible values are ipv4 and + dualstack. 
- `"KmsKeyId"`: The Key Management Service (KMS) key ID of the encryption key that encrypts data in the cluster restored from a shared snapshot. You can also provide the key ID when you restore from an unencrypted snapshot to an encrypted cluster in the same account. @@ -5814,25 +6461,30 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks. +- `"ManageMasterPassword"`: If true, Amazon Redshift uses Secrets Manager to manage the + restored cluster's admin credentials. If ManageMasterPassword is false or not set, Amazon + Redshift uses the admin credentials the cluster had at the time the snapshot was taken. - `"ManualSnapshotRetentionPeriod"`: The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. The value must be either -1 or an integer between 1 and 3,653. -- `"NodeType"`: The node type that the restored cluster will be provisioned with. Default: - The node type of the cluster from which the snapshot was taken. You can modify this if you - are using any DS node type. In that case, you can choose to restore into another DS node - type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or - ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same - instance type and size. In other words, you can only restore a dc1.large instance type into - another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge - to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8large - cluster. For more information about node types, see About Clusters and Nodes in the Amazon - Redshift Cluster Management Guide. +- `"MasterPasswordSecretKmsKeyId"`: The ID of the Key Management Service (KMS) key used to + encrypt and store the cluster's admin credentials secret. You can only use this parameter + if ManageMasterPassword is true. +- `"MultiAZ"`: If true, the snapshot will be restored to a cluster deployed in two + Availability Zones. +- `"NodeType"`: The node type that the restored cluster will be provisioned with. If you + have a DC instance type, you must restore into that same instance type and size. In other + words, you can only restore a dc2.large node type into another dc2 type. For more + information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster + Management Guide. - `"NumberOfNodes"`: The number of nodes specified when provisioning the restored cluster. - `"OwnerAccount"`: The Amazon Web Services account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. - `"Port"`: The port number on which the cluster accepts connections. Default: The same - port as the original cluster. Constraints: Must be between 1115 and 65535. + port as the original cluster. Valid values: For clusters with DC2 nodes, must be within the + range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or + 8191-8215. - `"PreferredMaintenanceWindow"`: The weekly time range (in UTC) during which automated cluster maintenance can occur. 
Format: ddd:hh24:mi-ddd:hh24:mi Default: The value selected for the cluster from which the snapshot was taken. For more information about the @@ -5845,8 +6497,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys message to restore from a cluster. You must specify this parameter or snapshotIdentifier, but not both. - `"SnapshotClusterIdentifier"`: The name of the cluster the source snapshot was created - from. This parameter is required if your IAM user or role has a policy containing a - snapshot resource element that specifies anything other than * for the cluster name. + from. This parameter is required if your IAM user has a policy containing a snapshot + resource element that specifies anything other than * for the cluster name. - `"SnapshotIdentifier"`: The name of the snapshot from which to create the new cluster. This parameter isn't case sensitive. You must specify this parameter or snapshotArn, but not both. Example: my-snapshot-id @@ -6111,8 +6763,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SnapshotArn"`: The Amazon Resource Name (ARN) of the snapshot associated with the message to revoke access. - `"SnapshotClusterIdentifier"`: The identifier of the cluster the snapshot was created - from. This parameter is required if your IAM user or role has a policy containing a - snapshot resource element that specifies anything other than * for the cluster name. + from. This parameter is required if your IAM user has a policy containing a snapshot + resource element that specifies anything other than * for the cluster name. - `"SnapshotIdentifier"`: The identifier of the snapshot that the account can no longer access. """ diff --git a/src/services/redshift_serverless.jl b/src/services/redshift_serverless.jl index ee65bfd1f9..a322e899d4 100644 --- a/src/services/redshift_serverless.jl +++ b/src/services/redshift_serverless.jl @@ -54,6 +54,61 @@ function convert_recovery_point_to_snapshot( ) end +""" + create_custom_domain_association(custom_domain_certificate_arn, custom_domain_name, workgroup_name) + create_custom_domain_association(custom_domain_certificate_arn, custom_domain_name, workgroup_name, params::Dict{String,<:Any}) + +Creates a custom domain association for Amazon Redshift Serverless. + +# Arguments +- `custom_domain_certificate_arn`: The custom domain name’s certificate Amazon resource + name (ARN). +- `custom_domain_name`: The custom domain name to associate with the workgroup. +- `workgroup_name`: The name of the workgroup associated with the database. 
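+
+# Example
+A minimal sketch of attaching a custom domain to an existing workgroup. The
+certificate ARN, domain name, and workgroup name are placeholders, and the example
+assumes the service module has been loaded with AWS.jl's `@service` macro and that
+credentials are configured:
+
+```julia
+using AWS
+@service Redshift_Serverless
+
+Redshift_Serverless.create_custom_domain_association(
+    \"arn:aws:acm:...\",        # customDomainCertificateArn (placeholder ACM ARN)
+    \"analytics.example.com\",  # customDomainName (placeholder)
+    \"my-workgroup\",           # workgroupName (placeholder)
+)
+```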
+ +""" +function create_custom_domain_association( + customDomainCertificateArn, + customDomainName, + workgroupName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "CreateCustomDomainAssociation", + Dict{String,Any}( + "customDomainCertificateArn" => customDomainCertificateArn, + "customDomainName" => customDomainName, + "workgroupName" => workgroupName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_custom_domain_association( + customDomainCertificateArn, + customDomainName, + workgroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "CreateCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "customDomainCertificateArn" => customDomainCertificateArn, + "customDomainName" => customDomainName, + "workgroupName" => workgroupName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_endpoint_access(endpoint_name, subnet_ids, workgroup_name) create_endpoint_access(endpoint_name, subnet_ids, workgroup_name, params::Dict{String,<:Any}) @@ -70,6 +125,8 @@ Creates an Amazon Redshift Serverless managed VPC endpoint. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ownerAccount"`: The owner Amazon Web Services account for the Amazon Redshift + Serverless workgroup. - `"vpcSecurityGroupIds"`: The unique identifiers of the security group that defines the ports, protocols, and sources for inbound traffic that you are authorizing into your endpoint. @@ -127,8 +184,11 @@ Creates a namespace in Amazon Redshift Serverless. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"adminPasswordSecretKmsKeyId"`: The ID of the Key Management Service (KMS) key used to + encrypt and store the namespace's admin credentials secret. You can only use this parameter + if manageAdminPassword is true. - `"adminUserPassword"`: The password of the administrator for the first database created - in the namespace. + in the namespace. You can't use adminUserPassword if manageAdminPassword is true. - `"adminUsername"`: The username of the administrator for the first database created in the namespace. - `"dbName"`: The name of the first database created in the namespace. @@ -139,6 +199,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys encrypt your data. - `"logExports"`: The types of logs the namespace can export. Available export types are userlog, connectionlog, and useractivitylog. +- `"manageAdminPassword"`: If true, Amazon Redshift uses Secrets Manager to manage the + namespace's admin credentials. You can't use adminUserPassword if manageAdminPassword is + true. If manageAdminPassword is false or not set, Amazon Redshift uses adminUserPassword + for the admin user account's password. +- `"redshiftIdcApplicationArn"`: The ARN for the Redshift application that integrates with + IAM Identity Center. - `"tags"`: A list of tag instances. 
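+
+# Example
+A sketch of creating a namespace whose admin credentials are managed by Secrets
+Manager, as described for `manageAdminPassword` above; `adminUserPassword` is
+deliberately omitted because the two settings are mutually exclusive. The namespace
+and database names are placeholders, and the example assumes the service module has
+been loaded with AWS.jl's `@service` macro and that credentials are configured:
+
+```julia
+using AWS
+@service Redshift_Serverless
+
+Redshift_Serverless.create_namespace(
+    \"analytics-namespace\",
+    Dict(
+        \"dbName\" => \"dev\",
+        \"adminUsername\" => \"admin\",
+        \"manageAdminPassword\" => true,  # Secrets Manager stores the admin password
+    ),
+)
+```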
""" function create_namespace(namespaceName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -164,6 +230,93 @@ function create_namespace( ) end +""" + create_scheduled_action(namespace_name, role_arn, schedule, scheduled_action_name, target_action) + create_scheduled_action(namespace_name, role_arn, schedule, scheduled_action_name, target_action, params::Dict{String,<:Any}) + +Creates a scheduled action. A scheduled action contains a schedule and an Amazon Redshift +API action. For example, you can create a schedule of when to run the CreateSnapshot API +operation. + +# Arguments +- `namespace_name`: The name of the namespace for which to create a scheduled action. +- `role_arn`: The ARN of the IAM role to assume to run the scheduled action. This IAM role + must have permission to run the Amazon Redshift Serverless API operation in the scheduled + action. This IAM role must allow the Amazon Redshift scheduler to schedule creating + snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your + behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster + Management Guide +- `schedule`: The schedule for a one-time (at timestamp format) or recurring (cron format) + scheduled action. Schedule invocations must be separated by at least one hour. Times are in + UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, 2016-03-04T17:27:00. + Format of cron expression is (Minutes Hours Day-of-month Month Day-of-week Year). For + example, \"(0 10 ? * MON *)\". For more information, see Cron Expressions in the Amazon + CloudWatch Events User Guide. +- `scheduled_action_name`: The name of the scheduled action. +- `target_action`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"enabled"`: Indicates whether the schedule is enabled. If false, the scheduled action + does not trigger. For more information about state of the scheduled action, see + ScheduledAction. +- `"endTime"`: The end time in UTC when the schedule is no longer active. After this time, + the scheduled action does not trigger. +- `"scheduledActionDescription"`: The description of the scheduled action. +- `"startTime"`: The start time in UTC when the schedule is active. Before this time, the + scheduled action does not trigger. 
+""" +function create_scheduled_action( + namespaceName, + roleArn, + schedule, + scheduledActionName, + targetAction; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "CreateScheduledAction", + Dict{String,Any}( + "namespaceName" => namespaceName, + "roleArn" => roleArn, + "schedule" => schedule, + "scheduledActionName" => scheduledActionName, + "targetAction" => targetAction, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_scheduled_action( + namespaceName, + roleArn, + schedule, + scheduledActionName, + targetAction, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "CreateScheduledAction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "namespaceName" => namespaceName, + "roleArn" => roleArn, + "schedule" => schedule, + "scheduledActionName" => scheduledActionName, + "targetAction" => targetAction, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_snapshot(namespace_name, snapshot_name) create_snapshot(namespace_name, snapshot_name, params::Dict{String,<:Any}) @@ -212,6 +365,60 @@ function create_snapshot( ) end +""" + create_snapshot_copy_configuration(destination_region, namespace_name) + create_snapshot_copy_configuration(destination_region, namespace_name, params::Dict{String,<:Any}) + +Creates a snapshot copy configuration that lets you copy snapshots to another Amazon Web +Services Region. + +# Arguments +- `destination_region`: The destination Amazon Web Services Region that you want to copy + snapshots to. +- `namespace_name`: The name of the namespace to copy snapshots from. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"destinationKmsKeyId"`: The KMS key to use to encrypt your snapshots in the destination + Amazon Web Services Region. +- `"snapshotRetentionPeriod"`: The retention period of the snapshots that you copy to the + destination Amazon Web Services Region. +""" +function create_snapshot_copy_configuration( + destinationRegion, namespaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "CreateSnapshotCopyConfiguration", + Dict{String,Any}( + "destinationRegion" => destinationRegion, "namespaceName" => namespaceName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_snapshot_copy_configuration( + destinationRegion, + namespaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "CreateSnapshotCopyConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "destinationRegion" => destinationRegion, + "namespaceName" => namespaceName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_usage_limit(amount, resource_arn, usage_type) create_usage_limit(amount, resource_arn, usage_type, params::Dict{String,<:Any}) @@ -286,13 +493,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"baseCapacity"`: The base data warehouse capacity of the workgroup in Redshift Processing Units (RPUs). - `"configParameters"`: An array of parameters to set for advanced control over a database. 
- The options are auto_mv, datestyle, enable_case_sensitivity_identifier, - enable_user_activity_logging, query_group, search_path, and query monitoring metrics that - let you define performance boundaries. For more information about query monitoring rules - and available metrics, see Query monitoring metrics for Amazon Redshift Serverless. + The options are auto_mv, datestyle, enable_case_sensitive_identifier, + enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and + query monitoring metrics that let you define performance boundaries. For more information + about query monitoring rules and available metrics, see Query monitoring metrics for + Amazon Redshift Serverless. - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet. +- `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to + serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are 5431-5455 and 8191-8215. The default is 5439. - `"publiclyAccessible"`: A value that specifies whether the workgroup can be accessed from @@ -335,6 +545,51 @@ function create_workgroup( ) end +""" + delete_custom_domain_association(custom_domain_name, workgroup_name) + delete_custom_domain_association(custom_domain_name, workgroup_name, params::Dict{String,<:Any}) + +Deletes a custom domain association for Amazon Redshift Serverless. + +# Arguments +- `custom_domain_name`: The custom domain name associated with the workgroup. +- `workgroup_name`: The name of the workgroup associated with the database. + +""" +function delete_custom_domain_association( + customDomainName, workgroupName; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "DeleteCustomDomainAssociation", + Dict{String,Any}( + "customDomainName" => customDomainName, "workgroupName" => workgroupName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_custom_domain_association( + customDomainName, + workgroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "DeleteCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "customDomainName" => customDomainName, "workgroupName" => workgroupName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_endpoint_access(endpoint_name) delete_endpoint_access(endpoint_name, params::Dict{String,<:Any}) @@ -444,6 +699,45 @@ function delete_resource_policy( ) end +""" + delete_scheduled_action(scheduled_action_name) + delete_scheduled_action(scheduled_action_name, params::Dict{String,<:Any}) + +Deletes a scheduled action. + +# Arguments +- `scheduled_action_name`: The name of the scheduled action to delete. 
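+
+# Example
+A minimal sketch of the call; the scheduled action name is a placeholder, and the
+example assumes the service module has been loaded with AWS.jl's `@service` macro
+and that credentials are configured:
+
+```julia
+using AWS
+@service Redshift_Serverless
+
+Redshift_Serverless.delete_scheduled_action(\"weekly-snapshot\")
+```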
+ +""" +function delete_scheduled_action( + scheduledActionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "DeleteScheduledAction", + Dict{String,Any}("scheduledActionName" => scheduledActionName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_scheduled_action( + scheduledActionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "DeleteScheduledAction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("scheduledActionName" => scheduledActionName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_snapshot(snapshot_name) delete_snapshot(snapshot_name, params::Dict{String,<:Any}) @@ -477,6 +771,47 @@ function delete_snapshot( ) end +""" + delete_snapshot_copy_configuration(snapshot_copy_configuration_id) + delete_snapshot_copy_configuration(snapshot_copy_configuration_id, params::Dict{String,<:Any}) + +Deletes a snapshot copy configuration + +# Arguments +- `snapshot_copy_configuration_id`: The ID of the snapshot copy configuration to delete. + +""" +function delete_snapshot_copy_configuration( + snapshotCopyConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "DeleteSnapshotCopyConfiguration", + Dict{String,Any}("snapshotCopyConfigurationId" => snapshotCopyConfigurationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_snapshot_copy_configuration( + snapshotCopyConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "DeleteSnapshotCopyConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "snapshotCopyConfigurationId" => snapshotCopyConfigurationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_usage_limit(usage_limit_id) delete_usage_limit(usage_limit_id, params::Dict{String,<:Any}) @@ -544,8 +879,8 @@ function delete_workgroup( end """ - get_credentials(workgroup_name) - get_credentials(workgroup_name, params::Dict{String,<:Any}) + get_credentials() + get_credentials(params::Dict{String,<:Any}) Returns a database user name and temporary password with temporary authorization to log in to Amazon Redshift Serverless. By default, the temporary credentials expire in 900 seconds. @@ -556,11 +891,10 @@ and resources.</p> <p>If the <code>DbName</code> paramet specified, the IAM policy must allow access to the resource dbname for the specified database name.</p> -# Arguments -- `workgroup_name`: The name of the workgroup associated with the database. - # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customDomainName"`: The custom domain name associated with the workgroup. The custom + domain name or the workgroup name must be included in the request. - `"dbName"`: The name of the database to get temporary authorization to log on to. Constraints: Must be 1 to 64 alphanumeric characters or hyphens. Must contain only uppercase or lowercase letters, numbers, underscore, plus sign, period (dot), at symbol @@ -569,24 +903,60 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Words in the Amazon Redshift Database Developer Guide - `"durationSeconds"`: The number of seconds until the returned temporary password expires. 
The minimum is 900 seconds, and the maximum is 3600 seconds. +- `"workgroupName"`: The name of the workgroup associated with the database. """ -function get_credentials(workgroupName; aws_config::AbstractAWSConfig=global_aws_config()) +function get_credentials(; aws_config::AbstractAWSConfig=global_aws_config()) return redshift_serverless( - "GetCredentials", - Dict{String,Any}("workgroupName" => workgroupName); + "GetCredentials"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_credentials( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "GetCredentials", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + get_custom_domain_association(custom_domain_name, workgroup_name) + get_custom_domain_association(custom_domain_name, workgroup_name, params::Dict{String,<:Any}) + +Gets information about a specific custom domain association. + +# Arguments +- `custom_domain_name`: The custom domain name associated with the workgroup. +- `workgroup_name`: The name of the workgroup associated with the database. + +""" +function get_custom_domain_association( + customDomainName, workgroupName; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "GetCustomDomainAssociation", + Dict{String,Any}( + "customDomainName" => customDomainName, "workgroupName" => workgroupName + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_credentials( +function get_custom_domain_association( + customDomainName, workgroupName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return redshift_serverless( - "GetCredentials", + "GetCustomDomainAssociation", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("workgroupName" => workgroupName), params) + mergewith( + _merge, + Dict{String,Any}( + "customDomainName" => customDomainName, "workgroupName" => workgroupName + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -732,6 +1102,45 @@ function get_resource_policy( ) end +""" + get_scheduled_action(scheduled_action_name) + get_scheduled_action(scheduled_action_name, params::Dict{String,<:Any}) + +Returns information about a scheduled action. + +# Arguments +- `scheduled_action_name`: The name of the scheduled action. + +""" +function get_scheduled_action( + scheduledActionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "GetScheduledAction", + Dict{String,Any}("scheduledActionName" => scheduledActionName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_scheduled_action( + scheduledActionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "GetScheduledAction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("scheduledActionName" => scheduledActionName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_snapshot() get_snapshot(params::Dict{String,<:Any}) @@ -864,6 +1273,43 @@ function get_workgroup( ) end +""" + list_custom_domain_associations() + list_custom_domain_associations(params::Dict{String,<:Any}) + + Lists custom domain associations for Amazon Redshift Serverless. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"customDomainCertificateArn"`: The custom domain name’s certificate Amazon resource + name (ARN). +- `"customDomainName"`: The custom domain name associated with the workgroup. +- `"maxResults"`: An optional parameter that specifies the maximum number of results to + return. You can use nextToken to display the next page of results. +- `"nextToken"`: When nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. +""" +function list_custom_domain_associations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "ListCustomDomainAssociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_custom_domain_associations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "ListCustomDomainAssociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_endpoint_access() list_endpoint_access(params::Dict{String,<:Any}) @@ -877,6 +1323,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: If your initial ListEndpointAccess operation returns a nextToken, you can include the returned nextToken in following ListEndpointAccess operations, which returns results in the next page. +- `"ownerAccount"`: The owner Amazon Web Services account for the Amazon Redshift + Serverless workgroup. - `"vpcId"`: The unique identifier of the virtual private cloud with access to Amazon Redshift Serverless. - `"workgroupName"`: The name of the workgroup associated with the VPC endpoint to return. @@ -953,6 +1401,73 @@ function list_recovery_points( ) end +""" + list_scheduled_actions() + list_scheduled_actions(params::Dict{String,<:Any}) + +Returns a list of scheduled actions. You can use the flags to filter the list of returned +scheduled actions. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: An optional parameter that specifies the maximum number of results to + return. Use nextToken to display the next page of results. +- `"namespaceName"`: The name of namespace associated with the scheduled action to retrieve. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. +""" +function list_scheduled_actions(; aws_config::AbstractAWSConfig=global_aws_config()) + return redshift_serverless( + "ListScheduledActions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_scheduled_actions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "ListScheduledActions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_snapshot_copy_configurations() + list_snapshot_copy_configurations(params::Dict{String,<:Any}) + +Returns a list of snapshot copy configurations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: An optional parameter that specifies the maximum number of results to + return. You can use nextToken to display the next page of results. 
+- `"namespaceName"`: The namespace from which to list all snapshot copy configurations. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. +""" +function list_snapshot_copy_configurations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "ListSnapshotCopyConfigurations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_snapshot_copy_configurations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "ListSnapshotCopyConfigurations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_snapshots() list_snapshots(params::Dict{String,<:Any}) @@ -1098,6 +1613,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: If your initial ListWorkgroups operation returns a nextToken, you can include the returned nextToken in following ListNamespaces operations, which returns results in the next page. +- `"ownerAccount"`: The owner Amazon Web Services account for the Amazon Redshift + Serverless workgroup. """ function list_workgroups(; aws_config::AbstractAWSConfig=global_aws_config()) return redshift_serverless( @@ -1224,6 +1741,12 @@ Restores a namespace from a snapshot. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"adminPasswordSecretKmsKeyId"`: The ID of the Key Management Service (KMS) key used to + encrypt and store the namespace's admin credentials secret. +- `"manageAdminPassword"`: If true, Amazon Redshift uses Secrets Manager to manage the + restored snapshot's admin credentials. If MmanageAdminPassword is false or not set, Amazon + Redshift uses the admin credentials that the namespace or cluster had at the time the + snapshot was taken. - `"ownerAccount"`: The Amazon Web Services account that owns the snapshot. - `"snapshotArn"`: The Amazon Resource Name (ARN) of the snapshot to restore from. Required if restoring from Amazon Redshift Serverless to a provisioned cluster. Must not be @@ -1267,6 +1790,86 @@ function restore_from_snapshot( ) end +""" + restore_table_from_recovery_point(namespace_name, new_table_name, recovery_point_id, source_database_name, source_table_name, workgroup_name) + restore_table_from_recovery_point(namespace_name, new_table_name, recovery_point_id, source_database_name, source_table_name, workgroup_name, params::Dict{String,<:Any}) + +Restores a table from a recovery point to your Amazon Redshift Serverless instance. You +can't use this operation to restore tables with interleaved sort keys. + +# Arguments +- `namespace_name`: Namespace of the recovery point to restore from. +- `new_table_name`: The name of the table to create from the restore operation. +- `recovery_point_id`: The ID of the recovery point to restore the table from. +- `source_database_name`: The name of the source database that contains the table being + restored. +- `source_table_name`: The name of the source table being restored. +- `workgroup_name`: The workgroup to restore the table to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"activateCaseSensitiveIdentifier"`: Indicates whether name identifiers for database, + schema, and table are case sensitive. 
If true, the names are case sensitive. If false, the + names are not case sensitive. The default is false. +- `"sourceSchemaName"`: The name of the source schema that contains the table being + restored. +- `"targetDatabaseName"`: The name of the database to restore the table to. +- `"targetSchemaName"`: The name of the schema to restore the table to. +""" +function restore_table_from_recovery_point( + namespaceName, + newTableName, + recoveryPointId, + sourceDatabaseName, + sourceTableName, + workgroupName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "RestoreTableFromRecoveryPoint", + Dict{String,Any}( + "namespaceName" => namespaceName, + "newTableName" => newTableName, + "recoveryPointId" => recoveryPointId, + "sourceDatabaseName" => sourceDatabaseName, + "sourceTableName" => sourceTableName, + "workgroupName" => workgroupName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function restore_table_from_recovery_point( + namespaceName, + newTableName, + recoveryPointId, + sourceDatabaseName, + sourceTableName, + workgroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "RestoreTableFromRecoveryPoint", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "namespaceName" => namespaceName, + "newTableName" => newTableName, + "recoveryPointId" => recoveryPointId, + "sourceDatabaseName" => sourceDatabaseName, + "sourceTableName" => sourceTableName, + "workgroupName" => workgroupName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ restore_table_from_snapshot(namespace_name, new_table_name, snapshot_name, source_database_name, source_table_name, workgroup_name) restore_table_from_snapshot(namespace_name, new_table_name, snapshot_name, source_database_name, source_table_name, workgroup_name, params::Dict{String,<:Any}) @@ -1427,6 +2030,61 @@ function untag_resource( ) end +""" + update_custom_domain_association(custom_domain_certificate_arn, custom_domain_name, workgroup_name) + update_custom_domain_association(custom_domain_certificate_arn, custom_domain_name, workgroup_name, params::Dict{String,<:Any}) + +Updates an Amazon Redshift Serverless certificate associated with a custom domain. + +# Arguments +- `custom_domain_certificate_arn`: The custom domain name’s certificate Amazon resource + name (ARN). This is optional. +- `custom_domain_name`: The custom domain name associated with the workgroup. +- `workgroup_name`: The name of the workgroup associated with the database. 
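+
+# Example
+A sketch of rotating the certificate behind an existing association. The new
+certificate ARN, domain name, and workgroup name are placeholders, and the example
+assumes the service module has been loaded with AWS.jl's `@service` macro and that
+credentials are configured:
+
+```julia
+using AWS
+@service Redshift_Serverless
+
+Redshift_Serverless.update_custom_domain_association(
+    \"arn:aws:acm:...\",        # customDomainCertificateArn (placeholder for the new certificate)
+    \"analytics.example.com\",  # customDomainName (placeholder)
+    \"my-workgroup\",           # workgroupName (placeholder)
+)
+```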
+ +""" +function update_custom_domain_association( + customDomainCertificateArn, + customDomainName, + workgroupName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "UpdateCustomDomainAssociation", + Dict{String,Any}( + "customDomainCertificateArn" => customDomainCertificateArn, + "customDomainName" => customDomainName, + "workgroupName" => workgroupName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_custom_domain_association( + customDomainCertificateArn, + customDomainName, + workgroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "UpdateCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "customDomainCertificateArn" => customDomainCertificateArn, + "customDomainName" => customDomainName, + "workgroupName" => workgroupName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_endpoint_access(endpoint_name) update_endpoint_access(endpoint_name, params::Dict{String,<:Any}) @@ -1481,8 +2139,12 @@ in a single request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"adminPasswordSecretKmsKeyId"`: The ID of the Key Management Service (KMS) key used to + encrypt and store the namespace's admin credentials secret. You can only use this parameter + if manageAdminPassword is true. - `"adminUserPassword"`: The password of the administrator for the first database created - in the namespace. This parameter must be updated together with adminUsername. + in the namespace. This parameter must be updated together with adminUsername. You can't use + adminUserPassword if manageAdminPassword is true. - `"adminUsername"`: The username of the administrator for the first database created in the namespace. This parameter must be updated together with adminUserPassword. - `"defaultIamRoleArn"`: The Amazon Resource Name (ARN) of the IAM role to set as a default @@ -1493,6 +2155,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys encrypt your data. - `"logExports"`: The types of logs the namespace can export. The export types are userlog, connectionlog, and useractivitylog. +- `"manageAdminPassword"`: If true, Amazon Redshift uses Secrets Manager to manage the + namespace's admin credentials. You can't use adminUserPassword if manageAdminPassword is + true. If manageAdminPassword is false or not set, Amazon Redshift uses adminUserPassword + for the admin user account's password. """ function update_namespace(namespaceName; aws_config::AbstractAWSConfig=global_aws_config()) return redshift_serverless( @@ -1517,6 +2183,65 @@ function update_namespace( ) end +""" + update_scheduled_action(scheduled_action_name) + update_scheduled_action(scheduled_action_name, params::Dict{String,<:Any}) + +Updates a scheduled action. + +# Arguments +- `scheduled_action_name`: The name of the scheduled action to update to. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"enabled"`: Specifies whether to enable the scheduled action. +- `"endTime"`: The end time in UTC of the scheduled action to update. +- `"roleArn"`: The ARN of the IAM role to assume to run the scheduled action. This IAM role + must have permission to run the Amazon Redshift Serverless API operation in the scheduled + action. 
This IAM role must allow the Amazon Redshift scheduler to schedule creating + snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your + behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster + Management Guide +- `"schedule"`: The schedule for a one-time (at timestamp format) or recurring (cron + format) scheduled action. Schedule invocations must be separated by at least one hour. + Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, + 2016-03-04T17:27:00. Format of cron expression is (Minutes Hours Day-of-month Month + Day-of-week Year). For example, \"(0 10 ? * MON *)\". For more information, see Cron + Expressions in the Amazon CloudWatch Events User Guide. +- `"scheduledActionDescription"`: The descripion of the scheduled action to update to. +- `"startTime"`: The start time in UTC of the scheduled action to update to. +- `"targetAction"`: +""" +function update_scheduled_action( + scheduledActionName; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "UpdateScheduledAction", + Dict{String,Any}("scheduledActionName" => scheduledActionName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_scheduled_action( + scheduledActionName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "UpdateScheduledAction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("scheduledActionName" => scheduledActionName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_snapshot(snapshot_name) update_snapshot(snapshot_name, params::Dict{String,<:Any}) @@ -1553,6 +2278,51 @@ function update_snapshot( ) end +""" + update_snapshot_copy_configuration(snapshot_copy_configuration_id) + update_snapshot_copy_configuration(snapshot_copy_configuration_id, params::Dict{String,<:Any}) + +Updates a snapshot copy configuration. + +# Arguments +- `snapshot_copy_configuration_id`: The ID of the snapshot copy configuration to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"snapshotRetentionPeriod"`: The new retention period of how long to keep a snapshot in + the destination Amazon Web Services Region. +""" +function update_snapshot_copy_configuration( + snapshotCopyConfigurationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift_serverless( + "UpdateSnapshotCopyConfiguration", + Dict{String,Any}("snapshotCopyConfigurationId" => snapshotCopyConfigurationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_snapshot_copy_configuration( + snapshotCopyConfigurationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift_serverless( + "UpdateSnapshotCopyConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "snapshotCopyConfigurationId" => snapshotCopyConfigurationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_usage_limit(usage_limit_id) update_usage_limit(usage_limit_id, params::Dict{String,<:Any}) @@ -1611,13 +2381,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"baseCapacity"`: The new base data warehouse capacity in Redshift Processing Units (RPUs). - `"configParameters"`: An array of parameters to set for advanced control over a database. - The options are auto_mv, datestyle, enable_case_sensitivity_identifier, - enable_user_activity_logging, query_group, search_path, and query monitoring metrics that - let you define performance boundaries. For more information about query monitoring rules - and available metrics, see Query monitoring metrics for Amazon Redshift Serverless. + The options are auto_mv, datestyle, enable_case_sensitive_identifier, + enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and + query monitoring metrics that let you define performance boundaries. For more information + about query monitoring rules and available metrics, see Query monitoring metrics for + Amazon Redshift Serverless. - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC. +- `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to + serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are 5431-5455 and 8191-8215. The default is 5439. - `"publiclyAccessible"`: A value that specifies whether the workgroup can be accessible diff --git a/src/services/rekognition.jl b/src/services/rekognition.jl index 45aa33aced..ea1220c2fc 100644 --- a/src/services/rekognition.jl +++ b/src/services/rekognition.jl @@ -182,21 +182,23 @@ end copy_project_version(destination_project_arn, output_config, source_project_arn, source_project_version_arn, version_name) copy_project_version(destination_project_arn, output_config, source_project_arn, source_project_version_arn, version_name, params::Dict{String,<:Any}) -Copies a version of an Amazon Rekognition Custom Labels model from a source project to a -destination project. The source and destination projects can be in different AWS accounts -but must be in the same AWS Region. You can't copy a model to another AWS service. To copy -a model version to a different AWS account, you need to create a resource-based policy -known as a project policy. You attach the project policy to the source project by calling -PutProjectPolicy. The project policy gives permission to copy the model version from a -trusting AWS account to a trusted account. For more information creating and attaching a -project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom -Labels Developer Guide. If you are copying a model version to a project in the same AWS -account, you don't need to create a project policy. To copy a model, the destination -project, source project, and source model version must already exist. Copying a model -version takes a while to complete. To get the current status, call DescribeProjectVersions -and check the value of Status in the ProjectVersionDescription object. The copy operation -has finished when the value of Status is COPYING_COMPLETED. This operation requires -permissions to perform the rekognition:CopyProjectVersion action. + This operation applies only to Amazon Rekognition Custom Labels. Copies a version of an +Amazon Rekognition Custom Labels model from a source project to a destination project. The +source and destination projects can be in different AWS accounts but must be in the same +AWS Region. 
You can't copy a model to another AWS service. To copy a model version to a +different AWS account, you need to create a resource-based policy known as a project +policy. You attach the project policy to the source project by calling PutProjectPolicy. +The project policy gives permission to copy the model version from a trusting AWS account +to a trusted account. For more information creating and attaching a project policy, see +Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. +If you are copying a model version to a project in the same AWS account, you don't need to +create a project policy. Copying project versions is supported only for Custom Labels +models. To copy a model, the destination project, source project, and source model version +must already exist. Copying a model version takes a while to complete. To get the current +status, call DescribeProjectVersions and check the value of Status in the +ProjectVersionDescription object. The copy operation has finished when the value of Status +is COPYING_COMPLETED. This operation requires permissions to perform the +rekognition:CopyProjectVersion action. # Arguments - `destination_project_arn`: The ARN of the project in the trusted AWS account that you @@ -319,24 +321,25 @@ end create_dataset(dataset_type, project_arn) create_dataset(dataset_type, project_arn, params::Dict{String,<:Any}) -Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using -an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition -Custom Labels dataset. To create a training dataset for a project, specify train for the -value of DatasetType. To create the test dataset for a project, specify test for the value -of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the -dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the -current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. - To check if any non-terminal errors occurred, call ListDatasetEntries and check for the -presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error -occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error -information. For more information, see Creating dataset in the Amazon Rekognition Custom -Labels Developer Guide. This operation requires permissions to perform the -rekognition:CreateDataset action. If you want to copy an existing dataset, you also require -permission to perform the rekognition:ListDatasetEntries action. - -# Arguments -- `dataset_type`: The type of the dataset. Specify train to create a training dataset. - Specify test to create a test dataset. + This operation applies only to Amazon Rekognition Custom Labels. Creates a new Amazon +Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker +format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To +create a training dataset for a project, specify TRAIN for the value of DatasetType. To +create the test dataset for a project, specify TEST for the value of DatasetType. The +response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a +dataset takes a while to complete. Use DescribeDataset to check the current status. The +dataset created successfully if the value of Status is CREATE_COMPLETE. 
To check if any +non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors +lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = +CREATE_FAILED). Currently, you can't access the terminal error information. For more +information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. +This operation requires permissions to perform the rekognition:CreateDataset action. If you +want to copy an existing dataset, you also require permission to perform the +rekognition:ListDatasetEntries action. + +# Arguments +- `dataset_type`: The type of the dataset. Specify TRAIN to create a training dataset. + Specify TEST to create a test dataset. - `project_arn`: The ARN of the Amazon Rekognition Custom Labels project to which you want to asssign the dataset. @@ -383,11 +386,12 @@ end This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. -You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 -bucket location. The Amazon S3 bucket stores reference images and audit images. You can use -AuditImagesLimit to limit the number of audit images returned. This number is between 0 and -4. By default, it is set to 0. The limit is best effort and based on the duration of the -selfie-video. + You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 +bucket location. The Amazon S3 bucket stores reference images and audit images. If no +Amazon S3 bucket is defined, raw bytes are sent instead. You can use AuditImagesLimit to +limit the number of audit images returned when GetFaceLivenessSessionResults is called. +This number is between 0 and 4. By default, it is set to 0. The limit is best effort and +based on the duration of the selfie-video. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -420,14 +424,23 @@ end create_project(project_name) create_project(project_name, params::Dict{String,<:Any}) -Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources -(datasets, model versions) that you use to create and manage Amazon Rekognition Custom -Labels models. This operation requires permissions to perform the -rekognition:CreateProject action. +Creates a new Amazon Rekognition project. A project is a group of resources (datasets, +model versions) that you use to create and manage a Amazon Rekognition Custom Labels Model +or custom adapter. You can specify a feature to create the project with, if no feature is +specified then Custom Labels is used by default. For adapters, you can also choose whether +or not to have the project auto update by using the AutoUpdate argument. This operation +requires permissions to perform the rekognition:CreateProject action. # Arguments - `project_name`: The name of the project to create. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AutoUpdate"`: Specifies whether automatic retraining should be attempted for the + versions of the project. Automatic retraining is done as a best effort. Required argument + for Content Moderation. Applicable only to adapters. +- `"Feature"`: Specifies feature that is being customized. If no value is provided + CUSTOM_LABELS is used as a default. 
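# Example
A minimal calling sketch, assuming the generated bindings are loaded through AWS.jl's `@service`
macro; the project name and the `"Feature"`/`"AutoUpdate"` values are illustrative placeholders,
not values documented in this file:

    using AWS
    @service Rekognition   # module name assumed from AWS.jl's @service convention

    # Create a project for a hypothetical custom-adapter use case; all values are placeholders.
    Rekognition.create_project(
        "my-adapter-project",
        Dict{String,Any}("Feature" => "CONTENT_MODERATION", "AutoUpdate" => "ENABLED"),
    )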
""" function create_project(ProjectName; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -456,53 +469,56 @@ end create_project_version(output_config, project_arn, version_name) create_project_version(output_config, project_arn, version_name, params::Dict{String,<:Any}) -Creates a new version of a model and begins training. Models are managed as part of an -Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an -Amazon Resource Name (ARN) for the version of the model. Training uses the training and -test datasets associated with the project. For more information, see Creating training and -test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a -model in a project that doesn't have associated datasets by specifying manifest files in -the TrainingData and TestingData fields. If you open the console after training a model -with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using -the most recent manifest files. You can no longer train a model version for the project by -specifying manifest files. Instead of training with a project without associated datasets, -we recommend that you use the manifest files to create training and test datasets for the -project. Training takes a while to complete. You can get the current status by calling -DescribeProjectVersions. Training completed successfully if the value of the Status field -is TRAINING_COMPLETED. If training fails, see Debugging a failed model training in the -Amazon Rekognition Custom Labels developer guide. Once training has successfully -completed, call DescribeProjectVersions to get the training results and evaluate the model. -For more information, see Improving a trained Amazon Rekognition Custom Labels model in the -Amazon Rekognition Custom Labels developers guide. After evaluating the model, you start -the model by calling StartProjectVersion. This operation requires permissions to perform -the rekognition:CreateProjectVersion action. - -# Arguments -- `output_config`: The Amazon S3 bucket location to store the results of training. The S3 - bucket can be in any AWS account as long as the caller has s3:PutObject permissions on the - S3 bucket. -- `project_arn`: The ARN of the Amazon Rekognition Custom Labels project that manages the - model that you want to train. -- `version_name`: A name for the version of the model. This value must be unique. +Creates a new version of Amazon Rekognition project (like a Custom Labels model or a custom +adapter) and begins training. Models and adapters are managed as part of a Rekognition +project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the +project version. The FeatureConfig operation argument allows you to configure specific +model or adapter settings. You can provide a description to the project version by using +the VersionDescription argment. Training can take a while to complete. You can get the +current status by calling DescribeProjectVersions. Training completed successfully if the +value of the Status field is TRAINING_COMPLETED. Once training has successfully completed, +call DescribeProjectVersions to get the training results and evaluate the model. This +operation requires permissions to perform the rekognition:CreateProjectVersion action. 
+The following applies only to projects with Amazon Rekognition Custom Labels as the chosen +feature: You can train a model in a project that doesn't have associated datasets by +specifying manifest files in the TrainingData and TestingData fields. If you open the +console after training a model with manifest files, Amazon Rekognition Custom Labels +creates the datasets for you using the most recent manifest files. You can no longer train +a model version for the project by specifying manifest files. Instead of training with a +project without associated datasets, we recommend that you use the manifest files to create +training and test datasets for the project. + +# Arguments +- `output_config`: The Amazon S3 bucket location to store the results of training. The + bucket can be any S3 bucket in your AWS account. You need s3:PutObject permission on the + bucket. +- `project_arn`: The ARN of the Amazon Rekognition project that will manage the project + version you want to train. +- `version_name`: A name for the version of the project version. This value must be unique. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"FeatureConfig"`: Feature-specific configuration of the training job. If the job + configuration does not match the feature type associated with the project, an + InvalidParameterException is returned. - `"KmsKeyId"`: The identifier for your AWS Key Management Service key (AWS KMS key). You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias - for your KMS key, or an alias ARN. The key is used to encrypt training and test images - copied into the service for model training. Your source images are unaffected. The key is - also used to encrypt training results and manifest files written to the output Amazon S3 - bucket (OutputConfig). If you choose to use your own KMS key, you need the following - permissions on the KMS key. kms:CreateGrant kms:DescribeKey kms:GenerateDataKey - kms:Decrypt If you don't specify a value for KmsKeyId, images copied into the service are - encrypted using a key that AWS owns and manages. -- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the model. -- `"TestingData"`: Specifies an external manifest that the service uses to test the model. - If you specify TestingData you must also specify TrainingData. The project must not have - any associated datasets. + for your KMS key, or an alias ARN. The key is used to encrypt training images, test images, + and manifest files copied into the service for the project version. Your source images are + unaffected. The key is also used to encrypt training results and manifest files written to + the output Amazon S3 bucket (OutputConfig). If you choose to use your own KMS key, you need + the following permissions on the KMS key. kms:CreateGrant kms:DescribeKey + kms:GenerateDataKey kms:Decrypt If you don't specify a value for KmsKeyId, images + copied into the service are encrypted using a key that AWS owns and manages. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the project + version. +- `"TestingData"`: Specifies an external manifest that the service uses to test the project + version. If you specify TestingData you must also specify TrainingData. The project must + not have any associated datasets. - `"TrainingData"`: Specifies an external manifest that the services uses to train the - model. If you specify TrainingData you must also specify TestingData. 
The project must not - have any associated datasets. + project version. If you specify TrainingData you must also specify TestingData. The project + must not have any associated datasets. +- `"VersionDescription"`: A description applied to the project version being created. """ function create_project_version( OutputConfig, ProjectArn, VersionName; aws_config::AbstractAWSConfig=global_aws_config() @@ -762,13 +778,13 @@ end delete_dataset(dataset_arn) delete_dataset(dataset_arn, params::Dict{String,<:Any}) -Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take -while. Use DescribeDataset to check the current status. The dataset is still deleting if -the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is -deleted, you get a ResourceNotFoundException exception. You can't delete a dataset while -it is creating (Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status = -UPDATE_IN_PROGRESS). This operation requires permissions to perform the -rekognition:DeleteDataset action. + This operation applies only to Amazon Rekognition Custom Labels. Deletes an existing +Amazon Rekognition Custom Labels dataset. Deleting a dataset might take while. Use +DescribeDataset to check the current status. The dataset is still deleting if the value of +Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get +a ResourceNotFoundException exception. You can't delete a dataset while it is creating +(Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status = UPDATE_IN_PROGRESS). +This operation requires permissions to perform the rekognition:DeleteDataset action. # Arguments - `dataset_arn`: The ARN of the Amazon Rekognition Custom Labels dataset that you want to @@ -845,13 +861,13 @@ end delete_project(project_arn) delete_project(project_arn, params::Dict{String,<:Any}) -Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first -delete all models associated with the project. To delete a model, see DeleteProjectVersion. - DeleteProject is an asynchronous operation. To check if the project is deleted, call -DescribeProjects. The project is deleted when the project no longer appears in the -response. Be aware that deleting a given project will also delete any ProjectPolicies -associated with that project. This operation requires permissions to perform the -rekognition:DeleteProject action. +Deletes a Amazon Rekognition project. To delete a project you must first delete all models +or adapters associated with the project. To delete a model or adapter, see +DeleteProjectVersion. DeleteProject is an asynchronous operation. To check if the project +is deleted, call DescribeProjects. The project is deleted when the project no longer +appears in the response. Be aware that deleting a given project will also delete any +ProjectPolicies associated with that project. This operation requires permissions to +perform the rekognition:DeleteProject action. # Arguments - `project_arn`: The Amazon Resource Name (ARN) of the project that you want to delete. @@ -884,10 +900,10 @@ end delete_project_policy(policy_name, project_arn) delete_project_policy(policy_name, project_arn, params::Dict{String,<:Any}) -Deletes an existing project policy. To get a list of project policies attached to a -project, call ListProjectPolicies. To attach a project policy to a project, call -PutProjectPolicy. This operation requires permissions to perform the -rekognition:DeleteProjectPolicy action. 
+ This operation applies only to Amazon Rekognition Custom Labels. Deletes an existing +project policy. To get a list of project policies attached to a project, call +ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy. This +operation requires permissions to perform the rekognition:DeleteProjectPolicy action. # Arguments - `policy_name`: The name of the policy that you want to delete. @@ -932,15 +948,16 @@ end delete_project_version(project_version_arn) delete_project_version(project_version_arn, params::Dict{String,<:Any}) -Deletes an Amazon Rekognition Custom Labels model. You can't delete a model if it is -running or if it is training. To check the status of a model, use the Status field returned -from DescribeProjectVersions. To stop a running model call StopProjectVersion. If the model -is training, wait until it finishes. This operation requires permissions to perform the -rekognition:DeleteProjectVersion action. +Deletes a Rekognition project model or project version, like a Amazon Rekognition Custom +Labels model or a custom adapter. You can't delete a project version if it is running or if +it is training. To check the status of a project version, use the Status field returned +from DescribeProjectVersions. To stop a project version call StopProjectVersion. If the +project version is training, wait until it finishes. This operation requires permissions to +perform the rekognition:DeleteProjectVersion action. # Arguments -- `project_version_arn`: The Amazon Resource Name (ARN) of the model version that you want - to delete. +- `project_version_arn`: The Amazon Resource Name (ARN) of the project version that you + want to delete. """ function delete_project_version( @@ -1102,9 +1119,10 @@ end describe_dataset(dataset_arn) describe_dataset(dataset_arn, params::Dict{String,<:Any}) - Describes an Amazon Rekognition Custom Labels dataset. You can get information such as the -current status of a dataset and statistics about the images and labels in a dataset. This -operation requires permissions to perform the rekognition:DescribeDataset action. + This operation applies only to Amazon Rekognition Custom Labels. Describes an Amazon +Rekognition Custom Labels dataset. You can get information such as the current status of a +dataset and statistics about the images and labels in a dataset. This operation requires +permissions to perform the rekognition:DescribeDataset action. # Arguments - `dataset_arn`: The Amazon Resource Name (ARN) of the dataset that you want to describe. @@ -1137,14 +1155,14 @@ end describe_project_versions(project_arn) describe_project_versions(project_arn, params::Dict{String,<:Any}) -Lists and describes the versions of a model in an Amazon Rekognition Custom Labels project. -You can specify up to 10 model versions in ProjectVersionArns. If you don't specify a -value, descriptions for all model versions in the project are returned. This operation -requires permissions to perform the rekognition:DescribeProjectVersions action. +Lists and describes the versions of an Amazon Rekognition project. You can specify up to 10 +model or adapter versions in ProjectVersionArns. If you don't specify a value, descriptions +for all model/adapter versions in the project are returned. This operation requires +permissions to perform the rekognition:DescribeProjectVersions action. # Arguments -- `project_arn`: The Amazon Resource Name (ARN) of the project that contains the models you - want to describe. 
+- `project_arn`: The Amazon Resource Name (ARN) of the project that contains the + model/adapter you want to describe. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1152,12 +1170,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100. - `"NextToken"`: If the previous response was incomplete (because there is more results to - retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You - can use this pagination token to retrieve the next set of results. -- `"VersionNames"`: A list of model version names that you want to describe. You can add up - to 10 model version names to the list. If you don't specify a value, all model descriptions - are returned. A version name is part of a model (ProjectVersion) ARN. For example, - my-model.2020-01-21T09.10.15 is the version name in the following ARN. + retrieve), Amazon Rekognition returns a pagination token in the response. You can use this + pagination token to retrieve the next set of results. +- `"VersionNames"`: A list of model or project version names that you want to describe. You + can add up to 10 model or project version names to the list. If you don't specify a value, + all project version descriptions are returned. A version name is part of a project version + ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01 -21T09.10.15/1234567890123. """ @@ -1190,20 +1208,22 @@ end describe_projects() describe_projects(params::Dict{String,<:Any}) -Gets information about your Amazon Rekognition Custom Labels projects. This operation -requires permissions to perform the rekognition:DescribeProjects action. +Gets information about your Rekognition projects. This operation requires permissions to +perform the rekognition:DescribeProjects action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Features"`: Specifies the type of customization to filter projects by. If no value is + specified, CUSTOM_LABELS is used as a default. - `"MaxResults"`: The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100. - `"NextToken"`: If the previous response was incomplete (because there is more results to - retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You - can use this pagination token to retrieve the next set of results. -- `"ProjectNames"`: A list of the projects that you want Amazon Rekognition Custom Labels - to describe. If you don't specify a value, the response includes descriptions for all the - projects in your AWS account. + retrieve), Rekognition returns a pagination token in the response. You can use this + pagination token to retrieve the next set of results. +- `"ProjectNames"`: A list of the projects that you want Rekognition to describe. If you + don't specify a value, the response includes descriptions for all the projects in your AWS + account. 
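# Example
A minimal filtering sketch, assuming the bindings are loaded through AWS.jl's `@service` macro;
the feature name used as a filter and the page size are illustrative placeholders:

    using AWS
    @service Rekognition   # module name assumed from AWS.jl's @service convention

    # Return only projects created for one feature type, 10 per page (placeholder values).
    Rekognition.describe_projects(
        Dict{String,Any}("Features" => ["CONTENT_MODERATION"], "MaxResults" => 10)
    )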
""" function describe_projects(; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -1253,16 +1273,18 @@ end detect_custom_labels(image, project_version_arn) detect_custom_labels(image, project_version_arn, params::Dict{String,<:Any}) -Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels -model. You specify which version of a model version to use by using the ProjectVersionArn -input parameter. You pass the input image as base64-encoded image bytes or as a reference -to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition -operations, passing image bytes is not supported. The image must be either a PNG or JPEG -formatted file. For each object that the model version detects on an image, the API -returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides -the label name (Name), the level of confidence that the image contains the object -(Confidence), and object location information, if it exists, for the label on the image -(Geometry). To filter labels that are returned, specify a value for MinConfidence. + This operation applies only to Amazon Rekognition Custom Labels. Detects custom labels in +a supplied image by using an Amazon Rekognition Custom Labels model. You specify which +version of a model version to use by using the ProjectVersionArn input parameter. You pass +the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 +bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes +is not supported. The image must be either a PNG or JPEG formatted file. For each object +that the model version detects on an image, the API returns a (CustomLabel) object in an +array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of +confidence that the image contains the object (Confidence), and object location +information, if it exists, for the label on the image (Geometry). Note that for the +DetectCustomLabelsLabels operation, Polygons are not returned in the Geometry section of +the response. To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition @@ -1280,7 +1302,10 @@ Developer Guide. # Arguments - `image`: -- `project_version_arn`: The ARN of the model version that you want to use. +- `project_version_arn`: The ARN of the model version that you want to use. Only models + associated with Custom Labels projects accepted by the operation. If a provided ARN refers + to a model version associated with a project for a different feature type, then an + InvalidParameterException is returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1360,7 +1385,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys request for all facial attributes by using [\"ALL\"]. Requesting more attributes may increase response time. If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical \"AND\" operator to determine which attributes to return (in this case, all - attributes). + attributes). 
Note that while the FaceOccluded and EyeDirection attributes are supported + when using DetectFaces, they aren't supported when analyzing videos with StartFaceDetection + and GetFaceDetection. """ function detect_faces(Image; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -1399,43 +1426,44 @@ will ensure the response includes information about the image quality and color. GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive -filters. For more information on filtering see Detecting Labels in an Image. You can -specify MinConfidence to control the confidence threshold for the labels returned. The -default is 55%. You can also add the MaxLabels parameter to limit the number of labels -returned. The default and upper limit is 1000 labels. Response Elements For each object, -scene, and concept the API returns one or more labels. The API returns the following types -of information about labels: Name - The name of the detected label. Confidence - The -level of confidence in the label assigned to a detected object. Parents - The ancestor -labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected -labels. For example, a detected car might be assigned the label car. The label car has two -parent labels: Vehicle (its parent) and Transportation (its grandparent). The response -includes the all ancestors for a label, where every ancestor is a unique label. In the -previous example, Car, Vehicle, and Transportation are returned as unique labels in the -response. Aliases - Possible Aliases for the label. Categories - The label -categories that the detected label belongs to. BoundingBox — Bounding boxes are -described for all instances of detected common object labels, returned in an array of -Instance objects. An Instance object contains a BoundingBox object, describing the location -of the label on the input image. It also includes the confidence for the accuracy of the -detected bounding box. The API returns the following information regarding the image, -as part of the ImageProperties structure: Quality - Information about the Sharpness, -Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is -returned for the entire image, as well as the background and the foreground. Dominant -Color - An array of the dominant colors in the image. Foreground - Information about the -sharpness, brightness, and dominant colors of the input image’s foreground. Background -- Information about the sharpness, brightness, and dominant colors of the input image’s -background. The list of returned labels will include at least one label for every -detected object, along with information about that label. In the following example, suppose -the input image has a lighthouse, the sea, and a rock. The response includes all three -labels, one for each object, as well as the confidence in the label: {Name: lighthouse, -Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} -The list of labels can include multiple labels for the same object. For example, if the -input image shows a flower (for example, a tulip), the operation might return the following -three labels. 
{Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} -{Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely -identifies the flower as a tulip. If the object detected is a person, the operation -doesn't provide the same facial details that the DetectFaces operation provides. This is a -stateless API operation that doesn't return any data. This operation requires permissions -to perform the rekognition:DetectLabels action. +filters. For more information on filtering see Detecting Labels in an Image. When getting +labels, you can specify MinConfidence to control the confidence threshold for the labels +returned. The default is 55%. You can also add the MaxLabels parameter to limit the number +of labels returned. The default and upper limit is 1000 labels. These arguments are only +valid when supplying GENERAL_LABELS as a feature type. Response Elements For each +object, scene, and concept the API returns one or more labels. The API returns the +following types of information about labels: Name - The name of the detected label. +Confidence - The level of confidence in the label assigned to a detected object. +Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical +taxonomy of detected labels. For example, a detected car might be assigned the label car. +The label car has two parent labels: Vehicle (its parent) and Transportation (its +grandparent). The response includes the all ancestors for a label, where every ancestor is +a unique label. In the previous example, Car, Vehicle, and Transportation are returned as +unique labels in the response. Aliases - Possible Aliases for the label. Categories +- The label categories that the detected label belongs to. BoundingBox — Bounding +boxes are described for all instances of detected common object labels, returned in an +array of Instance objects. An Instance object contains a BoundingBox object, describing the +location of the label on the input image. It also includes the confidence for the accuracy +of the detected bounding box. The API returns the following information regarding the +image, as part of the ImageProperties structure: Quality - Information about the +Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image +quality is returned for the entire image, as well as the background and the foreground. +Dominant Color - An array of the dominant colors in the image. Foreground - Information +about the sharpness, brightness, and dominant colors of the input image’s foreground. +Background - Information about the sharpness, brightness, and dominant colors of the input +image’s background. The list of returned labels will include at least one label for +every detected object, along with information about that label. In the following example, +suppose the input image has a lighthouse, the sea, and a rock. The response includes all +three labels, one for each object, as well as the confidence in the label: {Name: +lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: +sea,Confidence: 75.061} The list of labels can include multiple labels for the same +object. For example, if the input image shows a flower (for example, a tulip), the +operation might return the following three labels. {Name: flower,Confidence: 99.0562} +{Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the +detection algorithm more precisely identifies the flower as a tulip. 
If the object +detected is a person, the operation doesn't provide the same facial details that the +DetectFaces operation provides. This is a stateless API operation that doesn't return any +data. This operation requires permissions to perform the rekognition:DetectLabels action. # Arguments - `image`: The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI @@ -1451,11 +1479,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys regarding image color and quality. If no option is specified GENERAL_LABELS is used by default. - `"MaxLabels"`: Maximum number of labels you want the service to return in the response. - The service returns the specified number of highest confidence labels. + The service returns the specified number of highest confidence labels. Only valid when + GENERAL_LABELS is specified as a feature type in the Feature input parameter. - `"MinConfidence"`: Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value. If MinConfidence is not specified, the operation returns labels with a confidence - values greater than or equal to 55 percent. + values greater than or equal to 55 percent. Only valid when GENERAL_LABELS is specified as + a feature type in the Feature input parameter. - `"Settings"`: A list of the filters to be applied to returned detected labels and image properties. Specified filters can be inclusive, exclusive, or a combination of both. Filters can be used for individual labels or label categories. The exact label names or @@ -1493,7 +1523,8 @@ appropriate. For information about moderation labels, see Detecting Unsafe Conte Amazon Rekognition Developer Guide. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be -either a PNG or JPEG formatted file. +either a PNG or JPEG formatted file. You can specify an adapter to use when retrieving +label predictions by providing a ProjectVersionArn to the ProjectVersion argument. # Arguments - `image`: The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI @@ -1510,6 +1541,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon Rekognition doesn't return any labels with a confidence level lower than this specified value. If you don't specify MinConfidence, the operation returns labels with confidence values greater than or equal to 50 percent. +- `"ProjectVersion"`: Identifier for the custom adapter. Expects the ProjectVersionArn as a + value. Use the CreateProject or CreateProjectVersion APIs to create a custom adapter. """ function detect_moderation_labels(Image; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -1705,16 +1738,17 @@ end distribute_dataset_entries(datasets) distribute_dataset_entries(datasets, params::Dict{String,<:Any}) -Distributes the entries (images) in a training dataset across the training dataset and the -test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset -images to the test dataset. An entry is a JSON Line that describes an image. You supply -the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The -training dataset must contain the images that you want to split. The test dataset must be -empty. 
The datasets must belong to the same project. To create training and test datasets -for a project, call CreateDataset. Distributing a dataset takes a while to complete. To -check the status call DescribeDataset. The operation is complete when the Status field for -the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, -the value of Status is UPDATE_FAILED. This operation requires permissions to perform the + This operation applies only to Amazon Rekognition Custom Labels. Distributes the entries +(images) in a training dataset across the training dataset and the test dataset for a +project. DistributeDatasetEntries moves 20% of the training dataset images to the test +dataset. An entry is a JSON Line that describes an image. You supply the Amazon Resource +Names (ARN) of a project's training dataset and test dataset. The training dataset must +contain the images that you want to split. The test dataset must be empty. The datasets +must belong to the same project. To create training and test datasets for a project, call +CreateDataset. Distributing a dataset takes a while to complete. To check the status call +DescribeDataset. The operation is complete when the Status field for the training dataset +and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is +UPDATE_FAILED. This operation requires permissions to perform the rekognition:DistributeDatasetEntries action. # Arguments @@ -1935,7 +1969,9 @@ the time the faces were detected. Use MaxResults parameter to limit the number returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request -parameter with the token value returned from the previous call to GetFaceDetection. +parameter with the token value returned from the previous call to GetFaceDetection. Note +that for the GetFaceDetection operation, the returned values for FaceOccluded and +EyeDirection will always be \"null\". # Arguments - `job_id`: Unique identifier for the face detection job. The JobId is returned from @@ -1977,7 +2013,9 @@ Retrieves the results of a specific Face Liveness session. It requires the sessi input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges -from 0 to 100. The reference image can optionally be returned. +from 0 to 100. The number of audit images returned by GetFaceLivenessSessionResults is +defined by the AuditImagesLimit paramater when calling CreateFaceLivenessSession. Reference +images are always returned when possible. # Arguments - `session_id`: A unique 128-bit UUID. This is used to uniquely identify the session and @@ -2150,6 +2188,37 @@ function get_label_detection( ) end +""" + get_media_analysis_job(job_id) + get_media_analysis_job(job_id, params::Dict{String,<:Any}) + +Retrieves the results for a given media analysis job. Takes a JobId returned by +StartMediaAnalysisJob. + +# Arguments +- `job_id`: Unique identifier for the media analysis job for which you want to retrieve + results. 
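# Example
A minimal retrieval sketch, assuming the bindings are loaded through AWS.jl's `@service` macro;
the job ID is a placeholder and the `"Status"` response key is an assumption about the API shape:

    using AWS
    @service Rekognition   # module name assumed from AWS.jl's @service convention

    # "my-media-analysis-job-id" stands in for a JobId returned by start_media_analysis_job.
    job = Rekognition.get_media_analysis_job("my-media-analysis-job-id")
    job["Status"]   # response key assumed; inspect the returned value for the full job details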
+ +""" +function get_media_analysis_job(JobId; aws_config::AbstractAWSConfig=global_aws_config()) + return rekognition( + "GetMediaAnalysisJob", + Dict{String,Any}("JobId" => JobId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_media_analysis_job( + JobId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rekognition( + "GetMediaAnalysisJob", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("JobId" => JobId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_person_tracking(job_id) get_person_tracking(job_id, params::Dict{String,<:Any}) @@ -2279,7 +2348,7 @@ initial call to StartTextDetection. To get the results of the text detection ope first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection. GetTextDetection returns an array of detected text (TextDetections) -sorted by the time the text was detected, up to 50 words per frame of video. Each element +sorted by the time the text was detected, up to 100 words per frame of video. Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines. Use MaxResults parameter to @@ -2474,16 +2543,16 @@ end list_dataset_entries(dataset_arn) list_dataset_entries(dataset_arn, params::Dict{String,<:Any}) - Lists the entries (images) within a dataset. An entry is a JSON Line that contains the -information for a single image, including the image location, assigned labels, and object -location bounding boxes. For more information, see Creating a manifest file. JSON Lines in -the response include information about non-terminal errors found in the dataset. Non -terminal errors are reported in errors lists within each JSON Line. The same information is -reported in the training and testing validation result manifests that Amazon Rekognition -Custom Labels creates during model training. You can filter the response in variety of -ways, such as choosing which labels to return and returning JSON Lines created after a -specific date. This operation requires permissions to perform the -rekognition:ListDatasetEntries action. + This operation applies only to Amazon Rekognition Custom Labels. Lists the entries +(images) within a dataset. An entry is a JSON Line that contains the information for a +single image, including the image location, assigned labels, and object location bounding +boxes. For more information, see Creating a manifest file. JSON Lines in the response +include information about non-terminal errors found in the dataset. Non terminal errors are +reported in errors lists within each JSON Line. The same information is reported in the +training and testing validation result manifests that Amazon Rekognition Custom Labels +creates during model training. You can filter the response in variety of ways, such as +choosing which labels to return and returning JSON Lines created after a specific date. +This operation requires permissions to perform the rekognition:ListDatasetEntries action. # Arguments - `dataset_arn`: The Amazon Resource Name (ARN) for the dataset that you want to use. 
@@ -2536,10 +2605,11 @@ end list_dataset_labels(dataset_arn) list_dataset_labels(dataset_arn, params::Dict{String,<:Any}) -Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe -images. For more information, see Labeling images. Lists the labels in a dataset. Amazon -Rekognition Custom Labels uses labels to describe images. For more information, see -Labeling images in the Amazon Rekognition Custom Labels Developer Guide. + This operation applies only to Amazon Rekognition Custom Labels. Lists the labels in a +dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more +information, see Labeling images. Lists the labels in a dataset. Amazon Rekognition +Custom Labels uses labels to describe images. For more information, see Labeling images in +the Amazon Rekognition Custom Labels Developer Guide. # Arguments - `dataset_arn`: The Amazon Resource Name (ARN) of the dataset that you want to use. @@ -2591,12 +2661,14 @@ rekognition:ListFaces action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"FaceIds"`: An array of face IDs to match when listing faces in a collection. +- `"FaceIds"`: An array of face IDs to filter results with when listing faces in a + collection. - `"MaxResults"`: Maximum number of faces to return. - `"NextToken"`: If the previous response was incomplete (because there is more data to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of faces. -- `"UserId"`: An array of user IDs to match when listing faces in a collection. +- `"UserId"`: An array of user IDs to filter results with when listing faces in a + collection. """ function list_faces(CollectionId; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -2621,14 +2693,44 @@ function list_faces( ) end +""" + list_media_analysis_jobs() + list_media_analysis_jobs(params::Dict{String,<:Any}) + +Returns a list of media analysis jobs. Results are sorted by CreationTimestamp in +descending order. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per paginated call. The largest + value user can specify is 100. If user specifies a value greater than 100, an + InvalidParameterException error occurs. The default value is 100. +- `"NextToken"`: Pagination token, if the previous response was incomplete. +""" +function list_media_analysis_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return rekognition( + "ListMediaAnalysisJobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_media_analysis_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return rekognition( + "ListMediaAnalysisJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_project_policies(project_arn) list_project_policies(project_arn, params::Dict{String,<:Any}) -Gets a list of the project policies attached to a project. To attach a project policy to a -project, call PutProjectPolicy. To remove a project policy from a project, call -DeleteProjectPolicy. This operation requires permissions to perform the -rekognition:ListProjectPolicies action. + This operation applies only to Amazon Rekognition Custom Labels. Gets a list of the +project policies attached to a project. 
To attach a project policy to a project, call +PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This +operation requires permissions to perform the rekognition:ListProjectPolicies action. # Arguments - `project_arn`: The ARN of the project for which you want to list the project policies. @@ -2780,18 +2882,19 @@ end put_project_policy(policy_document, policy_name, project_arn) put_project_policy(policy_document, policy_name, project_arn, params::Dict{String,<:Any}) -Attaches a project policy to a Amazon Rekognition Custom Labels project in a trusting AWS -account. A project policy specifies that a trusted AWS account can copy a model version -from a trusting AWS account to a project in the trusted AWS account. To copy a model -version you use the CopyProjectVersion operation. For more information about the format of -a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition -Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the -project policy. You can attach multiple project policies to a project. You can also update -an existing project policy by specifying the policy revision ID of the existing policy. To -remove a project policy from a project, call DeleteProjectPolicy. To get a list of project -policies attached to a project, call ListProjectPolicies. You copy a model version by -calling CopyProjectVersion. This operation requires permissions to perform the -rekognition:PutProjectPolicy action. + This operation applies only to Amazon Rekognition Custom Labels. Attaches a project +policy to a Amazon Rekognition Custom Labels project in a trusting AWS account. A project +policy specifies that a trusted AWS account can copy a model version from a trusting AWS +account to a project in the trusted AWS account. To copy a model version you use the +CopyProjectVersion operation. Only applies to Custom Labels projects. For more information +about the format of a project policy document, see Attaching a project policy (SDK) in the +Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a +revision ID for the project policy. You can attach multiple project policies to a project. +You can also update an existing project policy by specifying the policy revision ID of the +existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To +get a list of project policies attached to a project, call ListProjectPolicies. You copy a +model version by calling CopyProjectVersion. This operation requires permissions to perform +the rekognition:PutProjectPolicy action. # Arguments - `policy_document`: A resource policy to add to the model. The policy is a JSON structure @@ -3450,6 +3553,70 @@ function start_label_detection( ) end +""" + start_media_analysis_job(input, operations_config, output_config) + start_media_analysis_job(input, operations_config, output_config, params::Dict{String,<:Any}) + +Initiates a new media analysis job. Accepts a manifest file in an Amazon S3 bucket. The +output is a manifest file and a summary of the manifest stored in the Amazon S3 bucket. + +# Arguments +- `input`: Input data to be analyzed by the job. +- `operations_config`: Configuration options for the media analysis job to be created. +- `output_config`: The Amazon S3 bucket location to store the results. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ClientRequestToken"`: Idempotency token used to prevent the accidental creation of + duplicate versions. If you use the same token with multiple StartMediaAnalysisJobRequest + requests, the same response is returned. Use ClientRequestToken to prevent the same request + from being processed more than once. +- `"JobName"`: The name of the job. Does not have to be unique. +- `"KmsKeyId"`: The identifier of customer managed AWS KMS key (name or ARN). The key is + used to encrypt images copied into the service. The key is also used to encrypt results and + manifest files written to the output Amazon S3 bucket. +""" +function start_media_analysis_job( + Input, OperationsConfig, OutputConfig; aws_config::AbstractAWSConfig=global_aws_config() +) + return rekognition( + "StartMediaAnalysisJob", + Dict{String,Any}( + "Input" => Input, + "OperationsConfig" => OperationsConfig, + "OutputConfig" => OutputConfig, + "ClientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_media_analysis_job( + Input, + OperationsConfig, + OutputConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rekognition( + "StartMediaAnalysisJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Input" => Input, + "OperationsConfig" => OperationsConfig, + "OutputConfig" => OutputConfig, + "ClientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_person_tracking(video) start_person_tracking(video, params::Dict{String,<:Any}) @@ -3504,21 +3671,17 @@ end start_project_version(min_inference_units, project_version_arn) start_project_version(min_inference_units, project_version_arn, params::Dict{String,<:Any}) -Starts the running of the version of a model. Starting a model takes a while to complete. -To check the current state of the model, use DescribeProjectVersions. Once the model is -running, you can detect custom labels in new images by calling DetectCustomLabels. You are -charged for the amount of time that the model is running. To stop a running model, call -StopProjectVersion. For more information, see Running a trained Amazon Rekognition Custom -Labels model in the Amazon Rekognition Custom Labels Guide. This operation requires -permissions to perform the rekognition:StartProjectVersion action. + This operation applies only to Amazon Rekognition Custom Labels. Starts the running of +the version of a model. Starting a model takes a while to complete. To check the current +state of the model, use DescribeProjectVersions. Once the model is running, you can detect +custom labels in new images by calling DetectCustomLabels. You are charged for the amount +of time that the model is running. To stop a running model, call StopProjectVersion. This +operation requires permissions to perform the rekognition:StartProjectVersion action. # Arguments - `min_inference_units`: The minimum number of inference units to use. A single inference - unit represents 1 hour of processing. For information about the number of transactions per - second (TPS) that an inference unit can support, see Running a trained Amazon Rekognition - Custom Labels model in the Amazon Rekognition Custom Labels Guide. Use a higher number to - increase the TPS throughput of your model. You are charged for the number of inference - units that you use. + unit represents 1 hour of processing. 
Use a higher number to increase the TPS throughput + of your model. You are charged for the number of inference units that you use. - `project_version_arn`: The Amazon Resource Name(ARN) of the model version that you want to start. @@ -3728,14 +3891,15 @@ end stop_project_version(project_version_arn) stop_project_version(project_version_arn, params::Dict{String,<:Any}) -Stops a running model. The operation might take a while to complete. To check the current -status, call DescribeProjectVersions. This operation requires permissions to perform the -rekognition:StopProjectVersion action. + This operation applies only to Amazon Rekognition Custom Labels. Stops a running model. +The operation might take a while to complete. To check the current status, call +DescribeProjectVersions. Only applies to Custom Labels projects. This operation requires +permissions to perform the rekognition:StopProjectVersion action. # Arguments - `project_version_arn`: The Amazon Resource Name (ARN) of the model version that you want - to delete. This operation requires permissions to perform the - rekognition:StopProjectVersion action. + to stop. This operation requires permissions to perform the rekognition:StopProjectVersion + action. """ function stop_project_version( @@ -3884,22 +4048,23 @@ end update_dataset_entries(changes, dataset_arn) update_dataset_entries(changes, dataset_arn, params::Dict{String,<:Any}) -Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which -contains the information for a single image, including the image location, assigned labels, -and object location bounding boxes. For more information, see Image-Level labels in -manifest files and Object localization in manifest files in the Amazon Rekognition Custom -Labels Developer Guide. If the source-ref field in the JSON line references an existing -image, the existing image in the dataset is updated. If source-ref field doesn't reference -an existing image, the image is added as a new image to the dataset. You specify the -changes that you want to make in the Changes input parameter. There isn't a limit to the -number JSON Lines that you can change, but the size of Changes must be less than 5MB. -UpdateDatasetEntries returns immediatly, but the dataset update might take a while to -complete. Use DescribeDataset to check the current status. The dataset updated successfully -if the value of Status is UPDATE_COMPLETE. To check if any non-terminal errors occured, -call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. -Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). Currently, you -can't access the terminal error information from the Amazon Rekognition Custom Labels SDK. -This operation requires permissions to perform the rekognition:UpdateDatasetEntries action. + This operation applies only to Amazon Rekognition Custom Labels. Adds or updates one or +more entries (images) in a dataset. An entry is a JSON Line which contains the information +for a single image, including the image location, assigned labels, and object location +bounding boxes. For more information, see Image-Level labels in manifest files and Object +localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide. If +the source-ref field in the JSON line references an existing image, the existing image in +the dataset is updated. If the source-ref field doesn't reference an existing image, the image +is added as a new image to the dataset. 
You specify the changes that you want to make in +the Changes input parameter. There isn't a limit to the number of JSON Lines that you can +change, but the size of Changes must be less than 5MB. UpdateDatasetEntries returns +immediately, but the dataset update might take a while to complete. Use DescribeDataset to +check the current status. The dataset updated successfully if the value of Status is +UPDATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and +check for the presence of errors lists in the JSON Lines. Dataset update fails if a +terminal error occurs (Status = UPDATE_FAILED). Currently, you can't access the terminal +error information from the Amazon Rekognition Custom Labels SDK. This operation requires +permissions to perform the rekognition:UpdateDatasetEntries action. # Arguments - `changes`: The changes that you want to make to the dataset. diff --git a/src/services/repostspace.jl b/src/services/repostspace.jl new file mode 100644 index 0000000000..dc0f35f1bb --- /dev/null +++ b/src/services/repostspace.jl @@ -0,0 +1,412 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: repostspace +using AWS.Compat +using AWS.UUIDs + +""" + create_space(name, subdomain, tier) + create_space(name, subdomain, tier, params::Dict{String,<:Any}) + +Creates an AWS re:Post Private private re:Post. + +# Arguments +- `name`: The name for the private re:Post. This must be unique in your account. +- `subdomain`: The subdomain that you use to access your AWS re:Post Private private + re:Post. All custom subdomains must be approved by AWS before use. In addition to your + custom subdomain, all private re:Posts are issued an AWS generated subdomain for immediate + use. +- `tier`: The pricing tier for the private re:Post. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description for the private re:Post. This is used only to help you + identify this private re:Post. +- `"roleArn"`: The IAM role that grants permissions to the private re:Post to convert + unanswered questions into AWS support tickets. +- `"tags"`: The list of tags associated with the private re:Post. +- `"userKMSKey"`: The AWS KMS key ARN that’s used for the AWS KMS encryption. If you + don't provide a key, your data is encrypted by default with a key that AWS owns and manages + for you. +""" +function create_space( + name, subdomain, tier; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "POST", + "/spaces", + Dict{String,Any}("name" => name, "subdomain" => subdomain, "tier" => tier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_space( + name, + subdomain, + tier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return repostspace( + "POST", + "/spaces", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "subdomain" => subdomain, "tier" => tier), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_space(space_id) + delete_space(space_id, params::Dict{String,<:Any}) + +Deletes an AWS re:Post Private private re:Post. + +# Arguments +- `space_id`: The unique ID of the private re:Post. 
+ +""" +function delete_space(spaceId; aws_config::AbstractAWSConfig=global_aws_config()) + return repostspace( + "DELETE", + "/spaces/$(spaceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_space( + spaceId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "DELETE", + "/spaces/$(spaceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + deregister_admin(admin_id, space_id) + deregister_admin(admin_id, space_id, params::Dict{String,<:Any}) + +Removes the user or group from the list of administrators of the private re:Post. + +# Arguments +- `admin_id`: The ID of the admin to remove. +- `space_id`: The ID of the private re:Post to remove the admin from. + +""" +function deregister_admin( + adminId, spaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "DELETE", + "/spaces/$(spaceId)/admins/$(adminId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function deregister_admin( + adminId, + spaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return repostspace( + "DELETE", + "/spaces/$(spaceId)/admins/$(adminId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_space(space_id) + get_space(space_id, params::Dict{String,<:Any}) + +Displays information about the AWS re:Post Private private re:Post. + +# Arguments +- `space_id`: The ID of the private re:Post. + +""" +function get_space(spaceId; aws_config::AbstractAWSConfig=global_aws_config()) + return repostspace( + "GET", "/spaces/$(spaceId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_space( + spaceId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "GET", + "/spaces/$(spaceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_spaces() + list_spaces(params::Dict{String,<:Any}) + +Returns a list of AWS re:Post Private private re:Posts in the account with some information +about each private re:Post. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of private re:Posts to include in the results. +- `"nextToken"`: The token for the next set of private re:Posts to return. You receive this + token from a previous ListSpaces operation. +""" +function list_spaces(; aws_config::AbstractAWSConfig=global_aws_config()) + return repostspace( + "GET", "/spaces"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_spaces( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "GET", "/spaces", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns the tags that are associated with the AWS re:Post Private resource specified by the +resourceArn. The only resource that can be tagged is a private re:Post. + +# Arguments +- `resource_arn`: The ARN of the resource that the tags are associated with. 
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return repostspace( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + register_admin(admin_id, space_id) + register_admin(admin_id, space_id, params::Dict{String,<:Any}) + +Adds a user or group to the list of administrators of the private re:Post. + +# Arguments +- `admin_id`: The ID of the administrator. +- `space_id`: The ID of the private re:Post. + +""" +function register_admin(adminId, spaceId; aws_config::AbstractAWSConfig=global_aws_config()) + return repostspace( + "POST", + "/spaces/$(spaceId)/admins/$(adminId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function register_admin( + adminId, + spaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return repostspace( + "POST", + "/spaces/$(spaceId)/admins/$(adminId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + send_invites(accessor_ids, body, space_id, title) + send_invites(accessor_ids, body, space_id, title, params::Dict{String,<:Any}) + +Sends an invitation email to selected users and groups. + +# Arguments +- `accessor_ids`: The array of identifiers for the users and groups. +- `body`: The body of the invite. +- `space_id`: The ID of the private re:Post. +- `title`: The title of the invite. + +""" +function send_invites( + accessorIds, body, spaceId, title; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "POST", + "/spaces/$(spaceId)/invite", + Dict{String,Any}("accessorIds" => accessorIds, "body" => body, "title" => title); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_invites( + accessorIds, + body, + spaceId, + title, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return repostspace( + "POST", + "/spaces/$(spaceId)/invite", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "accessorIds" => accessorIds, "body" => body, "title" => title + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Associates tags with an AWS re:Post Private resource. Currently, the only resource that can +be tagged is the private re:Post. If you specify a new tag key for the resource, the tag is +appended to the list of tags that are associated with the resource. If you specify a tag +key that’s already associated with the resource, the new tag value that you specify +replaces the previous value for that tag. + +# Arguments +- `resource_arn`: The ARN of the resource that the tag is associated with. +- `tags`: The list of tag keys and values that must be associated with the resource. You + can associate tag keys only, tags (key and values) only, or a combination of tag keys and + tags. 
+ +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return repostspace( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return repostspace( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes the association of the tag with the AWS re:Post Private resource. + +# Arguments +- `resource_arn`: The ARN of the resource. +- `tag_keys`: The key values of the tag. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return repostspace( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_space(space_id) + update_space(space_id, params::Dict{String,<:Any}) + +Modifies an existing AWS re:Post Private private re:Post. + +# Arguments +- `space_id`: The unique ID of this private re:Post. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description for the private re:Post. This is used only to help you + identify this private re:Post. +- `"roleArn"`: The IAM role that grants permissions to the private re:Post to convert + unanswered questions into AWS support tickets. +- `"tier"`: The pricing tier of this private re:Post. +""" +function update_space(spaceId; aws_config::AbstractAWSConfig=global_aws_config()) + return repostspace( + "PUT", "/spaces/$(spaceId)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_space( + spaceId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return repostspace( + "PUT", + "/spaces/$(spaceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/resiliencehub.jl b/src/services/resiliencehub.jl index b6db13734d..55f64d15ed 100644 --- a/src/services/resiliencehub.jl +++ b/src/services/resiliencehub.jl @@ -8,13 +8,16 @@ using AWS.UUIDs add_draft_app_version_resource_mappings(app_arn, resource_mappings) add_draft_app_version_resource_mappings(app_arn, resource_mappings, params::Dict{String,<:Any}) -Adds the resource mapping for the draft application version. You can also update an -existing resource mapping to a new physical resource. +Adds the source of resource-maps to the draft version of an application. During assessment, +Resilience Hub will use these resource-maps to resolve the latest physical ID for each +resource in the application template. For more information about different types of +resources suported by Resilience Hub and how to add them in your application, see Step 2: +How is your application managed? 
in the Resilience Hub User Guide. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `resource_mappings`: Mappings used to map logical resources from the template to physical resources. You can use the mapping type CFN_STACK if the application template uses a @@ -57,24 +60,72 @@ function add_draft_app_version_resource_mappings( ) end +""" + batch_update_recommendation_status(app_arn, request_entries) + batch_update_recommendation_status(app_arn, request_entries, params::Dict{String,<:Any}) + +Enables you to include or exclude one or more operational recommendations. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. +- `request_entries`: Defines the list of operational recommendations that need to be + included or excluded. + +""" +function batch_update_recommendation_status( + appArn, requestEntries; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/batch-update-recommendation-status", + Dict{String,Any}("appArn" => appArn, "requestEntries" => requestEntries); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_update_recommendation_status( + appArn, + requestEntries, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resiliencehub( + "POST", + "/batch-update-recommendation-status", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("appArn" => appArn, "requestEntries" => requestEntries), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_app(name) create_app(name, params::Dict{String,<:Any}) Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services -application disruptions. To describe an Resilience Hub application, you provide an +application disruptions. To describe a Resilience Hub application, you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, -Terraform state files, AppRegistry applications, and an appropriate resiliency policy. For -more information about the number of resources supported per application, see Service -Quotas. After you create an Resilience Hub application, you publish it so that you can run -a resiliency assessment on it. You can then use recommendations from the assessment to -improve resiliency by running another assessment, comparing results, and then iterating the -process until you achieve your goals for recovery time objective (RTO) and recovery point -objective (RPO). +Terraform state files, AppRegistry applications, and an appropriate resiliency policy. 
In +addition, you can also add resources that are located on Amazon Elastic Kubernetes Service +(Amazon EKS) clusters as optional resources. For more information about the number of +resources supported per application, see Service quotas. After you create a Resilience Hub +application, you publish it so that you can run a resiliency assessment on it. You can then +use recommendations from the assessment to improve resiliency by running another +assessment, comparing results, and then iterating the process until you achieve your goals +for recovery time objective (RTO) and recovery point objective (RPO). # Arguments -- `name`: The name for the application. +- `name`: Name of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -83,12 +134,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys case-sensitive string of up to 64 ASCII characters. You should not reuse the same client token for other API requests. - `"description"`: The optional description for an app. -- `"policyArn"`: The Amazon Resource Name (ARN) of the resiliency policy. The format for - this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For - more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference - guide. -- `"tags"`: The tags assigned to the resource. A tag is a label that you assign to an - Amazon Web Services resource. Each tag consists of a key/value pair. +- `"eventSubscriptions"`: The list of events you would like to subscribe to and get + notifications for. Currently, Resilience Hub supports only Drift detected and Scheduled + assessment failure event notifications. +- `"permissionModel"`: Defines the roles and credentials that Resilience Hub would use + while creating the application, importing its resources, and running an assessment. +- `"policyArn"`: Amazon Resource Name (ARN) of the resiliency policy. The format for this + ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more + information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services + General Reference guide. +- `"tags"`: Tags assigned to the resource. A tag is a label that you assign to an Amazon + Web Services resource. Each tag consists of a key/value pair. """ function create_app(name; aws_config::AbstractAWSConfig=global_aws_config()) return resiliencehub( @@ -127,12 +183,12 @@ assessments, you must publish the Resilience Hub application using the PublishAp API. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `name`: The name of the Application Component. -- `type`: The type of Application Component. For more information about the types of +- `name`: Name of the Application Component. +- `type`: Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent. 
# Optional Parameters @@ -142,7 +198,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"clientToken"`: Used for an idempotency token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. You should not reuse the same client token for other API requests. -- `"id"`: The identifier of the Application Component. +- `"id"`: Identifier of the Application Component. """ function create_app_version_app_component( appArn, name, type; aws_config::AbstractAWSConfig=global_aws_config() @@ -200,25 +256,25 @@ application using the PublishAppVersion API. To update application version wit physicalResourceID, you must call ResolveAppVersionResources API. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `app_components`: The list of Application Components that this resource belongs to. If an +- `app_components`: List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added. -- `logical_resource_id`: The logical identifier of the resource. -- `physical_resource_id`: The physical identifier of the resource. -- `resource_type`: The type of resource. +- `logical_resource_id`: Logical identifier of the resource. +- `physical_resource_id`: Physical identifier of the resource. +- `resource_type`: Type of resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"additionalInfo"`: Currently, there is no supported additional information for resources. -- `"awsAccountId"`: The Amazon Web Services account that owns the physical resource. -- `"awsRegion"`: The Amazon Web Services region that owns the physical resource. +- `"awsAccountId"`: Amazon Web Services account that owns the physical resource. +- `"awsRegion"`: Amazon Web Services region that owns the physical resource. - `"clientToken"`: Used for an idempotency token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. You should not reuse the same client token for other API requests. -- `"resourceName"`: The name of the resource. +- `"resourceName"`: Name of the resource. """ function create_app_version_resource( appArn, @@ -281,9 +337,9 @@ end Creates a new recommendation template for the Resilience Hub application. # Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `name`: The name for the recommendation template. @@ -301,8 +357,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"recommendationTypes"`: An array of strings that specify the recommendation template type or types. Alarm The template is an AlarmRecommendation template. Sop The template is a SopRecommendation template. Test The template is a TestRecommendation template. -- `"tags"`: The tags assigned to the resource. A tag is a label that you assign to an - Amazon Web Services resource. Each tag consists of a key/value pair. +- `"tags"`: Tags assigned to the resource. A tag is a label that you assign to an Amazon + Web Services resource. Each tag consists of a key/value pair. """ function create_recommendation_template( assessmentArn, name; aws_config::AbstractAWSConfig=global_aws_config() @@ -348,7 +404,12 @@ end create_resiliency_policy(policy, policy_name, tier) create_resiliency_policy(policy, policy_name, tier, params::Dict{String,<:Any}) -Creates a resiliency policy for an application. +Creates a resiliency policy for an application. Resilience Hub allows you to provide a +value of zero for rtoInSecs and rpoInSecs of your resiliency policy. But, while assessing +your application, the lowest possible assessment result is near zero. Hence, if you provide +value zero for rtoInSecs and rpoInSecs, the estimated workload RTO and estimated workload +RPO result will be near zero and the Compliance status for your application will be set to +Policy breached. # Arguments - `policy`: The type of resiliency policy to be created, including the recovery time @@ -365,8 +426,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dataLocationConstraint"`: Specifies a high-level geographical location constraint for where your resilience policy data can be stored. - `"policyDescription"`: The description for the policy. -- `"tags"`: The tags assigned to the resource. A tag is a label that you assign to an - Amazon Web Services resource. Each tag consists of a key/value pair. +- `"tags"`: Tags assigned to the resource. A tag is a label that you assign to an Amazon + Web Services resource. Each tag consists of a key/value pair. """ function create_resiliency_policy( policy, policyName, tier; aws_config::AbstractAWSConfig=global_aws_config() @@ -418,9 +479,9 @@ end Deletes an Resilience Hub application. This is a destructive action that can't be undone. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters @@ -465,9 +526,9 @@ Deletes an Resilience Hub application assessment. This is a destructive action t be undone. # Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. 
For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters @@ -519,9 +580,9 @@ Deletes the input source and all of its imported resources from the Resilience H application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters @@ -533,7 +594,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cluster that you want to delete from the Resilience Hub application. - `"sourceArn"`: The Amazon Resource Name (ARN) of the imported resource you want to remove from the Resilience Hub application. For more information about ARNs, see Amazon Resource - Names (ARNs) in the AWS General Reference guide. + Names (ARNs) in the Amazon Web Services General Reference guide. - `"terraformSource"`: The imported Terraform s3 state file you want to remove from the Resilience Hub application. """ @@ -575,11 +636,11 @@ API. You will not be able to delete an Application Component if it has resourc associated with it. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `id`: The identifier of the Application Component. +- `id`: Identifier of the Application Component. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -632,21 +693,21 @@ Hub application draft version. To use this resource for running resiliency asses must publish the Resilience Hub application using the PublishAppVersion API. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"awsAccountId"`: The Amazon Web Services account that owns the physical resource. -- `"awsRegion"`: The Amazon Web Services region that owns the physical resource. +- `"awsAccountId"`: Amazon Web Services account that owns the physical resource. +- `"awsRegion"`: Amazon Web Services region that owns the physical resource. - `"clientToken"`: Used for an idempotency token. 
A client token is a unique, case-sensitive string of up to 64 ASCII characters. You should not reuse the same client token for other API requests. -- `"logicalResourceId"`: The logical identifier of the resource. -- `"physicalResourceId"`: The physical identifier of the resource. -- `"resourceName"`: The name of the resource. +- `"logicalResourceId"`: Logical identifier of the resource. +- `"physicalResourceId"`: Physical identifier of the resource. +- `"resourceName"`: Name of the resource. """ function delete_app_version_resource( appArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -737,10 +798,10 @@ end Deletes a resiliency policy. This is a destructive action that can't be undone. # Arguments -- `policy_arn`: The Amazon Resource Name (ARN) of the resiliency policy. The format for - this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For - more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference - guide. +- `policy_arn`: Amazon Resource Name (ARN) of the resiliency policy. The format for this + ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more + information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services + General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -788,9 +849,9 @@ end Describes an Resilience Hub application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. """ @@ -822,9 +883,9 @@ end Describes an assessment for an Resilience Hub application. # Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. """ @@ -862,11 +923,11 @@ end Describes the Resilience Hub application version. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `app_version`: The Resilience Hub application version. +- `app_version`: Resilience Hub application version. """ function describe_app_version( @@ -908,12 +969,12 @@ end Describes an Application Component in the Resilience Hub application. 
# Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `app_version`: The Resilience Hub application version. -- `id`: The identifier of the Application Component. +- `app_version`: Resilience Hub application version. +- `id`: Identifier of the Application Component. """ function describe_app_version_app_component( @@ -961,19 +1022,19 @@ physicalResourceId (Along with physicalResourceId, you can also provide awsAccou awsRegion) # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `app_version`: The Resilience Hub application version. +- `app_version`: Resilience Hub application version. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"awsAccountId"`: The Amazon Web Services account that owns the physical resource. -- `"awsRegion"`: The Amazon Web Services region that owns the physical resource. -- `"logicalResourceId"`: The logical identifier of the resource. -- `"physicalResourceId"`: The physical identifier of the resource. -- `"resourceName"`: The name of the resource. +- `"awsAccountId"`: Amazon Web Services account that owns the physical resource. +- `"awsRegion"`: Amazon Web Services region that owns the physical resource. +- `"logicalResourceId"`: Logical identifier of the resource. +- `"physicalResourceId"`: Physical identifier of the resource. +- `"resourceName"`: Name of the resource. """ function describe_app_version_resource( appArn, appVersion; aws_config::AbstractAWSConfig=global_aws_config() @@ -1015,9 +1076,9 @@ Returns the resolution status for the specified resolution identifier for an app version. If resolutionId is not specified, the current resolution status is returned. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `app_version`: The version of the application. @@ -1064,9 +1125,9 @@ end Describes details about an Resilience Hub application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. 
For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `app_version`: The version of the application. @@ -1113,9 +1174,9 @@ importResourcesToDraftAppVersion after creating the application and before calli describeDraftAppVersionResourcesImportStatus to obtain the status. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. """ @@ -1151,10 +1212,10 @@ policy object includes creation time, data location constraints, the Amazon Reso (ARN) for the policy, tags, tier, and more. # Arguments -- `policy_arn`: The Amazon Resource Name (ARN) of the resiliency policy. The format for - this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For - more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference - guide. +- `policy_arn`: Amazon Resource Name (ARN) of the resiliency policy. The format for this + ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more + information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services + General Reference guide. """ function describe_resiliency_policy( @@ -1193,9 +1254,9 @@ For more information about the input sources supported by Resilience Hub, see Di structure and describe your Resilience Hub application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters @@ -1237,14 +1298,14 @@ end Lists the alarm recommendations for an Resilience Hub application. # Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. 
If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1276,6 +1337,97 @@ function list_alarm_recommendations( ) end +""" + list_app_assessment_compliance_drifts(assessment_arn) + list_app_assessment_compliance_drifts(assessment_arn, params::Dict{String,<:Any}) + +List of compliance drifts that were detected while running an assessment. + +# Arguments +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Indicates the maximum number of applications requested. +- `"nextToken"`: Indicates the unique token number of the next application to be checked + for compliance and regulatory requirements from the list of applications. +""" +function list_app_assessment_compliance_drifts( + assessmentArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/list-app-assessment-compliance-drifts", + Dict{String,Any}("assessmentArn" => assessmentArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_app_assessment_compliance_drifts( + assessmentArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resiliencehub( + "POST", + "/list-app-assessment-compliance-drifts", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("assessmentArn" => assessmentArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_app_assessment_resource_drifts(assessment_arn) + list_app_assessment_resource_drifts(assessment_arn, params::Dict{String,<:Any}) + +Indicates the list of resource drifts that were detected while running an assessment. + +# Arguments +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Indicates the maximum number of drift results to include in the response. + If more results exist than the specified MaxResults value, a token is included in the + response so that the remaining results can be retrieved. +- `"nextToken"`: Null, or the token from a previous call to get the next set of results. 
+""" +function list_app_assessment_resource_drifts( + assessmentArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/list-app-assessment-resource-drifts", + Dict{String,Any}("assessmentArn" => assessmentArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_app_assessment_resource_drifts( + assessmentArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resiliencehub( + "POST", + "/list-app-assessment-resource-drifts", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("assessmentArn" => assessmentArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_app_assessments() list_app_assessments(params::Dict{String,<:Any}) @@ -1285,16 +1437,16 @@ refine the results for the response object. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"appArn"`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `"appArn"`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `"assessmentName"`: The name for the assessment. - `"assessmentStatus"`: The current status of the assessment for the resiliency policy. - `"complianceStatus"`: The current status of compliance for the resiliency policy. - `"invoker"`: Specifies the entity that invoked a specific assessment, either a User or the System. -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1328,14 +1480,14 @@ end Lists the compliances for an Resilience Hub Application Component. # Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1374,14 +1526,14 @@ end Lists the recommendations for an Resilience Hub Application Component. 
# Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1422,11 +1574,11 @@ the input sources supported by Resilience Hub, see Discover the structure and de Resilience Hub application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `app_version`: The Resilience Hub application version. +- `app_version`: Resilience Hub application version. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1473,11 +1625,11 @@ end Lists all the Application Components in the Resilience Hub application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `app_version`: The version of the Application Component. +- `app_version`: Version of the Application Component. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1526,15 +1678,15 @@ physical resource identifiers, CloudFormation stacks, resource-groups, or an app registry app. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. 
- `app_version`: The version of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1578,15 +1730,15 @@ end Lists all the resources in an Resilience Hub application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `app_version`: The version of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1631,17 +1783,19 @@ end Lists the different versions for the Resilience Hub applications. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"endTime"`: Upper limit of the time range to filter the application versions. +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. +- `"startTime"`: Lower limit of the time range to filter the application versions. """ function list_app_versions(appArn; aws_config::AbstractAWSConfig=global_aws_config()) return resiliencehub( @@ -1675,15 +1829,22 @@ calling the ListApps operation: Only one filter is supported for this operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"appArn"`: The Amazon Resource Name (ARN) of the Resilience Hub application. 
The format
-  for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more
-  information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference
+- `"appArn"`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for
+  this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information
+  about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference
   guide.
-- `"maxResults"`: The maximum number of results to include in the response. If more results
+- `"fromLastAssessmentTime"`: Indicates the lower limit of the range that is used to filter
+  applications based on their last assessment times.
+- `"maxResults"`: Maximum number of results to include in the response. If more results
   exist than the specified MaxResults value, a token is included in the response so that the
   remaining results can be retrieved.
 - `"name"`: The name for one of the listed applications.
 - `"nextToken"`: Null, or the token from a previous call to get the next set of results.
+- `"reverseOrder"`: The application list is sorted based on the values of the
+  lastAppComplianceEvaluationTime field. By default, the application list is sorted in
+  ascending order. To sort the application list in descending order, set this field to True.
+- `"toLastAssessmentTime"`: Indicates the upper limit of the range that is used to filter
+  the applications based on their last assessment times.
 """
 function list_apps(; aws_config::AbstractAWSConfig=global_aws_config())
     return resiliencehub(
@@ -1699,20 +1860,18 @@ function list_apps(
 end

 """
-    list_recommendation_templates(assessment_arn)
-    list_recommendation_templates(assessment_arn, params::Dict{String,<:Any})
+    list_recommendation_templates()
+    list_recommendation_templates(params::Dict{String,<:Any})

 Lists the recommendation templates for the Resilience Hub applications.

-# Arguments
-- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this
-  ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more
-  information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference
-  guide.
-
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"maxResults"`: The maximum number of results to include in the response. If more results
+- `"assessmentArn"`: Amazon Resource Name (ARN) of the assessment. The format for this ARN
+  is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information
+  about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference
+  guide.
+- `"maxResults"`: Maximum number of results to include in the response. If more results
   exist than the specified MaxResults value, a token is included in the response so that the
   remaining results can be retrieved.
 - `"name"`: The name for one of the listed recommendation templates.
@@ -1721,30 +1880,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   template.
 - `"reverseOrder"`: The default is to sort by ascending startTime. To sort by descending
   startTime, set reverseOrder to true.
-- `"status"`: The status of the action.
+- `"status"`: Status of the action.
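+
+# Example
+A minimal usage sketch: the assessment ARN below is a placeholder, and an AWS
+configuration is assumed to already be set up via `global_aws_config()`.
+
+```julia
+# List up to 10 recommendation templates for a placeholder assessment ARN.
+list_recommendation_templates(Dict(
+    "assessmentArn" => "arn:aws:resiliencehub:us-east-1:111122223333:app-assessment/example",
+    "maxResults" => 10,
+))
+```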
""" -function list_recommendation_templates( - assessmentArn; aws_config::AbstractAWSConfig=global_aws_config() -) +function list_recommendation_templates(; aws_config::AbstractAWSConfig=global_aws_config()) return resiliencehub( "GET", - "/list-recommendation-templates", - Dict{String,Any}("assessmentArn" => assessmentArn); + "/list-recommendation-templates"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function list_recommendation_templates( - assessmentArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return resiliencehub( "GET", "/list-recommendation-templates", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("assessmentArn" => assessmentArn), params) - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1758,7 +1910,7 @@ Lists the resiliency policies for the Resilience Hub applications. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1792,14 +1944,14 @@ Lists the standard operating procedure (SOP) recommendations for the Resilience applications. # Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1839,7 +1991,7 @@ Lists the suggested resiliency policies for the Resilience Hub applications. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1908,14 +2060,14 @@ end Lists the test recommendations for the Resilience Hub application. # Arguments -- `assessment_arn`: The Amazon Resource Name (ARN) of the assessment. The format for this - ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. 
For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `assessment_arn`: Amazon Resource Name (ARN) of the assessment. The format for this ARN + is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -1956,15 +2108,15 @@ resource is a resource that exists in the object that was used to create an app, supported by Resilience Hub. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `app_version`: The version of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to include in the response. If more results +- `"maxResults"`: Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. - `"nextToken"`: Null, or the token from a previous call to get the next set of results. @@ -2009,11 +2161,14 @@ end Publishes a new version of a specific Resilience Hub application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"versionName"`: Name of the application version. """ function publish_app_version(appArn; aws_config::AbstractAWSConfig=global_aws_config()) return resiliencehub( @@ -2043,23 +2198,23 @@ end Adds or updates the app template for an Resilience Hub application draft version. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. 
The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `app_template_body`: A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section. The appTemplateBody JSON string has the following structure: resources The list of logical resources that must be included in the Resilience Hub application. Type: Array Don't add the resources that you want to exclude. - Each resources array item includes the following fields: logicalResourceId The - logical identifier of the resource. Type: Object Each logicalResourceId object includes the - following fields: identifier The identifier of the resource. Type: String + Each resources array item includes the following fields: logicalResourceId Logical + identifier of the resource. Type: Object Each logicalResourceId object includes the + following fields: identifier Identifier of the resource. Type: String logicalStackName The name of the CloudFormation stack this resource belongs to. Type: String resourceGroupName The name of the resource group this resource belongs to. Type: String terraformSourceName The name of the Terraform S3 state file this resource - belongs to. Type: String eksSourceName The name of the Amazon Elastic Kubernetes - Service cluster and namespace this resource belongs to. This parameter accepts values in + belongs to. Type: String eksSourceName Name of the Amazon Elastic Kubernetes Service + cluster and namespace this resource belongs to. This parameter accepts values in \"eks-cluster/namespace\" format. Type: String type The type of resource. Type: string name The name of the resource. Type: String additionalInfo Additional configuration parameters for an Resilience Hub application. If you want to implement @@ -2067,12 +2222,12 @@ Adds or updates the app template for an Resilience Hub application draft version Configure the application configuration parameters. Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account. Key: \"failover-regions\" Value: \"[{\"region\":\"<REGION>\", - \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\" appComponents The list of + \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\" appComponents List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added. Type: Array Each appComponents - array item includes the following fields: name The name of the Application Component. - Type: String type The type of Application Component. For more information about the - types of Application Component, see Grouping resources in an AppComponent. Type: String + array item includes the following fields: name Name of the Application Component. Type: + String type Type of Application Component. For more information about the types of + Application Component, see Grouping resources in an AppComponent. Type: String resourceNames The list of included resources that are assigned to the Application Component. Type: Array of strings additionalInfo Additional configuration parameters for an Resilience Hub application. 
If you want to implement additionalInfo through the @@ -2083,16 +2238,16 @@ Adds or updates the app template for an Resilience Hub application draft version \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\" excludedResources The list of logical resource identifiers to be excluded from the application. Type: Array Don't add the resources that you want to include. Each excludedResources array item includes the - following fields: logicalResourceIds The logical identifier of the resource. Type: - Object You can configure only one of the following fields: logicalStackName + following fields: logicalResourceIds Logical identifier of the resource. Type: Object + You can configure only one of the following fields: logicalStackName resourceGroupName terraformSourceName eksSourceName Each logicalResourceIds - object includes the following fields: identifier The identifier of the resource. Type: + object includes the following fields: identifier Identifier of the resource. Type: String logicalStackName The name of the CloudFormation stack this resource belongs to. Type: String resourceGroupName The name of the resource group this resource belongs to. Type: String terraformSourceName The name of the Terraform S3 state file this resource - belongs to. Type: String eksSourceName The name of the Amazon Elastic Kubernetes - Service cluster and namespace this resource belongs to. This parameter accepts values in - \"eks-cluster/namespace\" format. Type: String version The Resilience Hub + belongs to. Type: String eksSourceName Name of the Amazon Elastic Kubernetes Service + cluster and namespace this resource belongs to. This parameter accepts values in + \"eks-cluster/namespace\" format. Type: String version Resilience Hub application version. additionalInfo Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration @@ -2140,9 +2295,9 @@ end Removes resource mappings from a draft application version. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters @@ -2191,9 +2346,9 @@ end Resolves the resources for an application version. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `app_version`: The version of the application. @@ -2237,9 +2392,9 @@ end Creates a new application assessment for an application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. 
The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - `app_version`: The version of the application. - `assessment_name`: The name for the assessment. @@ -2249,8 +2404,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"clientToken"`: Used for an idempotency token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. You should not reuse the same client token for other API requests. -- `"tags"`: The tags assigned to the resource. A tag is a label that you assign to an - Amazon Web Services resource. Each tag consists of a key/value pair. +- `"tags"`: Tags assigned to the resource. A tag is a label that you assign to an Amazon + Web Services resource. Each tag consists of a key/value pair. """ function start_app_assessment( appArn, appVersion, assessmentName; aws_config::AbstractAWSConfig=global_aws_config() @@ -2302,7 +2457,7 @@ end Applies one or more tags to a resource. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `resource_arn`: Amazon Resource Name (ARN) of the resource. - `tags`: The tags to assign to the resource. Each tag consists of a key/value pair. """ @@ -2337,7 +2492,7 @@ end Removes one or more tags from a resource. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `resource_arn`: Amazon Resource Name (ARN) of the resource. - `tag_keys`: The keys of the tags you want to remove. """ @@ -2374,9 +2529,9 @@ end Updates an application. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters @@ -2384,10 +2539,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"assessmentSchedule"`: Assessment execution schedule with 'Daily' or 'Disabled' values. - `"clearResiliencyPolicyArn"`: Specifies if the resiliency policy ARN should be cleared. - `"description"`: The optional description for an app. -- `"policyArn"`: The Amazon Resource Name (ARN) of the resiliency policy. The format for - this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For - more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference - guide. +- `"eventSubscriptions"`: The list of events you would like to subscribe and get + notification for. Currently, Resilience Hub supports notifications only for Drift detected + and Scheduled assessment failure events. +- `"permissionModel"`: Defines the roles and credentials that Resilience Hub would use + while creating an application, importing its resources, and running an assessment. 
+- `"policyArn"`: Amazon Resource Name (ARN) of the resiliency policy. The format for this + ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more + information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services + General Reference guide. """ function update_app(appArn; aws_config::AbstractAWSConfig=global_aws_config()) return resiliencehub( @@ -2419,9 +2579,9 @@ application draft version. To use this information for running resiliency assess must publish the Resilience Hub application using the PublishAppVersion API. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters @@ -2464,18 +2624,18 @@ running assessments, you must publish the Resilience Hub application using the PublishAppVersion API. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. -- `id`: The identifier of the Application Component. +- `id`: Identifier of the Application Component. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"additionalInfo"`: Currently, there is no supported additional information for Application Components. -- `"name"`: The name of the Application Component. -- `"type"`: The type of Application Component. For more information about the types of +- `"name"`: Name of the Application Component. +- `"type"`: Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent. """ function update_app_version_app_component( @@ -2517,24 +2677,24 @@ Resilience Hub application using the PublishAppVersion API. To update applicat with new physicalResourceID, you must call ResolveAppVersionResources API. # Arguments -- `app_arn`: The Amazon Resource Name (ARN) of the Resilience Hub application. The format - for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more - information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"additionalInfo"`: Currently, there is no supported additional information for resources. 
-- `"appComponents"`: The list of Application Components that this resource belongs to. If - an Application Component is not part of the Resilience Hub application, it will be added. -- `"awsAccountId"`: The Amazon Web Services account that owns the physical resource. -- `"awsRegion"`: The Amazon Web Services region that owns the physical resource. +- `"appComponents"`: List of Application Components that this resource belongs to. If an + Application Component is not part of the Resilience Hub application, it will be added. +- `"awsAccountId"`: Amazon Web Services account that owns the physical resource. +- `"awsRegion"`: Amazon Web Services region that owns the physical resource. - `"excluded"`: Indicates if a resource is excluded from an Resilience Hub application. You can exclude only imported resources from an Resilience Hub application. -- `"logicalResourceId"`: The logical identifier of the resource. -- `"physicalResourceId"`: The physical identifier of the resource. -- `"resourceName"`: The name of the resource. -- `"resourceType"`: The type of resource. +- `"logicalResourceId"`: Logical identifier of the resource. +- `"physicalResourceId"`: Physical identifier of the resource. +- `"resourceName"`: Name of the resource. +- `"resourceType"`: Type of resource. """ function update_app_version_resource( appArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -2563,13 +2723,17 @@ end update_resiliency_policy(policy_arn) update_resiliency_policy(policy_arn, params::Dict{String,<:Any}) -Updates a resiliency policy. +Updates a resiliency policy. Resilience Hub allows you to provide a value of zero for +rtoInSecs and rpoInSecs of your resiliency policy. But, while assessing your application, +the lowest possible assessment result is near zero. Hence, if you provide value zero for +rtoInSecs and rpoInSecs, the estimated workload RTO and estimated workload RPO result will +be near zero and the Compliance status for your application will be set to Policy breached. # Arguments -- `policy_arn`: The Amazon Resource Name (ARN) of the resiliency policy. The format for - this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For - more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference - guide. +- `policy_arn`: Amazon Resource Name (ARN) of the resiliency policy. The format for this + ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more + information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services + General Reference guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/resource_explorer_2.jl b/src/services/resource_explorer_2.jl index 2fabe773cb..3d7d97f07a 100644 --- a/src/services/resource_explorer_2.jl +++ b/src/services/resource_explorer_2.jl @@ -105,7 +105,7 @@ service-linked role for all additional indexes you create afterwards. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: This value helps ensure idempotency. Resource Explorer uses this value to prevent the accidental creation of duplicate versions. We recommend that you generate a - UUID-type value to ensure the uniqueness of your views. + UUID-type value to ensure the uniqueness of your index. - `"Tags"`: The specified tags are attached only to the index created in this Amazon Web Services Region. The tags aren't attached to any of the resources listed in the index. 
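+
+# Example
+A minimal usage sketch, assuming the `create_index` methods generated directly below this
+docstring and an already-configured `global_aws_config()`; the tag value is a placeholder.
+
+```julia
+using UUIDs: uuid4
+
+# Create an index in the current Region, passing an idempotency token and a sample tag.
+create_index(Dict("ClientToken" => string(uuid4()), "Tags" => Dict("project" => "example")))
+```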
""" @@ -166,6 +166,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"IncludedProperties"`: Specifies optional fields that you want included in search results from this view. It is a list of objects that each describe a field to include. The default is an empty list, with no optional fields included in the results. +- `"Scope"`: The root ARN of the account, an organizational unit (OU), or an organization + ARN. If left empty, the default is account. - `"Tags"`: Tag key and value pairs that are attached to the view. """ function create_view(ViewName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -300,6 +302,38 @@ function disassociate_default_view( ) end +""" + get_account_level_service_configuration() + get_account_level_service_configuration(params::Dict{String,<:Any}) + +Retrieves the status of your account's Amazon Web Services service access, and validates +the service linked role required to access the multi-account search feature. Only the +management account or a delegated administrator with service access enabled can invoke this +API call. + +""" +function get_account_level_service_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return resource_explorer_2( + "POST", + "/GetAccountLevelServiceConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_account_level_service_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return resource_explorer_2( + "POST", + "/GetAccountLevelServiceConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_default_view() get_default_view(params::Dict{String,<:Any}) @@ -398,7 +432,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to - indicate where the output should continue from. + indicate where the output should continue from. The pagination tokens expire after 24 hours. - `"Regions"`: If specified, limits the response to only information about the index in the specified list of Amazon Web Services Regions. - `"Type"`: If specified, limits the output to only indexes of the specified Type, either @@ -421,6 +455,61 @@ function list_indexes( ) end +""" + list_indexes_for_members(account_id_list) + list_indexes_for_members(account_id_list, params::Dict{String,<:Any}) + +Retrieves a list of a member's indexes in all Amazon Web Services Regions that are +currently collecting resource information for Amazon Web Services Resource Explorer. Only +the management account or a delegated administrator with service access enabled can invoke +this API call. + +# Arguments +- `account_id_list`: The account IDs will limit the output to only indexes from these + accounts. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results that you want included on each page of the + response. If you do not include this parameter, it defaults to a value appropriate to the + operation. If additional items exist beyond those included in the current response, the + NextToken response element is present and has a value (is not null). 
Include that value as + the NextToken request parameter in the next call to the operation to get the next part of + the results. An API operation can return fewer results than the maximum even when there + are more results available. You should check NextToken after every operation to ensure that + you receive all of the results. +- `"NextToken"`: The parameter for receiving additional results if you receive a NextToken + response in a previous request. A NextToken response indicates that more output is + available. Set this parameter to the value of the previous call's NextToken response to + indicate where the output should continue from. The pagination tokens expire after 24 hours. +""" +function list_indexes_for_members( + AccountIdList; aws_config::AbstractAWSConfig=global_aws_config() +) + return resource_explorer_2( + "POST", + "/ListIndexesForMembers", + Dict{String,Any}("AccountIdList" => AccountIdList); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_indexes_for_members( + AccountIdList, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resource_explorer_2( + "POST", + "/ListIndexesForMembers", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AccountIdList" => AccountIdList), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_supported_resource_types() list_supported_resource_types(params::Dict{String,<:Any}) @@ -441,7 +530,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to - indicate where the output should continue from. + indicate where the output should continue from. The pagination tokens expire after 24 hours. """ function list_supported_resource_types(; aws_config::AbstractAWSConfig=global_aws_config()) return resource_explorer_2( @@ -521,7 +610,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to - indicate where the output should continue from. + indicate where the output should continue from. The pagination tokens expire after 24 hours. """ function list_views(; aws_config::AbstractAWSConfig=global_aws_config()) return resource_explorer_2( @@ -572,7 +661,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to - indicate where the output should continue from. + indicate where the output should continue from. The pagination tokens expire after 24 hours. - `"ViewArn"`: Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. 
If the diff --git a/src/services/resource_groups.jl b/src/services/resource_groups.jl index 1b6780d66a..be3d3de87a 100644 --- a/src/services/resource_groups.jl +++ b/src/services/resource_groups.jl @@ -19,8 +19,9 @@ run this command, you must have the following permissions: resource-groups:Cr - `name`: The name of the group, which is the identifier of the group in other operations. You can't change the name of a resource group after you create it. A resource group name can consist of letters, numbers, hyphens, periods, and underscores. The name cannot start - with AWS or aws; these are reserved. A resource group name must be unique within each - Amazon Web Services Region in your Amazon Web Services account. + with AWS, aws, or any other possible capitalization; these are reserved. A resource group + name must be unique within each Amazon Web Services Region in your Amazon Web Services + account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -358,12 +359,14 @@ this command, you must have the following permissions: resource-groups:ListGr # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Filters"`: Filters, formatted as GroupFilter objects, that you want to apply to a - ListGroups operation. resource-type - Filter the results to include only those of the - specified resource types. Specify up to five resource types in the format - AWS::ServiceCode::ResourceType . For example, AWS::EC2::Instance, or AWS::S3::Bucket. - configuration-type - Filter the results to include only those groups that have the - specified configuration types attached. The current supported values are: - AWS::EC2::CapacityReservationPool AWS::EC2::HostManagement + ListGroups operation. resource-type - Filter the results to include only those resource + groups that have the specified resource type in their ResourceTypeFilter. For example, + AWS::EC2::Instance would return any resource group with a ResourceTypeFilter that includes + AWS::EC2::Instance. configuration-type - Filter the results to include only those groups + that have the specified configuration types attached. The current supported values are: + AWS::AppRegistry::Application AWS::AppRegistry::ApplicationResourceGroups + AWS::CloudFormation::Stack AWS::EC2::CapacityReservationPool + AWS::EC2::HostManagement AWS::NetworkFirewall::RuleGroup - `"maxResults"`: The total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken diff --git a/src/services/rolesanywhere.jl b/src/services/rolesanywhere.jl index 22c3db4148..25794f50c5 100644 --- a/src/services/rolesanywhere.jl +++ b/src/services/rolesanywhere.jl @@ -19,7 +19,9 @@ You use profiles to intersect permissions with IAM managed policies. Required p # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"durationSeconds"`: The number of seconds the vended session credentials are valid for. +- `"durationSeconds"`: Used to determine how long sessions vended using this profile are + valid for. See the Expiration section of the CreateSession API documentation page for more + details. In requests, if this value is not provided, the default value will be 3600. - `"enabled"`: Specifies whether the profile is enabled. 
- `"managedPolicyArns"`: A list of managed policy ARNs that apply to the vended session credentials. @@ -107,6 +109,52 @@ function create_trust_anchor( ) end +""" + delete_attribute_mapping(certificate_field, profile_id) + delete_attribute_mapping(certificate_field, profile_id, params::Dict{String,<:Any}) + +Delete an entry from the attribute mapping rules enforced by a given profile. + +# Arguments +- `certificate_field`: Fields (x509Subject, x509Issuer and x509SAN) within X.509 + certificates. +- `profile_id`: The unique identifier of the profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"specifiers"`: A list of specifiers of a certificate field; for example, CN, OU, UID + from a Subject. +""" +function delete_attribute_mapping( + certificateField, profileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return rolesanywhere( + "DELETE", + "/profiles/$(profileId)/mappings", + Dict{String,Any}("certificateField" => certificateField); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_attribute_mapping( + certificateField, + profileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rolesanywhere( + "DELETE", + "/profiles/$(profileId)/mappings", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("certificateField" => certificateField), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_crl(crl_id) delete_crl(crl_id, params::Dict{String,<:Any}) @@ -531,9 +579,9 @@ end import_crl(crl_data, name, trust_anchor_arn, params::Dict{String,<:Any}) Imports the certificate revocation list (CRL). A CRL is a list of certificates that have -been revoked by the issuing certificate Authority (CA). IAM Roles Anywhere validates -against the CRL before issuing credentials. Required permissions: -rolesanywhere:ImportCrl. +been revoked by the issuing certificate Authority (CA).In order to be properly imported, a +CRL must be in PEM format. IAM Roles Anywhere validates against the CRL before issuing +credentials. Required permissions: rolesanywhere:ImportCrl. # Arguments - `crl_data`: The x509 v3 specified certificate revocation list (CRL). @@ -733,6 +781,61 @@ function list_trust_anchors( ) end +""" + put_attribute_mapping(certificate_field, mapping_rules, profile_id) + put_attribute_mapping(certificate_field, mapping_rules, profile_id, params::Dict{String,<:Any}) + +Put an entry in the attribute mapping rules that will be enforced by a given profile. A +mapping specifies a certificate field and one or more specifiers that have contextual +meanings. + +# Arguments +- `certificate_field`: Fields (x509Subject, x509Issuer and x509SAN) within X.509 + certificates. +- `mapping_rules`: A list of mapping entries for every supported specifier or sub-field. +- `profile_id`: The unique identifier of the profile. 
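+
+# Example
+A minimal usage sketch; the profile ID is a placeholder, and the shape of each mapping rule
+(a single `"specifier"` key) is an assumption based on the service documentation.
+
+```julia
+# Map the certificate subject's CN into the profile's attribute mapping rules.
+put_attribute_mapping("x509Subject", [Dict("specifier" => "CN")], "example-profile-id")
+```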
+ +""" +function put_attribute_mapping( + certificateField, + mappingRules, + profileId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rolesanywhere( + "PUT", + "/profiles/$(profileId)/mappings", + Dict{String,Any}( + "certificateField" => certificateField, "mappingRules" => mappingRules + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_attribute_mapping( + certificateField, + mappingRules, + profileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return rolesanywhere( + "PUT", + "/profiles/$(profileId)/mappings", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "certificateField" => certificateField, "mappingRules" => mappingRules + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_notification_settings(notification_settings, trust_anchor_id) put_notification_settings(notification_settings, trust_anchor_id, params::Dict{String,<:Any}) @@ -968,7 +1071,9 @@ permissions: rolesanywhere:UpdateProfile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"durationSeconds"`: The number of seconds the vended session credentials are valid for. +- `"durationSeconds"`: Used to determine how long sessions vended using this profile are + valid for. See the Expiration section of the CreateSession API documentation page for more + details. In requests, if this value is not provided, the default value will be 3600. - `"managedPolicyArns"`: A list of managed policy ARNs that apply to the vended session credentials. - `"name"`: The name of the profile. diff --git a/src/services/route53_recovery_cluster.jl b/src/services/route53_recovery_cluster.jl index 98f02f4550..ad19b72d9b 100644 --- a/src/services/route53_recovery_cluster.jl +++ b/src/services/route53_recovery_cluster.jl @@ -9,13 +9,13 @@ using AWS.UUIDs get_routing_control_state(routing_control_arn, params::Dict{String,<:Any}) Get the state for a routing control. A routing control is a simple on/off switch that you -can use to route traffic to cells. When a routing control state is On, traffic flows to a -cell. When the state is Off, traffic does not flow. Before you can create a routing -control, you must first create a cluster, and then host the control in a control panel on -the cluster. For more information, see Create routing control structures in the Amazon -Route 53 Application Recovery Controller Developer Guide. You access one of the endpoints -for the cluster to get or update the routing control state to redirect traffic for your -application. You must specify Regional endpoints when you work with API cluster +can use to route traffic to cells. When a routing control state is set to ON, traffic flows +to a cell. When the state is set to OFF, traffic does not flow. Before you can create a +routing control, you must first create a cluster, and then host the control in a control +panel on the cluster. For more information, see Create routing control structures in the +Amazon Route 53 Application Recovery Controller Developer Guide. You access one of the +endpoints for the cluster to get or update the routing control state to redirect traffic +for your application. You must specify Regional endpoints when you work with API cluster operations to get or update routing control states in Route 53 ARC. 
To see a code example for getting a routing control state, including accessing Regional cluster endpoints in sequence, see API examples in the Amazon Route 53 Application Recovery Controller Developer @@ -64,17 +64,17 @@ state for each routing control, along with the control panel name and control pa the routing controls. If you specify a control panel ARN, this call lists the routing controls in the control panel. Otherwise, it lists all the routing controls in the cluster. A routing control is a simple on/off switch in Route 53 ARC that you can use to route -traffic to cells. When a routing control state is On, traffic flows to a cell. When the -state is Off, traffic does not flow. Before you can create a routing control, you must -first create a cluster, and then host the control in a control panel on the cluster. For -more information, see Create routing control structures in the Amazon Route 53 Application -Recovery Controller Developer Guide. You access one of the endpoints for the cluster to get -or update the routing control state to redirect traffic for your application. You must -specify Regional endpoints when you work with API cluster operations to use this API -operation to list routing controls in Route 53 ARC. Learn more about working with routing -controls in the following topics in the Amazon Route 53 Application Recovery Controller -Developer Guide: Viewing and updating routing control states Working with routing -controls in Route 53 ARC +traffic to cells. When a routing control state is set to ON, traffic flows to a cell. When +the state is set to OFF, traffic does not flow. Before you can create a routing control, +you must first create a cluster, and then host the control in a control panel on the +cluster. For more information, see Create routing control structures in the Amazon Route +53 Application Recovery Controller Developer Guide. You access one of the endpoints for the +cluster to get or update the routing control state to redirect traffic for your +application. You must specify Regional endpoints when you work with API cluster +operations to use this API operation to list routing controls in Route 53 ARC. Learn more +about working with routing controls in the following topics in the Amazon Route 53 +Application Recovery Controller Developer Guide: Viewing and updating routing control +states Working with routing controls in Route 53 ARC # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -105,8 +105,8 @@ end update_routing_control_state(routing_control_arn, routing_control_state) update_routing_control_state(routing_control_arn, routing_control_state, params::Dict{String,<:Any}) -Set the state of the routing control to reroute traffic. You can set the value to be On or -Off. When the state is On, traffic flows to a cell. When the state is Off, traffic does not +Set the state of the routing control to reroute traffic. You can set the value to ON or +OFF. When the state is ON, traffic flows to a cell. When the state is OFF, traffic does not flow. With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, there are scenarios when you might want to bypass the @@ -127,8 +127,8 @@ Working with routing controls overall # Arguments - `routing_control_arn`: The Amazon Resource Name (ARN) for the routing control that you want to update the state for. 
-- `routing_control_state`: The state of the routing control. You can set the value to be On
-  or Off.
+- `routing_control_state`: The state of the routing control. You can set the value to ON or
+  OFF.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -180,8 +180,8 @@ end
     update_routing_control_states(update_routing_control_state_entries)
     update_routing_control_states(update_routing_control_state_entries, params::Dict{String,<:Any})

-Set multiple routing control states. You can set the value for each state to be On or Off.
-When the state is On, traffic flows to a cell. When it's Off, traffic does not flow. With
+Set multiple routing control states. You can set the value for each state to be ON or OFF.
+When the state is ON, traffic flows to a cell. When it's OFF, traffic does not flow. With
 Route 53 ARC, you can add safety rules for routing controls, which are safeguards for
 routing control state updates that help prevent unexpected outcomes, like fail open traffic
 routing. However, there are scenarios when you might want to bypass the routing control
diff --git a/src/services/route53_recovery_control_config.jl b/src/services/route53_recovery_control_config.jl
index b79cf64d7c..1c926da9a6 100644
--- a/src/services/route53_recovery_control_config.jl
+++ b/src/services/route53_recovery_control_config.jl
@@ -498,6 +498,38 @@ function describe_safety_rule(
     )
 end

+"""
+    get_resource_policy(resource_arn)
+    get_resource_policy(resource_arn, params::Dict{String,<:Any})
+
+Get information about the resource policy for a cluster.
+
+# Arguments
+- `resource_arn`: The Amazon Resource Name (ARN) of the resource.
+
+"""
+function get_resource_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config())
+    return route53_recovery_control_config(
+        "GET",
+        "/resourcePolicy/$(ResourceArn)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_resource_policy(
+    ResourceArn,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53_recovery_control_config(
+        "GET",
+        "/resourcePolicy/$(ResourceArn)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     list_associated_route53_health_checks(routing_control_arn)
     list_associated_route53_health_checks(routing_control_arn, params::Dict{String,<:Any})
diff --git a/src/services/route53profiles.jl b/src/services/route53profiles.jl
new file mode 100644
index 0000000000..7e4b5ee02f
--- /dev/null
+++ b/src/services/route53profiles.jl
@@ -0,0 +1,643 @@
+# This file is auto-generated by AWSMetadata.jl
+using AWS
+using AWS.AWSServices: route53profiles
+using AWS.Compat
+using AWS.UUIDs
+
+"""
+    associate_profile(name, profile_id, resource_id)
+    associate_profile(name, profile_id, resource_id, params::Dict{String,<:Any})
+
+ Associates a Route 53 Profiles profile with a VPC. A VPC can have only one Profile
+associated with it, but a Profile can be associated with up to 1,000 VPCs (and you can
+request a higher quota). For more information, see
+https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities.
+
+# Arguments
+- `name`: A name for the association.
+- `profile_id`: ID of the Profile.
+- `resource_id`: The ID of the VPC.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"Tags"`: A list of the tag keys and values that you want to identify the Profile
+  association.
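+
+# Example
+A minimal usage sketch; the Profile ID and VPC ID below are placeholders, and an AWS
+configuration is assumed to already be set up via `global_aws_config()`.
+
+```julia
+# Associate an existing Profile with a VPC under a chosen association name.
+associate_profile("example-association", "example-profile-id", "vpc-0123456789abcdef0")
+```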
+"""
+function associate_profile(
+    Name, ProfileId, ResourceId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return route53profiles(
+        "POST",
+        "/profileassociation",
+        Dict{String,Any}(
+            "Name" => Name, "ProfileId" => ProfileId, "ResourceId" => ResourceId
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function associate_profile(
+    Name,
+    ProfileId,
+    ResourceId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "POST",
+        "/profileassociation",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "Name" => Name, "ProfileId" => ProfileId, "ResourceId" => ResourceId
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    associate_resource_to_profile(name, profile_id, resource_arn)
+    associate_resource_to_profile(name, profile_id, resource_arn, params::Dict{String,<:Any})
+
+ Associates a DNS resource configuration to a Route 53 Profile.
+
+# Arguments
+- `name`: Name for the resource association.
+- `profile_id`: ID of the Profile.
+- `resource_arn`: Amazon Resource Name (ARN) of the DNS resource.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"ResourceProperties"`: If you are adding a DNS Firewall rule group, also include a
+  priority. The priority indicates the processing order for the rule groups, starting with
+  the priority assigned the lowest value. The allowed values for priority are between 100
+  and 9900.
+"""
+function associate_resource_to_profile(
+    Name, ProfileId, ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return route53profiles(
+        "POST",
+        "/profileresourceassociation",
+        Dict{String,Any}(
+            "Name" => Name, "ProfileId" => ProfileId, "ResourceArn" => ResourceArn
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function associate_resource_to_profile(
+    Name,
+    ProfileId,
+    ResourceArn,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "POST",
+        "/profileresourceassociation",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "Name" => Name, "ProfileId" => ProfileId, "ResourceArn" => ResourceArn
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    create_profile(client_token, name)
+    create_profile(client_token, name, params::Dict{String,<:Any})
+
+ Creates an empty Route 53 Profile.
+
+# Arguments
+- `client_token`: ClientToken is an idempotency token that ensures a call to CreateProfile
+  completes only once. You choose the value to pass. For example, an issue might prevent you
+  from getting a response from CreateProfile. In this case, safely retry your call to
+  CreateProfile by using the same ClientToken parameter value.
+- `name`: A name for the Profile.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"Tags"`: A list of the tag keys and values that you want to associate with the Route 53
+  Profile.
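+
+# Example
+A minimal usage sketch; assumes an already-configured `global_aws_config()` and uses a
+freshly generated UUID as the idempotency token.
+
+```julia
+using UUIDs: uuid4
+
+# Create an empty Profile named "example-profile".
+create_profile(string(uuid4()), "example-profile")
+```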
+"""
+function create_profile(
+    ClientToken, Name; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return route53profiles(
+        "POST",
+        "/profile",
+        Dict{String,Any}("ClientToken" => ClientToken, "Name" => Name);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_profile(
+    ClientToken,
+    Name,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "POST",
+        "/profile",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("ClientToken" => ClientToken, "Name" => Name),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_profile(profile_id)
+    delete_profile(profile_id, params::Dict{String,<:Any})
+
+ Deletes the specified Route 53 Profile. Before you can delete a profile, you must first
+disassociate it from all VPCs.
+
+# Arguments
+- `profile_id`: The ID of the Profile that you want to delete.
+
+"""
+function delete_profile(ProfileId; aws_config::AbstractAWSConfig=global_aws_config())
+    return route53profiles(
+        "DELETE",
+        "/profile/$(ProfileId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_profile(
+    ProfileId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "DELETE",
+        "/profile/$(ProfileId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    disassociate_profile(profile_id, resource_id)
+    disassociate_profile(profile_id, resource_id, params::Dict{String,<:Any})
+
+ Disassociates a specified Route 53 Profile from the specified VPC.
+
+# Arguments
+- `profile_id`: ID of the Profile.
+- `resource_id`: The ID of the VPC.
+
+"""
+function disassociate_profile(
+    ProfileId, ResourceId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return route53profiles(
+        "DELETE",
+        "/profileassociation/Profileid/$(ProfileId)/resourceid/$(ResourceId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function disassociate_profile(
+    ProfileId,
+    ResourceId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "DELETE",
+        "/profileassociation/Profileid/$(ProfileId)/resourceid/$(ResourceId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    disassociate_resource_from_profile(profile_id, resource_arn)
+    disassociate_resource_from_profile(profile_id, resource_arn, params::Dict{String,<:Any})
+
+ Disassociates a specified resource from the Route 53 Profile.
+
+# Arguments
+- `profile_id`: The ID of the Profile.
+- `resource_arn`: The Amazon Resource Name (ARN) of the resource.
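+
+# Example
+A minimal usage sketch; the Profile ID and resource ARN below are placeholders.
+
+```julia
+# Remove a previously associated DNS resource from the Profile.
+disassociate_resource_from_profile(
+    "example-profile-id",
+    "arn:aws:route53resolver:us-east-1:111122223333:firewall-rule-group/example",
+)
+```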
+
+"""
+function disassociate_resource_from_profile(
+    ProfileId, ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return route53profiles(
+        "DELETE",
+        "/profileresourceassociation/profileid/$(ProfileId)/resourcearn/$(ResourceArn)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function disassociate_resource_from_profile(
+    ProfileId,
+    ResourceArn,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "DELETE",
+        "/profileresourceassociation/profileid/$(ProfileId)/resourcearn/$(ResourceArn)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_profile(profile_id)
+    get_profile(profile_id, params::Dict{String,<:Any})
+
+ Returns information about a specified Route 53 Profile, such as whether the
+Profile is shared, and the current status of the Profile.
+
+# Arguments
+- `profile_id`: ID of the Profile.
+
+"""
+function get_profile(ProfileId; aws_config::AbstractAWSConfig=global_aws_config())
+    return route53profiles(
+        "GET",
+        "/profile/$(ProfileId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_profile(
+    ProfileId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "GET",
+        "/profile/$(ProfileId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_profile_association(profile_association_id)
+    get_profile_association(profile_association_id, params::Dict{String,<:Any})
+
+ Retrieves a Route 53 Profile association for a VPC. A VPC can have only one Profile
+association, but a Profile can be associated with up to 5000 VPCs.
+
+# Arguments
+- `profile_association_id`: The identifier of the association you want to get information
+  about.
+
+"""
+function get_profile_association(
+    ProfileAssociationId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return route53profiles(
+        "GET",
+        "/profileassociation/$(ProfileAssociationId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_profile_association(
+    ProfileAssociationId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route53profiles(
+        "GET",
+        "/profileassociation/$(ProfileAssociationId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_profile_resource_association(profile_resource_association_id)
+    get_profile_resource_association(profile_resource_association_id, params::Dict{String,<:Any})
+
+ Returns information about a specified Route 53 Profile resource association.
+
+# Arguments
+- `profile_resource_association_id`: The ID of the profile resource association that you
+  want to get information about.
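+
+# Examples
+A minimal sketch with a placeholder association ID:
+
+```julia
+get_profile_resource_association("rpr-001122334455example")
+```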
+ +""" +function get_profile_resource_association( + ProfileResourceAssociationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53profiles( + "GET", + "/profileresourceassociation/$(ProfileResourceAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_profile_resource_association( + ProfileResourceAssociationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53profiles( + "GET", + "/profileresourceassociation/$(ProfileResourceAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_profile_associations() + list_profile_associations(params::Dict{String,<:Any}) + + Lists all the VPCs that the specified Route 53 Profile is associated with. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects that you want to return for this request. + If more objects are available, in the response, a NextToken value, which you can use in a + subsequent call to get the next batch of objects, is provided. If you don't specify a + value for MaxResults, up to 100 objects are returned. +- `"nextToken"`: For the first call to this list request, omit this value. When you + request a list of objects, at most the number of objects specified by MaxResults is + returned. If more objects are available for retrieval, a NextToken value is returned in the + response. To retrieve the next batch of objects, use the token that was returned for the + prior request in your next request. +- `"profileId"`: ID of the Profile. +- `"resourceId"`: ID of the VPC. +""" +function list_profile_associations(; aws_config::AbstractAWSConfig=global_aws_config()) + return route53profiles( + "GET", + "/profileassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_profile_associations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53profiles( + "GET", + "/profileassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_profile_resource_associations(profile_id) + list_profile_resource_associations(profile_id, params::Dict{String,<:Any}) + + Lists all the resource associations for the specified Route 53 Profile. + +# Arguments +- `profile_id`: The ID of the Profile. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects that you want to return for this request. + If more objects are available, in the response, a NextToken value, which you can use in a + subsequent call to get the next batch of objects, is provided. If you don't specify a + value for MaxResults, up to 100 objects are returned. +- `"nextToken"`: For the first call to this list request, omit this value. When you + request a list of objects, at most the number of objects specified by MaxResults is + returned. If more objects are available for retrieval, a NextToken value is returned in the + response. To retrieve the next batch of objects, use the token that was returned for the + prior request in your next request. +- `"resourceType"`: ID of a resource if you want information on only one type. 
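+
+# Examples
+A sketch that passes the documented `maxResults` key through `params`; the Profile ID is a
+placeholder:
+
+```julia
+list_profile_resource_associations("rp-0123456789abcdef0", Dict("maxResults" => 10))
+```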
+""" +function list_profile_resource_associations( + ProfileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53profiles( + "GET", + "/profileresourceassociations/profileid/$(ProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_profile_resource_associations( + ProfileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53profiles( + "GET", + "/profileresourceassociations/profileid/$(ProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_profiles() + list_profiles(params::Dict{String,<:Any}) + + Lists all the Route 53 Profiles associated with your Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects that you want to return for this request. + If more objects are available, in the response, a NextToken value, which you can use in a + subsequent call to get the next batch of objects, is provided. If you don't specify a + value for MaxResults, up to 100 objects are returned. +- `"nextToken"`: For the first call to this list request, omit this value. When you + request a list of objects, at most the number of objects specified by MaxResults is + returned. If more objects are available for retrieval, a NextToken value is returned in the + response. To retrieve the next batch of objects, use the token that was returned for the + prior request in your next request. +""" +function list_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) + return route53profiles( + "GET", "/profiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_profiles( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53profiles( + "GET", "/profiles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + + Lists the tags that you associated with the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) for the resource that you want to list + the tags for. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53profiles( + "GET", + "/tags/$(ResourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53profiles( + "GET", + "/tags/$(ResourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + + Adds one or more tags to a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) for the resource that you want to add + tags to. +- `tags`: The tags that you want to add to the specified resource. 
+ +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return route53profiles( + "POST", + "/tags/$(ResourceArn)", + Dict{String,Any}("Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53profiles( + "POST", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Tags" => Tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + + Removes one or more tags from a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) for the resource that you want to remove + tags from. +- `tag_keys`: The tags that you want to remove to the specified resource. + +""" +function untag_resource( + ResourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53profiles( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53profiles( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_profile_resource_association(profile_resource_association_id) + update_profile_resource_association(profile_resource_association_id, params::Dict{String,<:Any}) + + Updates the specified Route 53 Profile resourse association. + +# Arguments +- `profile_resource_association_id`: ID of the resource association. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Name"`: Name of the resource association. +- `"ResourceProperties"`: If you are adding a DNS Firewall rule group, include also a + priority. The priority indicates the processing order for the rule groups, starting with + the priority assinged the lowest value. The allowed values for priority are between 100 + and 9900. +""" +function update_profile_resource_association( + ProfileResourceAssociationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53profiles( + "PATCH", + "/profileresourceassociation/$(ProfileResourceAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_profile_resource_association( + ProfileResourceAssociationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53profiles( + "PATCH", + "/profileresourceassociation/$(ProfileResourceAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/route53resolver.jl b/src/services/route53resolver.jl index 328480383a..6d3ceb83dd 100644 --- a/src/services/route53resolver.jl +++ b/src/services/route53resolver.jl @@ -333,6 +333,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that's in the query doesn't exist. OVERRIDE - Provide a custom override in the response. This option requires custom handling details in the rule's BlockOverride* settings. 
This setting is required if the rule action setting is BLOCK. +- `"FirewallDomainRedirectionAction"`: How you want the the rule to evaluate DNS + redirection in the DNS redirection chain, such as CNAME or DNAME. + Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The + individual domains in the redirection chain must be added to the domain list. + Trust_Redirection_Domain inspects only the first domain in the redirection chain. You + don't need to add the subsequent domains in the domain in the redirection list to the + domain list. +- `"Qtype"`: The DNS query type you want the rule to evaluate. Allowed values are; A: + Returns an IPv4 address. AAAA: Returns an Ipv6 address. CAA: Restricts CAs that can + create SSL/TLS certifications for the domain. CNAME: Returns another domain name. DS: + Record that identifies the DNSSEC signing key of a delegated zone. MX: Specifies mail + servers. NAPTR: Regular-expression-based rewriting of domain names. NS: Authoritative + name servers. PTR: Maps an IP address to a domain name. SOA: Start of authority record + for the zone. SPF: Lists the servers authorized to send emails from a domain. SRV: + Application specific values that identify servers. TXT: Verifies email senders and + application-specific values. A query type you define by using the DNS type ID, for + example 28 for AAAA. The values must be defined as TYPENUMBER, where the NUMBER can be + 1-65334, for example, TYPE28. For more information, see List of DNS record types. """ function create_firewall_rule( Action, @@ -436,6 +454,75 @@ function create_firewall_rule_group( ) end +""" + create_outpost_resolver(creator_request_id, name, outpost_arn, preferred_instance_type) + create_outpost_resolver(creator_request_id, name, outpost_arn, preferred_instance_type, params::Dict{String,<:Any}) + +Creates a Route 53 Resolver on an Outpost. + +# Arguments +- `creator_request_id`: A unique string that identifies the request and that allows failed + requests to be retried without the risk of running the operation twice. CreatorRequestId + can be any unique string, for example, a date/time stamp. +- `name`: A friendly name that lets you easily find a configuration in the Resolver + dashboard in the Route 53 console. +- `outpost_arn`: The Amazon Resource Name (ARN) of the Outpost. If you specify this, you + must also specify a value for the PreferredInstanceType. +- `preferred_instance_type`: The Amazon EC2 instance type. If you specify this, you must + also specify a value for the OutpostArn. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceCount"`: Number of Amazon EC2 instances for the Resolver on Outpost. The + default and minimal value is 4. +- `"Tags"`: A string that helps identify the Route 53 Resolvers on Outpost. 
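+
+# Examples
+A hypothetical call; the request ID, name, Outpost ARN, and instance type below are
+placeholder values:
+
+```julia
+create_outpost_resolver(
+    "20240626-outpost-resolver-0001",
+    "example-outpost-resolver",
+    "arn:aws:outposts:us-east-1:111122223333:outpost/op-0123456789abcdef0",
+    "m5.large",
+)
+```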
+""" +function create_outpost_resolver( + CreatorRequestId, + Name, + OutpostArn, + PreferredInstanceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53resolver( + "CreateOutpostResolver", + Dict{String,Any}( + "CreatorRequestId" => CreatorRequestId, + "Name" => Name, + "OutpostArn" => OutpostArn, + "PreferredInstanceType" => PreferredInstanceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_outpost_resolver( + CreatorRequestId, + Name, + OutpostArn, + PreferredInstanceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53resolver( + "CreateOutpostResolver", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CreatorRequestId" => CreatorRequestId, + "Name" => Name, + "OutpostArn" => OutpostArn, + "PreferredInstanceType" => PreferredInstanceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_resolver_endpoint(creator_request_id, direction, ip_addresses, security_group_ids) create_resolver_endpoint(creator_request_id, direction, ip_addresses, security_group_ids, params::Dict{String,<:Any}) @@ -454,19 +541,34 @@ service for a VPC to your network. the DNS service for a VPC to your network - `ip_addresses`: The subnets and IP addresses in your VPC that DNS queries originate from (for outbound endpoints) or that you forward DNS queries to (for inbound endpoints). The - subnet ID uniquely identifies a VPC. + subnet ID uniquely identifies a VPC. Even though the minimum is 1, Route 53 requires + that you create at least two. - `security_group_ids`: The ID of one or more security groups that you want to use to control access to this VPC. The security group that you specify must include one or more inbound rules (for inbound Resolver endpoints) or outbound rules (for outbound Resolver endpoints). Inbound and outbound rules must allow TCP and UDP access. For inbound access, open port 53. For outbound access, open the port that you're using for DNS queries on your - network. + network. Some security group rules will cause your connection to be tracked. For outbound + resolver endpoint, it can potentially impact the maximum queries per second from outbound + endpoint to your target name server. For inbound resolver endpoint, it can bring down the + overall maximum queries per second per IP address to as low as 1500. To avoid connection + tracking caused by security group, see Untracked connections. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Name"`: A friendly name that lets you easily find a configuration in the Resolver dashboard in the Route 53 console. -- `"ResolverEndpointType"`: For the endpoint type you can choose either IPv4, IPv6. or +- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. If you specify this, you + must also specify a value for the PreferredInstanceType. +- `"PreferredInstanceType"`: The instance type. If you specify this, you must also specify + a value for the OutpostArn. +- `"Protocols"`: The protocols you want to use for the endpoint. DoH-FIPS is applicable + for inbound endpoints only. For an inbound endpoint you can apply the protocols as + follows: Do53 and DoH in combination. Do53 and DoH-FIPS in combination. Do53 alone. + DoH alone. DoH-FIPS alone. None, which is treated as Do53. For an outbound endpoint + you can apply the protocols as follows: Do53 and DoH in combination. Do53 alone. 
DoH + alone. None, which is treated as Do53. +- `"ResolverEndpointType"`: For the endpoint type you can choose either IPv4, IPv6, or dual-stack. A dual-stack endpoint means that it will resolve via both IPv4 and IPv6. This endpoint type is applied to all IP addresses. - `"Tags"`: A list of the tag keys and values that you want to associate with the endpoint. @@ -593,8 +695,8 @@ function create_resolver_query_log_config( end """ - create_resolver_rule(creator_request_id, domain_name, rule_type) - create_resolver_rule(creator_request_id, domain_name, rule_type, params::Dict{String,<:Any}) + create_resolver_rule(creator_request_id, rule_type) + create_resolver_rule(creator_request_id, rule_type, params::Dict{String,<:Any}) For DNS queries that originate in your VPCs, specifies which Resolver endpoint the queries pass through, one domain name that you want to forward to your network, and the IP @@ -604,10 +706,6 @@ addresses of the DNS resolvers in your network. - `creator_request_id`: A unique string that identifies the request and that allows failed requests to be retried without the risk of running the operation twice. CreatorRequestId can be any unique string, for example, a date/time stamp. -- `domain_name`: DNS queries for this domain name are forwarded to the IP addresses that - you specify in TargetIps. If a query matches multiple Resolver rules (example.com and - www.example.com), outbound DNS queries are routed using the Resolver rule that contains the - most specific domain name (www.example.com). - `rule_type`: When you want to forward DNS queries for specified domain name to resolvers on your network, specify FORWARD. When you have a forwarding rule to forward DNS queries for a domain to your network and you want Resolver to process queries for a subdomain of @@ -619,35 +717,31 @@ addresses of the DNS resolvers in your network. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DomainName"`: DNS queries for this domain name are forwarded to the IP addresses that + you specify in TargetIps. If a query matches multiple Resolver rules (example.com and + www.example.com), outbound DNS queries are routed using the Resolver rule that contains the + most specific domain name (www.example.com). - `"Name"`: A friendly name that lets you easily find a rule in the Resolver dashboard in the Route 53 console. - `"ResolverEndpointId"`: The ID of the outbound Resolver endpoint that you want to use to route DNS queries to the IP addresses that you specify in TargetIps. - `"Tags"`: A list of the tag keys and values that you want to associate with the endpoint. - `"TargetIps"`: The IPs that you want Resolver to forward DNS queries to. You can specify - only IPv4 addresses. Separate IP addresses with a space. TargetIps is available only when - the value of Rule type is FORWARD. + either Ipv4 or Ipv6 addresses but not both in the same rule. Separate IP addresses with a + space. TargetIps is available only when the value of Rule type is FORWARD. 
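+
+# Examples
+A sketch of a FORWARD rule; the request ID, domain, endpoint ID, and target address are
+placeholders, and each `TargetIps` entry is assumed to take `Ip` and `Port` fields:
+
+```julia
+create_resolver_rule(
+    "20240626-resolver-rule-0001",
+    "FORWARD",
+    Dict(
+        "DomainName" => "example.com",
+        "ResolverEndpointId" => "rslvr-out-0123456789abcdef0",
+        "TargetIps" => [Dict("Ip" => "192.0.2.44", "Port" => 53)],
+    ),
+)
+```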
""" function create_resolver_rule( - CreatorRequestId, - DomainName, - RuleType; - aws_config::AbstractAWSConfig=global_aws_config(), + CreatorRequestId, RuleType; aws_config::AbstractAWSConfig=global_aws_config() ) return route53resolver( "CreateResolverRule", - Dict{String,Any}( - "CreatorRequestId" => CreatorRequestId, - "DomainName" => DomainName, - "RuleType" => RuleType, - ); + Dict{String,Any}("CreatorRequestId" => CreatorRequestId, "RuleType" => RuleType); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_resolver_rule( CreatorRequestId, - DomainName, RuleType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -658,9 +752,7 @@ function create_resolver_rule( mergewith( _merge, Dict{String,Any}( - "CreatorRequestId" => CreatorRequestId, - "DomainName" => DomainName, - "RuleType" => RuleType, + "CreatorRequestId" => CreatorRequestId, "RuleType" => RuleType ), params, ), @@ -720,6 +812,19 @@ Deletes the specified firewall rule. - `firewall_rule_group_id`: The unique identifier of the firewall rule group that you want to delete the rule from. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Qtype"`: The DNS query type that the rule you are deleting evaluates. Allowed values + are; A: Returns an IPv4 address. AAAA: Returns an Ipv6 address. CAA: Restricts CAs + that can create SSL/TLS certifications for the domain. CNAME: Returns another domain + name. DS: Record that identifies the DNSSEC signing key of a delegated zone. MX: + Specifies mail servers. NAPTR: Regular-expression-based rewriting of domain names. NS: + Authoritative name servers. PTR: Maps an IP address to a domain name. SOA: Start of + authority record for the zone. SPF: Lists the servers authorized to send emails from a + domain. SRV: Application specific values that identify servers. TXT: Verifies email + senders and application-specific values. A query type you define by using the DNS type + ID, for example 28 for AAAA. The values must be defined as TYPENUMBER, where the NUMBER can + be 1-65334, for example, TYPE28. For more information, see List of DNS record types. """ function delete_firewall_rule( FirewallDomainListId, @@ -799,6 +904,35 @@ function delete_firewall_rule_group( ) end +""" + delete_outpost_resolver(id) + delete_outpost_resolver(id, params::Dict{String,<:Any}) + +Deletes a Resolver on the Outpost. + +# Arguments +- `id`: A unique string that identifies the Resolver on the Outpost. + +""" +function delete_outpost_resolver(Id; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "DeleteOutpostResolver", + Dict{String,Any}("Id" => Id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_outpost_resolver( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "DeleteOutpostResolver", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Id" => Id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_resolver_endpoint(resolver_endpoint_id) delete_resolver_endpoint(resolver_endpoint_id, params::Dict{String,<:Any}) @@ -1306,6 +1440,36 @@ function get_firewall_rule_group_policy( ) end +""" + get_outpost_resolver(id) + get_outpost_resolver(id, params::Dict{String,<:Any}) + +Gets information about a specified Resolver on the Outpost, such as its instance count and +type, name, and the current status of the Resolver. 
+ +# Arguments +- `id`: The ID of the Resolver on the Outpost. + +""" +function get_outpost_resolver(Id; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "GetOutpostResolver", + Dict{String,Any}("Id" => Id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_outpost_resolver( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "GetOutpostResolver", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Id" => Id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_resolver_config(resource_id) get_resolver_config(resource_id, params::Dict{String,<:Any}) @@ -1984,6 +2148,37 @@ function list_firewall_rules( ) end +""" + list_outpost_resolvers() + list_outpost_resolvers(params::Dict{String,<:Any}) + +Lists all the Resolvers on Outposts that were created using the current Amazon Web Services +account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of Resolvers on the Outpost that you want to return in + the response to a ListOutpostResolver request. If you don't specify a value for MaxResults, + the request returns up to 100 Resolvers. +- `"NextToken"`: For the first ListOutpostResolver request, omit this value. +- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. +""" +function list_outpost_resolvers(; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "ListOutpostResolvers"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_outpost_resolvers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "ListOutpostResolvers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_resolver_configs() list_resolver_configs(params::Dict{String,<:Any}) @@ -2451,7 +2646,6 @@ operations that you want the account to be able to perform on the configuration. can specify the following operations in the Actions section of the statement: route53resolver:AssociateResolverQueryLogConfig route53resolver:DisassociateResolverQueryLogConfig - route53resolver:ListResolverQueryLogConfigAssociations route53resolver:ListResolverQueryLogConfigs In the Resource section of the statement, you specify the ARNs for the query logging configurations that you want to share with the account that you specified in Arn. @@ -2772,6 +2966,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys but no response is available for it. NXDOMAIN - Respond indicating that the domain name that's in the query doesn't exist. OVERRIDE - Provide a custom override in the response. This option requires custom handling details in the rule's BlockOverride* settings. +- `"FirewallDomainRedirectionAction"`: How you want the the rule to evaluate DNS + redirection in the DNS redirection chain, such as CNAME or DNAME. + Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The + individual domains in the redirection chain must be added to the domain list. + Trust_Redirection_Domain inspects only the first domain in the redirection chain. You + don't need to add the subsequent domains in the domain in the redirection list to the + domain list. - `"Name"`: The name of the rule. - `"Priority"`: The setting that determines the processing order of the rule in the rule group. 
DNS Firewall processes the rules in a rule group by order of priority, starting from @@ -2779,6 +2980,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys make it easier to insert rules later, leave space between the numbers, for example, use 100, 200, and so on. You can change the priority setting for the rules in a rule group at any time. +- `"Qtype"`: The DNS query type you want the rule to evaluate. Allowed values are; A: + Returns an IPv4 address. AAAA: Returns an Ipv6 address. CAA: Restricts CAs that can + create SSL/TLS certifications for the domain. CNAME: Returns another domain name. DS: + Record that identifies the DNSSEC signing key of a delegated zone. MX: Specifies mail + servers. NAPTR: Regular-expression-based rewriting of domain names. NS: Authoritative + name servers. PTR: Maps an IP address to a domain name. SOA: Start of authority record + for the zone. SPF: Lists the servers authorized to send emails from a domain. SRV: + Application specific values that identify servers. TXT: Verifies email senders and + application-specific values. A query type you define by using the DNS type ID, for + example 28 for AAAA. The values must be defined as TYPENUMBER, where the NUMBER can be + 1-65334, for example, TYPE28. For more information, see List of DNS record types. """ function update_firewall_rule( FirewallDomainListId, @@ -2874,6 +3086,41 @@ function update_firewall_rule_group_association( ) end +""" + update_outpost_resolver(id) + update_outpost_resolver(id, params::Dict{String,<:Any}) + +You can use UpdateOutpostResolver to update the instance count, type, or name of a Resolver +on an Outpost. + +# Arguments +- `id`: A unique string that identifies Resolver on an Outpost. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceCount"`: The Amazon EC2 instance count for a Resolver on the Outpost. +- `"Name"`: Name of the Resolver on the Outpost. +- `"PreferredInstanceType"`: Amazon EC2 instance type. +""" +function update_outpost_resolver(Id; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "UpdateOutpostResolver", + Dict{String,Any}("Id" => Id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_outpost_resolver( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "UpdateOutpostResolver", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Id" => Id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_resolver_config(autodefined_reverse_flag, resource_id) update_resolver_config(autodefined_reverse_flag, resource_id, params::Dict{String,<:Any}) @@ -2977,7 +3224,7 @@ end update_resolver_endpoint(resolver_endpoint_id) update_resolver_endpoint(resolver_endpoint_id, params::Dict{String,<:Any}) -Updates the name, or enpoint type for an inbound or an outbound Resolver endpoint. You can +Updates the name, or endpoint type for an inbound or an outbound Resolver endpoint. You can only update between IPV4 and DUALSTACK, IPV6 endpoint type can't be updated to other type. # Arguments @@ -2986,9 +3233,22 @@ only update between IPV4 and DUALSTACK, IPV6 endpoint type can't be updated to o # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Name"`: The name of the Resolver endpoint that you want to update. 
+- `"Protocols"`: The protocols you want to use for the endpoint. DoH-FIPS is applicable + for inbound endpoints only. For an inbound endpoint you can apply the protocols as + follows: Do53 and DoH in combination. Do53 and DoH-FIPS in combination. Do53 alone. + DoH alone. DoH-FIPS alone. None, which is treated as Do53. For an outbound endpoint + you can apply the protocols as follows: Do53 and DoH in combination. Do53 alone. DoH + alone. None, which is treated as Do53. You can't change the protocol of an inbound + endpoint directly from only Do53 to only DoH, or DoH-FIPS. This is to prevent a sudden + disruption to incoming traffic that relies on Do53. To change the protocol from Do53 to + DoH, or DoH-FIPS, you must first enable both Do53 and DoH, or Do53 and DoH-FIPS, to make + sure that all incoming traffic has transferred to using the DoH protocol, or DoH-FIPS, and + then remove the Do53. - `"ResolverEndpointType"`: Specifies the endpoint type for what type of IP address the - endpoint uses to forward DNS queries. -- `"UpdateIpAddresses"`: Updates the Resolver endpoint type to IpV4, Ipv6, or dual-stack. + endpoint uses to forward DNS queries. Updating to IPV6 type isn't currently supported. +- `"UpdateIpAddresses"`: Specifies the IPv6 address when you update the Resolver endpoint + from IPv4 to dual-stack. If you don't specify an IPv6 address, one will be automatically + chosen from your subnet. """ function update_resolver_endpoint( ResolverEndpointId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/route_53.jl b/src/services/route_53.jl index e082273c5e..2af77fcb88 100644 --- a/src/services/route_53.jl +++ b/src/services/route_53.jl @@ -181,21 +181,22 @@ For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Developer Guide. Create, Delete, and Upsert Use ChangeResourceRecordsSetsRequest to perform the following actions: CREATE: Creates a resource record set that has the specified values. DELETE: Deletes an existing resource record set that has the specified -values. UPSERT: If a resource set exists Route 53 updates it with the values in the -request. Syntaxes for Creating, Updating, and Deleting Resource Record Sets The syntax -for a request depends on the type of resource record set that you want to create, delete, -or update, such as weighted, alias, or failover. The XML elements in your request must -appear in the order listed in the syntax. For an example for each type of resource record -set, see \"Examples.\" Don't refer to the syntax in the \"Parameter Syntax\" section, which -includes all of the elements for every kind of resource record set that you can create, -delete, or update by using ChangeResourceRecordSets. Change Propagation to Route 53 DNS -Servers When you submit a ChangeResourceRecordSets request, Route 53 propagates your -changes to all of the Route 53 authoritative DNS servers. While your changes are -propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange -returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within -60 seconds. For more information, see GetChange. Limits on ChangeResourceRecordSets -Requests For information about the limits on a ChangeResourceRecordSets request, see -Limits in the Amazon Route 53 Developer Guide. +values. UPSERT: If a resource set doesn't exist, Route 53 creates it. If a resource set +exists Route 53 updates it with the values in the request. 
Syntaxes for Creating, +Updating, and Deleting Resource Record Sets The syntax for a request depends on the type +of resource record set that you want to create, delete, or update, such as weighted, alias, +or failover. The XML elements in your request must appear in the order listed in the +syntax. For an example for each type of resource record set, see \"Examples.\" Don't refer +to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for +every kind of resource record set that you can create, delete, or update by using +ChangeResourceRecordSets. Change Propagation to Route 53 DNS Servers When you submit a +ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 +authoritative DNS servers managing the hosted zone. While your changes are propagating, +GetChange returns a status of PENDING. When propagation is complete, GetChange returns a +status of INSYNC. Changes generally propagate to all Route 53 name servers managing the +hosted zone within 60 seconds. For more information, see GetChange. Limits on +ChangeResourceRecordSets Requests For information about the limits on a +ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide. # Arguments - `change_batch`: A complex type that contains an optional comment and the Changes element. @@ -356,7 +357,9 @@ metrics and alarms by using the CloudWatch console, see the Amazon CloudWatch Us CallerReference as an existing health check but with different settings, Route 53 returns a HealthCheckAlreadyExists error. If you send a CreateHealthCheck request with a unique CallerReference but settings identical to an existing health check, Route 53 creates the - health check. + health check. Route 53 does not store the CallerReference for a deleted health check + indefinitely. The CallerReference for a deleted health check will be deleted after a number + of days. - `health_check_config`: A complex type that contains settings for a new health check. """ @@ -447,6 +450,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DelegationSetId"`: If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet. + If you are using a reusable delegation set to create a public hosted zone for a subdomain, + make sure that the parent hosted zone doesn't use one or more of the same name servers. If + you have overlapping nameservers, the operation will cause a ConflictingDomainsExist error. - `"HostedZoneConfig"`: (Optional) A complex type that contains the following optional values: For public and private hosted zones, an optional comment For private hosted zones, an optional PrivateZone element If you don't specify a comment or the PrivateZone @@ -808,6 +814,11 @@ specified traffic policy version. In addition, CreateTrafficPolicyInstance assoc resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created. +After you submit an CreateTrafficPolicyInstance request, there's a brief delay while Amazon +Route 53 creates the resource record sets that are specified in the traffic policy +definition. 
Use GetTrafficPolicyInstance with the id of new traffic policy instance to +confirm that the CreateTrafficPolicyInstance request completed successfully. For more +information, see the State response element. # Arguments - `hosted_zone_id`: The ID of the hosted zone that you want Amazon Route 53 to create @@ -1534,8 +1545,9 @@ end Returns the current status of a change batch request. The status is one of the following values: PENDING indicates that the changes in this request have not propagated to all -Amazon Route 53 DNS servers. This is the initial status of all change batch requests. -INSYNC indicates that the changes have propagated to all Route 53 DNS servers. +Amazon Route 53 DNS servers managing the hosted zone. This is the initial status of all +change batch requests. INSYNC indicates that the changes have propagated to all Route 53 +DNS servers managing the hosted zone. # Arguments - `id`: The ID of the change batch request. The value that you specify here is the value @@ -1646,7 +1658,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Africa AN: Antarctica AS: Asia EU: Europe OC: Oceania NA: North America SA: South America - `"countrycode"`: Amazon Route 53 uses the two-letter country codes that are specified in - ISO standard 3166-1 alpha-2. + ISO standard 3166-1 alpha-2. Route 53 also supports the country code UA for Ukraine. - `"subdivisioncode"`: The code for the subdivision, such as a particular state within the United States. For a list of US state abbreviations, see Appendix B: Two–Letter State and Possession Abbreviations on the United States Postal Service website. For a list of all @@ -2054,11 +2066,11 @@ end get_traffic_policy_instance(id) get_traffic_policy_instance(id, params::Dict{String,<:Any}) -Gets information about a specified traffic policy instance. After you submit a -CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief -delay while Amazon Route 53 creates the resource record sets that are specified in the -traffic policy definition. For more information, see the State response element. In the -Route 53 console, traffic policy instances are known as policy records. +Gets information about a specified traffic policy instance. Use GetTrafficPolicyInstance +with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance +or an UpdateTrafficPolicyInstance request completed successfully. For more information, see +the State response element. In the Route 53 console, traffic policy instances are known +as policy records. # Arguments - `id`: The ID of the traffic policy instance that you want to get information about. @@ -2299,9 +2311,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys request. If the value of IsTruncated in the previous response was false, there are no more health checks to get. - `"maxitems"`: The maximum number of health checks that you want ListHealthChecks to - return in response to the current request. Amazon Route 53 returns a maximum of 100 items. - If you set MaxItems to a value greater than 100, Route 53 returns only the first 100 health - checks. + return in response to the current request. Amazon Route 53 returns a maximum of 1000 items. + If you set MaxItems to a value greater than 1000, Route 53 returns only the first 1000 + health checks. 
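+
+# Examples
+A minimal sketch; passing `maxitems` as a string through `params` is an assumption about
+how this optional key is supplied:
+
+```julia
+list_health_checks(Dict("maxitems" => "100"))
+```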
""" function list_health_checks(; aws_config::AbstractAWSConfig=global_aws_config()) return route_53( @@ -2338,6 +2350,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"delegationsetid"`: If you're using reusable delegation sets and you want to list all of the hosted zones that are associated with a reusable delegation set, specify the ID of that reusable delegation set. +- `"hostedzonetype"`: (Optional) Specifies if the hosted zone is private. - `"marker"`: If the value of IsTruncated in the previous response was true, you have more hosted zones. To get more hosted zones, submit another ListHostedZones request. For the value of marker, specify the value of NextMarker from the previous response, which is the @@ -3098,7 +3111,9 @@ end Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask. This call only supports querying public -hosted zones. +hosted zones. The TestDnsAnswer returns information similar to what you would expect from +the answer section of the dig command. Therefore, if you query for the name servers of a +subdomain that point to the parent name servers, those will not be returned. # Arguments - `hostedzoneid`: The ID of the hosted zone that you want Amazon Route 53 to simulate a @@ -3233,22 +3248,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys each of the above cases. If you don't specify a value for IPAddress: If you don't specify a value for IPAddress, Route 53 sends a DNS request to the domain that you specify in FullyQualifiedDomainName at the interval you specify in RequestInterval. Using an IPv4 - address that is returned by DNS, Route 53 then checks the health of the endpoint. If you - don't specify a value for IPAddress, Route 53 uses only IPv4 to send health checks to the - endpoint. If there's no resource record set with a type of A for the name that you specify - for FullyQualifiedDomainName, the health check fails with a \"DNS resolution failed\" - error. If you want to check the health of weighted, latency, or failover resource record - sets and you choose to specify the endpoint only by FullyQualifiedDomainName, we recommend - that you create a separate health check for each endpoint. For example, create a health - check for each HTTP server that is serving content for www.example.com. For the value of - FullyQualifiedDomainName, specify the domain name of the server (such as - us-east-2-www.example.com), not the name of the resource record sets (www.example.com). In - this configuration, if the value of FullyQualifiedDomainName matches the name of the - resource record sets and you then associate the health check with those resource record - sets, health check results will be unpredictable. In addition, if the value of Type is - HTTP, HTTPS, HTTP_STR_MATCH, or HTTPS_STR_MATCH, Route 53 passes the value of - FullyQualifiedDomainName in the Host header, as it does when you specify a value for - IPAddress. If the value of Type is TCP, Route 53 doesn't pass a Host header. + address that is returned by DNS, Route 53 then checks the health of the endpoint. If you + don't specify a value for IPAddress, you can’t update the health check to remove the + FullyQualifiedDomainName; if you don’t specify a value for IPAddress on creation, a + FullyQualifiedDomainName is required. 
If you don't specify a value for IPAddress, Route 53 + uses only IPv4 to send health checks to the endpoint. If there's no resource record set + with a type of A for the name that you specify for FullyQualifiedDomainName, the health + check fails with a \"DNS resolution failed\" error. If you want to check the health of + weighted, latency, or failover resource record sets and you choose to specify the endpoint + only by FullyQualifiedDomainName, we recommend that you create a separate health check for + each endpoint. For example, create a health check for each HTTP server that is serving + content for www.example.com. For the value of FullyQualifiedDomainName, specify the domain + name of the server (such as us-east-2-www.example.com), not the name of the resource record + sets (www.example.com). In this configuration, if the value of FullyQualifiedDomainName + matches the name of the resource record sets and you then associate the health check with + those resource record sets, health check results will be unpredictable. In addition, if + the value of Type is HTTP, HTTPS, HTTP_STR_MATCH, or HTTPS_STR_MATCH, Route 53 passes the + value of FullyQualifiedDomainName in the Host header, as it does when you specify a value + for IPAddress. If the value of Type is TCP, Route 53 doesn't pass a Host header. - `"HealthCheckVersion"`: A sequential counter that Amazon Route 53 sets to 1 when you create a health check and increments by 1 each time you update settings for the health check. We recommend that you use GetHealthCheck or ListHealthChecks to get the current @@ -3421,17 +3438,22 @@ end update_traffic_policy_instance(id, ttl, traffic_policy_id, traffic_policy_version) update_traffic_policy_instance(id, ttl, traffic_policy_id, traffic_policy_version, params::Dict{String,<:Any}) -Updates the resource record sets in a specified hosted zone that were created based on the -settings in a specified traffic policy version. When you update a traffic policy instance, -Amazon Route 53 continues to respond to DNS queries for the root resource record set name -(such as example.com) while it replaces one group of resource record sets with another. -Route 53 performs the following operations: Route 53 creates a new group of resource -record sets based on the specified traffic policy. This is true regardless of how -significant the differences are between the existing resource record sets and the new -resource record sets. When all of the new resource record sets have been created, Route -53 starts to respond to DNS queries for the root resource record set name (such as -example.com) by using the new resource record sets. Route 53 deletes the old group of -resource record sets that are associated with the root resource record set name. + After you submit a UpdateTrafficPolicyInstance request, there's a brief delay while +Route 53 creates the resource record sets that are specified in the traffic policy +definition. Use GetTrafficPolicyInstance with the id of updated traffic policy instance +confirm that the UpdateTrafficPolicyInstance request completed successfully. For more +information, see the State response element. Updates the resource record sets in a +specified hosted zone that were created based on the settings in a specified traffic policy +version. When you update a traffic policy instance, Amazon Route 53 continues to respond to +DNS queries for the root resource record set name (such as example.com) while it replaces +one group of resource record sets with another. 
Route 53 performs the following operations: + Route 53 creates a new group of resource record sets based on the specified traffic +policy. This is true regardless of how significant the differences are between the existing +resource record sets and the new resource record sets. When all of the new resource +record sets have been created, Route 53 starts to respond to DNS queries for the root +resource record set name (such as example.com) by using the new resource record sets. +Route 53 deletes the old group of resource record sets that are associated with the root +resource record set name. # Arguments - `id`: The ID of the traffic policy instance that you want to update. diff --git a/src/services/route_53_domains.jl b/src/services/route_53_domains.jl index 722d025da1..b5b139de05 100644 --- a/src/services/route_53_domains.jl +++ b/src/services/route_53_domains.jl @@ -744,7 +744,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value of NextPageMarker in the Marker element. - `"MaxItems"`: Number of domains to be returned. Default: 20 - `"SortBy"`: The sort type for returned values. -- `"SortOrder"`: The sort order ofr returned values, either ascending or descending. +- `"SortOrder"`: The sort order for returned values, either ascending or descending. - `"Status"`: The status of the operations. - `"SubmittedSince"`: An optional parameter that lets you get information about all the operations that you submitted after a specified date and time. Specify the date and time in @@ -878,24 +878,22 @@ end register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact) register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact, params::Dict{String,<:Any}) -This operation registers a domain. Domains are registered either by Amazon Registrar (for -.com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains). -For some top-level domains (TLDs), this operation requires extra parameters. When you -register a domain, Amazon Route 53 does the following: Creates a Route 53 hosted zone -that has the same name as the domain. Route 53 assigns four name servers to your hosted -zone and automatically updates your domain registration with the names of these name -servers. Enables auto renew, so your domain registration will renew automatically each -year. We'll notify you in advance of the renewal date so you can choose whether to renew -the registration. Optionally enables privacy protection, so WHOIS queries return contact -information either for Amazon Registrar (for .com, .net, and .org domains) or for our -registrar associate, Gandi (for all other TLDs). If you don't enable privacy protection, -WHOIS queries return the information that you entered for the administrative, registrant, -and technical contacts. You must specify the same privacy setting for the administrative, -registrant, and technical contacts. If registration is successful, returns an operation -ID that you can use to track the progress and completion of the action. If the request is -not completed successfully, the domain registrant is notified by email. Charges your -Amazon Web Services account an amount based on the top-level domain. For more information, -see Amazon Route 53 Pricing. +This operation registers a domain. For some top-level domains (TLDs), this operation +requires extra parameters. 
When you register a domain, Amazon Route 53 does the following: + Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four +name servers to your hosted zone and automatically updates your domain registration with +the names of these name servers. Enables auto renew, so your domain registration will +renew automatically each year. We'll notify you in advance of the renewal date so you can +choose whether to renew the registration. Optionally enables privacy protection, so WHOIS +queries return contact for the registrar or the phrase \"REDACTED FOR PRIVACY\", or \"On +behalf of <domain name> owner.\" If you don't enable privacy protection, WHOIS +queries return the information that you entered for the administrative, registrant, and +technical contacts. While some domains may allow different privacy settings per contact, +we recommend specifying the same privacy setting for all contacts. If registration is +successful, returns an operation ID that you can use to track the progress and completion +of the action. If the request is not completed successfully, the domain registrant is +notified by email. Charges your Amazon Web Services account an amount based on the +top-level domain. For more information, see Amazon Route 53 Pricing. # Arguments - `admin_contact`: Provides detailed contact information. For information about the values @@ -923,26 +921,33 @@ see Amazon Route 53 Pricing. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoRenew"`: Indicates whether the domain will be automatically renewed (true) or not (false). Auto renewal only takes effect after the account is charged. Default: true +- `"BillingContact"`: Provides detailed contact information. For information about the + values that you specify for each element, see ContactDetail. - `"IdnLangCode"`: Reserved for future use. - `"PrivacyProtectAdminContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information - either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar - associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the - information that you entered for the admin contact. You must specify the same privacy - setting for the administrative, registrant, and technical contacts. Default: true + either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, + WHOIS queries return the information that you entered for the admin contact. You must + specify the same privacy setting for the administrative, billing, registrant, and technical + contacts. Default: true +- `"PrivacyProtectBillingContact"`: Whether you want to conceal contact information from + WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information + either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, + WHOIS queries return the information that you entered for the billing contact. You must + specify the same privacy setting for the administrative, billing, registrant, and technical + contacts. - `"PrivacyProtectRegistrantContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information - either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar - associate, Gandi (for all other TLDs). 
If you specify false, WHOIS queries return the - information that you entered for the registrant contact (the domain owner). You must - specify the same privacy setting for the administrative, registrant, and technical - contacts. Default: true + either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, + WHOIS queries return the information that you entered for the registrant contact (the + domain owner). You must specify the same privacy setting for the administrative, billing, + registrant, and technical contacts. Default: true - `"PrivacyProtectTechContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either - for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, - Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that - you entered for the technical contact. You must specify the same privacy setting for the - administrative, registrant, and technical contacts. Default: true + for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS + queries return the information that you entered for the technical contact. You must + specify the same privacy setting for the administrative, billing, registrant, and technical + contacts. Default: true """ function register_domain( AdminContact, @@ -1199,27 +1204,28 @@ end transfer_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact) transfer_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact, params::Dict{String,<:Any}) -Transfers a domain from another registrar to Amazon Route 53. When the transfer is -complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org -domains) or with our registrar associate, Gandi (for all other TLDs). For more information -about transferring domains, see the following topics: For transfer requirements, a -detailed procedure, and information about viewing the status of a domain that you're -transferring to Route 53, see Transferring Registration for a Domain to Amazon Route 53 in -the Amazon Route 53 Developer Guide. For information about how to transfer a domain from -one Amazon Web Services account to another, see TransferDomainToAnotherAwsAccount. For -information about how to transfer a domain to another domain registrar, see Transferring a -Domain from Amazon Route 53 to Another Registrar in the Amazon Route 53 Developer Guide. -If the registrar for your domain is also the DNS service provider for the domain, we highly -recommend that you transfer your DNS service to Route 53 or to another DNS service provider -before you transfer your registration. Some registrars provide free DNS service when you -purchase a domain registration. When you transfer the registration, the previous registrar -will not renew your domain registration and could end your DNS service at any time. If the -registrar for your domain is also the DNS service provider for the domain and you don't -transfer DNS service to another provider, your website, email, and the web applications -associated with the domain might become unavailable. If the transfer is successful, this -method returns an operation ID that you can use to track the progress and completion of the -action. If the transfer doesn't complete successfully, the domain registrant will be -notified by email. 
+Transfers a domain from another registrar to Amazon Route 53. For more information about +transferring domains, see the following topics: For transfer requirements, a detailed +procedure, and information about viewing the status of a domain that you're transferring to +Route 53, see Transferring Registration for a Domain to Amazon Route 53 in the Amazon Route +53 Developer Guide. For information about how to transfer a domain from one Amazon Web +Services account to another, see TransferDomainToAnotherAwsAccount. For information +about how to transfer a domain to another domain registrar, see Transferring a Domain from +Amazon Route 53 to Another Registrar in the Amazon Route 53 Developer Guide. During the +transfer of any country code top-level domains (ccTLDs) to Route 53, except for .cc and +.tv, updates to the owner contact are ignored and the owner contact data from the registry +is used. You can update the owner contact after the transfer is complete. For more +information, see UpdateDomainContact. If the registrar for your domain is also the DNS +service provider for the domain, we highly recommend that you transfer your DNS service to +Route 53 or to another DNS service provider before you transfer your registration. Some +registrars provide free DNS service when you purchase a domain registration. When you +transfer the registration, the previous registrar will not renew your domain registration +and could end your DNS service at any time. If the registrar for your domain is also the +DNS service provider for the domain and you don't transfer DNS service to another provider, +your website, email, and the web applications associated with the domain might become +unavailable. If the transfer is successful, this method returns an operation ID that you +can use to track the progress and completion of the action. If the transfer doesn't +complete successfully, the domain registrant will be notified by email. # Arguments - `admin_contact`: Provides detailed contact information. @@ -1242,27 +1248,32 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys registrar. - `"AutoRenew"`: Indicates whether the domain will be automatically renewed (true) or not (false). Auto renewal only takes effect after the account is charged. Default: true +- `"BillingContact"`: Provides detailed contact information. - `"IdnLangCode"`: Reserved for future use. - `"Nameservers"`: Contains details for the host and glue IP addresses. - `"PrivacyProtectAdminContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information - either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar - associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the - information that you entered for the admin contact. You must specify the same privacy - setting for the administrative, registrant, and technical contacts. Default: true + for the registrar, the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain + name> owner.\". While some domains may allow different privacy settings per contact, we + recommend specifying the same privacy setting for all contacts. Default: true +- `"PrivacyProtectBillingContact"`: Whether you want to conceal contact information from + WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information + either for Amazon Registrar or for our registrar associate, Gandi. 
If you specify false, + WHOIS queries return the information that you entered for the billing contact. You must + specify the same privacy setting for the administrative, billing, registrant, and technical + contacts. - `"PrivacyProtectRegistrantContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information - either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar - associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the - information that you entered for the registrant contact (domain owner). You must specify - the same privacy setting for the administrative, registrant, and technical contacts. - Default: true + either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, + WHOIS queries return the information that you entered for the registrant contact (domain + owner). You must specify the same privacy setting for the administrative, billing, + registrant, and technical contacts. Default: true - `"PrivacyProtectTechContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either - for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, - Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that - you entered for the technical contact. You must specify the same privacy setting for the - administrative, registrant, and technical contacts. Default: true + for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS + queries return the information that you entered for the technical contact. You must + specify the same privacy setting for the administrative, billing, registrant, and technical + contacts. Default: true """ function transfer_domain( AdminContact, @@ -1387,7 +1398,9 @@ domain registrant will be notified by email. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AdminContact"`: Provides detailed contact information. -- `"Consent"`: Customer's consent for the owner change request. +- `"BillingContact"`: Provides detailed contact information. +- `"Consent"`: Customer's consent for the owner change request. Required if the domain is + not free (consent price is more than 0.00). - `"RegistrantContact"`: Provides detailed contact information. - `"TechContact"`: Provides detailed contact information. """ @@ -1421,11 +1434,11 @@ end update_domain_contact_privacy(domain_name, params::Dict{String,<:Any}) This operation updates the specified domain contact's privacy setting. When privacy -protection is enabled, contact information such as email address is replaced either with -contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact -information for our registrar associate, Gandi. You must specify the same privacy setting -for the administrative, registrant, and technical contacts. This operation affects only -the contact information for the specified contact type (administrative, registrant, or +protection is enabled, your contact information is replaced with contact information for +the registrar or with the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain +name> owner.\" While some domains may allow different privacy settings per contact, we +recommend specifying the same privacy setting for all contacts. 
This operation affects +only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email. By @@ -1444,22 +1457,25 @@ on our privacy practices, see https://aws.amazon.com/privacy/. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AdminPrivacy"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon - Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all - other TLDs). If you specify false, WHOIS queries return the information that you entered - for the admin contact. You must specify the same privacy setting for the administrative, - registrant, and technical contacts. + Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return + the information that you entered for the admin contact. You must specify the same privacy + setting for the administrative, billing, registrant, and technical contacts. +- `"BillingPrivacy"`: Whether you want to conceal contact information from WHOIS queries. + If you specify true, WHOIS (\"who is\") queries return contact information either for + Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries + return the information that you entered for the billing contact. You must specify the + same privacy setting for the administrative, billing, registrant, and technical contacts. - `"RegistrantPrivacy"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either - for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, - Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that - you entered for the registrant contact (domain owner). You must specify the same privacy - setting for the administrative, registrant, and technical contacts. + for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS + queries return the information that you entered for the registrant contact (domain owner). + You must specify the same privacy setting for the administrative, billing, registrant, and + technical contacts. - `"TechPrivacy"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon - Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all - other TLDs). If you specify false, WHOIS queries return the information that you entered - for the technical contact. You must specify the same privacy setting for the - administrative, registrant, and technical contacts. + Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return + the information that you entered for the technical contact. You must specify the same + privacy setting for the administrative, billing, registrant, and technical contacts. 
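+# Example
+A minimal sketch of a call that applies one privacy setting to every contact type. The
+domain name is a placeholder, and the module is loaded with the usual AWS.jl `@service`
+pattern:
+```julia
+using AWS
+@service Route_53_Domains
+
+# Conceal WHOIS contact information for all four contact types of a hypothetical domain.
+Route_53_Domains.update_domain_contact_privacy(
+    "example.com",
+    Dict(
+        "AdminPrivacy" => true,
+        "BillingPrivacy" => true,
+        "RegistrantPrivacy" => true,
+        "TechPrivacy" => true,
+    ),
+)
+```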
""" function update_domain_contact_privacy( DomainName; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/rum.jl b/src/services/rum.jl index 211336b03e..f8bf48ac0a 100644 --- a/src/services/rum.jl +++ b/src/services/rum.jl @@ -12,15 +12,15 @@ Specifies the extended metrics and custom metrics that you want a CloudWatch RUM monitor to send to a destination. Valid destinations include CloudWatch and Evidently. By default, RUM app monitors send some metrics to CloudWatch. These default metrics are listed in CloudWatch metrics that you can collect with CloudWatch RUM. In addition to these -default metrics, you can choose to send extended metrics or custom metrics or both. -Extended metrics enable you to send metrics with additional dimensions not included in the -default metrics. You can also send extended metrics to Evidently as well as CloudWatch. The -valid dimension names for the additional dimensions for extended metrics are BrowserName, -CountryCode, DeviceType, FileType, OSName, and PageId. For more information, see Extended -metrics that you can send to CloudWatch and CloudWatch Evidently. Custom metrics are -metrics that you define. You can send custom metrics to CloudWatch or to CloudWatch -Evidently or to both. With custom metrics, you can use any metric name and namespace, and -to derive the metrics you can use any custom events, built-in events, custom attributes, or +default metrics, you can choose to send extended metrics, custom metrics, or both. +Extended metrics let you send metrics with additional dimensions that aren't included in +the default metrics. You can also send extended metrics to both Evidently and CloudWatch. +The valid dimension names for the additional dimensions for extended metrics are +BrowserName, CountryCode, DeviceType, FileType, OSName, and PageId. For more information, +see Extended metrics that you can send to CloudWatch and CloudWatch Evidently. Custom +metrics are metrics that you define. You can send custom metrics to CloudWatch. CloudWatch +Evidently, or both. With custom metrics, you can use any metric name and namespace. To +derive the metrics, you can use any custom events, built-in events, custom attributes, or default attributes. You can't send custom metrics to the AWS/RUM namespace. You must send custom metrics to a custom namespace that you define. The namespace that you use can't start with AWS/. CloudWatch RUM prepends RUM/CustomMetrics/ to the custom namespace that @@ -40,9 +40,9 @@ return errors, but all valid metric definitions in the same operation still succ - `app_monitor_name`: The name of the CloudWatch RUM app monitor that is to send the metrics. - `destination`: The destination to send the metrics to. Valid values are CloudWatch and - Evidently. If you specify Evidently, you must also specify the ARN of the - CloudWatchEvidently experiment that will receive the metrics and an IAM role that has - permission to write to the experiment. + Evidently. If you specify Evidently, you must also specify the Amazon Resource Name (ARN) + of the CloudWatchEvidently experiment that will receive the metrics and an IAM role that + has permission to write to the experiment. - `metric_definitions`: An array of structures which define the metrics that you want to send. @@ -627,9 +627,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DestinationArn"`: Use this parameter only if Destination is Evidently. 
This parameter specifies the ARN of the Evidently experiment that will receive the extended metrics. - `"IamRoleArn"`: This parameter is required if Destination is Evidently. If Destination is - CloudWatch, do not use this parameter. This parameter specifies the ARN of an IAM role that + CloudWatch, don't use this parameter. This parameter specifies the ARN of an IAM role that RUM will assume to write to the Evidently experiment that you are sending metrics to. This - role must have permission to write to that experiment. + role must have permission to write to that experiment. If you specify this parameter, you + must be signed on to a role that has PassRole permissions attached to it, to allow the role + to be passed. The CloudWatchAmazonCloudWatchRUMFullAccess policy doesn't include PassRole + permissions. """ function put_rum_metrics_destination( AppMonitorName, Destination; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/s3.jl b/src/services/s3.jl index 7336ea8c8d..d9db5a2659 100644 --- a/src/services/s3.jl +++ b/src/services/s3.jl @@ -8,39 +8,65 @@ using AWS.UUIDs abort_multipart_upload(bucket, key, upload_id) abort_multipart_upload(bucket, key, upload_id, params::Dict{String,<:Any}) -This action aborts a multipart upload. After a multipart upload is aborted, no additional -parts can be uploaded using that upload ID. The storage consumed by any previously uploaded -parts will be freed. However, if any part uploads are currently in progress, those part -uploads might or might not succeed. As a result, it might be necessary to abort a given -multipart upload multiple times in order to completely free all storage consumed by all -parts. To verify that all parts have been removed, so you don't get charged for the part -storage, you should call the ListParts action and ensure that the parts list is empty. For -information about permissions required to use the multipart upload, see Multipart Upload -and Permissions. The following operations are related to AbortMultipartUpload: -CreateMultipartUpload UploadPart CompleteMultipartUpload ListParts -ListMultipartUploads +This operation aborts a multipart upload. After a multipart upload is aborted, no +additional parts can be uploaded using that upload ID. The storage consumed by any +previously uploaded parts will be freed. However, if any part uploads are currently in +progress, those part uploads might or might not succeed. As a result, it might be necessary +to abort a given multipart upload multiple times in order to completely free all storage +consumed by all parts. To verify that all parts have been removed and prevent getting +charged for the part storage, you should call the ListParts API operation and ensure that +the parts list is empty. Directory buckets - For directory buckets, you must make +requests for this API operation to the Zonal endpoint. These endpoints support +virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Permissions General purpose bucket permissions - For information about +permissions required to use the multipart upload, see Multipart Upload and Permissions in +the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API +operation on a directory bucket, we recommend that you use the CreateSession API +operation for session-based authorization. 
Specifically, you grant the +s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM +identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a +session token. With the session token in your request header, you can make API requests to +this operation. After the session token expires, you make another CreateSession API call to +generate a new session token for use. Amazon Web Services CLI or SDKs create session and +refresh the session token automatically to avoid service interruptions when a session +expires. For more information about authorization, see CreateSession . HTTP Host header +syntax Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to +AbortMultipartUpload: CreateMultipartUpload UploadPart CompleteMultipartUpload + ListParts ListMultipartUploads # Arguments -- `bucket`: The bucket name to which the upload was taking place. When using this action - with an access point, you must direct requests to the access point hostname. The access - point hostname takes the form - AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with - an access point through the Amazon Web Services SDKs, you provide the access point ARN in - place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. +- `bucket`: The bucket name to which the upload was taking place. Directory buckets - + When you use this operation with a directory bucket, you must use virtual-hosted-style + requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style + requests are not supported. Directory bucket names must be unique in the chosen + Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming + restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points + - When you use this action with an access point, you must provide the alias of the access + point in place of the bucket name or specify the access point ARN. When using the access + point ARN, you must direct requests to the access point hostname. The access point hostname + takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using + this action with an access point through the Amazon Web Services SDKs, you provide the + access point ARN in place of the bucket name. For more information about access point ARNs, + see Using access points in the Amazon S3 User Guide. Access points and Object Lambda + access points are not supported by directory buckets. S3 on Outposts - When you use this + action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + The S3 on Outposts hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Key of the object for which the multipart upload was initiated. - `upload_id`: Upload ID that identifies the multipart upload. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: """ function abort_multipart_upload( @@ -77,57 +103,86 @@ end complete_multipart_upload(bucket, key, upload_id, params::Dict{String,<:Any}) Completes a multipart upload by assembling previously uploaded parts. You first initiate -the multipart upload and then upload all parts using the UploadPart operation. After -successfully uploading all relevant parts of an upload, you call this action to complete -the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending -order by part number to create a new object. In the Complete Multipart Upload request, you -must provide the parts list. You must ensure that the parts list is complete. This action +the multipart upload and then upload all parts using the UploadPart operation or the +UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you +call this CompleteMultipartUpload operation to complete the upload. Upon receiving this +request, Amazon S3 concatenates all the parts in ascending order by part number to create a +new object. In the CompleteMultipartUpload request, you must provide the parts list and +ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must -provide the part number and the ETag value, returned after that part was uploaded. -Processing of a Complete Multipart Upload request could take several minutes to complete. -After Amazon S3 begins processing the request, it sends an HTTP response header that -specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends -white space characters to keep the connection from timing out. A request could fail after -the initial 200 OK response has been sent. This means that a 200 OK response can contain -either a success or an error. If you call the S3 API directly, make sure to design your -application to parse the contents of the response and handle it appropriately. If you use -Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error -and apply error handling per your configuration settings (including automatically retrying -the request as appropriate). If the condition persists, the SDKs throws an exception (or, -for the SDKs that don't use exceptions, they return the error). Note that if -CompleteMultipartUpload fails, applications should be prepared to retry the failed -requests. For more information, see Amazon S3 Error Best Practices. You cannot use -Content-Type: application/x-www-form-urlencoded with Complete Multipart Upload requests. 
-Also, if you do not provide a Content-Type header, CompleteMultipartUpload returns a 200 OK -response. For more information about multipart uploads, see Uploading Objects Using -Multipart Upload. For information about permissions required to use the multipart upload -API, see Multipart Upload and Permissions. CompleteMultipartUpload has the following -special errors: Error code: EntityTooSmall Description: Your proposed upload is -smaller than the minimum allowed object size. Each part must be at least 5 MB in size, -except the last part. 400 Bad Request Error code: InvalidPart Description: One or -more of the specified parts could not be found. The part might not have been uploaded, or -the specified entity tag might not have matched the part's entity tag. 400 Bad Request - Error code: InvalidPartOrder Description: The list of parts was not in ascending order. -The parts list must be specified in order by part number. 400 Bad Request Error code: -NoSuchUpload Description: The specified multipart upload does not exist. The upload ID -might be invalid, or the multipart upload might have been aborted or completed. 404 Not -Found The following operations are related to CompleteMultipartUpload: -CreateMultipartUpload UploadPart AbortMultipartUpload ListParts -ListMultipartUploads +provide the PartNumber value and the ETag value that are returned after that part was +uploaded. The processing of a CompleteMultipartUpload request could take several minutes to +finalize. After Amazon S3 begins processing the request, it sends an HTTP response header +that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically +sends white space characters to keep the connection from timing out. A request could fail +after the initial 200 OK response has been sent. This means that a 200 OK response can +contain either a success or an error. The error response might be embedded in the 200 OK +response. If you call this API operation directly, make sure to design your application to +parse the contents of the response and handle it appropriately. If you use Amazon Web +Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply +error handling per your configuration settings (including automatically retrying the +request as appropriate). If the condition persists, the SDKs throw an exception (or, for +the SDKs that don't use exceptions, they return an error). Note that if +CompleteMultipartUpload fails, applications should be prepared to retry any failed requests +(including 500 error responses). For more information, see Amazon S3 Error Best Practices. +You can't use Content-Type: application/x-www-form-urlencoded for the +CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, +CompleteMultipartUpload can still return a 200 OK response. For more information about +multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User +Guide. Directory buckets - For directory buckets, you must make requests for this API +operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in +the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +requests are not supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. Permissions General purpose bucket permissions - For +information about permissions required to use the multipart upload API, see Multipart +Upload and Permissions in the Amazon S3 User Guide. 
Directory bucket permissions - To +grant access to this API operation on a directory bucket, we recommend that you use the +CreateSession API operation for session-based authorization. Specifically, you grant the +s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM +identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a +session token. With the session token in your request header, you can make API requests to +this operation. After the session token expires, you make another CreateSession API call to +generate a new session token for use. Amazon Web Services CLI or SDKs create session and +refresh the session token automatically to avoid service interruptions when a session +expires. For more information about authorization, see CreateSession . Special errors + Error Code: EntityTooSmall Description: Your proposed upload is smaller than the +minimum allowed object size. Each part must be at least 5 MB in size, except the last part. + HTTP Status Code: 400 Bad Request Error Code: InvalidPart Description: One or more +of the specified parts could not be found. The part might not have been uploaded, or the +specified ETag might not have matched the uploaded part's ETag. HTTP Status Code: 400 Bad +Request Error Code: InvalidPartOrder Description: The list of parts was not in +ascending order. The parts list must be specified in order by part number. HTTP Status +Code: 400 Bad Request Error Code: NoSuchUpload Description: The specified multipart +upload does not exist. The upload ID might be invalid, or the multipart upload might have +been aborted or completed. HTTP Status Code: 404 Not Found HTTP Host header syntax +Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to +CompleteMultipartUpload: CreateMultipartUpload UploadPart AbortMultipartUpload + ListParts ListMultipartUploads # Arguments -- `bucket`: Name of the bucket to which the multipart upload was initiated. When using this - action with an access point, you must direct requests to the access point hostname. The - access point hostname takes the form +- `bucket`: Name of the bucket to which the multipart upload was initiated. Directory + buckets - When you use this operation with a directory bucket, you must use + virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `upload_id`: ID for the initiated multipart upload. @@ -151,21 +206,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-server-side-encryption-customer-algorithm"`: The server-side encryption (SSE) - algorithm used to encrypt the object. This parameter is needed only when the object was - created using a checksum algorithm. For more information, see Protecting data using SSE-C - keys in the Amazon S3 User Guide. + algorithm used to encrypt the object. This parameter is required only when the object was + created using a checksum algorithm or if your bucket policy requires the use of SSE-C. For + more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide. This + functionality is not supported for directory buckets. - `"x-amz-server-side-encryption-customer-key"`: The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User - Guide. + Guide. This functionality is not supported for directory buckets. - `"x-amz-server-side-encryption-customer-key-MD5"`: The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the - Amazon S3 User Guide. + Amazon S3 User Guide. This functionality is not supported for directory buckets. """ function complete_multipart_upload( Bucket, Key, uploadId; aws_config::AbstractAWSConfig=global_aws_config() @@ -204,240 +260,382 @@ Creates a copy of an object that is already stored in Amazon S3. You can store objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. 
However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more -information, see Copy Object Using the REST Multipart Upload API. All copy requests must -be authenticated. Additionally, you must have read access to the source object and write -access to the destination bucket. For more information, see REST Authentication. Both the -Region that you want to copy the object from and the Region that you want to copy the -object to must be enabled for your account. A copy request might return an error when -Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error -occurs before the copy action starts, you receive a standard Amazon S3 error. If the error -occurs during the copy operation, the error response is embedded in the 200 OK response. -This means that a 200 OK response can contain either a success or an error. If you call the -S3 API directly, make sure to design your application to parse the contents of the response -and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this -condition. The SDKs detect the embedded error and apply error handling per your -configuration settings (including automatically retrying the request as appropriate). If -the condition persists, the SDKs throws an exception (or, for the SDKs that don't use -exceptions, they return the error). If the copy is successful, you receive a response with -information about the copied object. If the request is an HTTP 1.1 request, the response -is chunk encoded. If it were not, it would not contain the content-length, and you would -need to read the entire body. The copy request charge is based on the storage class and -Region that you specify for the destination object. For pricing information, see Amazon S3 -pricing. Amazon S3 transfer acceleration does not support cross-Region copies. If you -request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad -Request error. For more information, see Transfer Acceleration. Metadata When copying an -object, you can preserve all metadata (default) or specify new metadata. However, the ACL -is not preserved and is set to private for the user making the request. To override the -default ACL setting, specify a new ACL when generating a copy request. For more -information, see Using ACLs. To specify whether you want the object metadata copied from -the source object or replaced with metadata provided in the request, you can optionally add -the x-amz-metadata-directive header. When you grant permissions, you can use the -s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects -are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 -User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, -Resources, and Condition Keys for Amazon S3. x-amz-website-redirect-location is unique to -each object and must be specified in the request headers to copy the value. 
-x-amz-copy-source-if Headers To only copy an object under certain conditions, such as -whether the Etag matches or whether the object was modified before or after a specified -date, use the following request parameters: x-amz-copy-source-if-match -x-amz-copy-source-if-none-match x-amz-copy-source-if-unmodified-since -x-amz-copy-source-if-modified-since If both the x-amz-copy-source-if-match and -x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as -follows, Amazon S3 returns 200 OK and copies the data: x-amz-copy-source-if-match -condition evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates to -false If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since -headers are present in the request and evaluate as follows, Amazon S3 returns the 412 -Precondition Failed response code: x-amz-copy-source-if-none-match condition evaluates -to false x-amz-copy-source-if-modified-since condition evaluates to true All headers -with the x-amz- prefix, including x-amz-copy-source, must be signed. Server-side -encryption Amazon S3 automatically encrypts all new objects that are copied to an S3 -bucket. When copying an object, if you don't specify encryption information in your copy -request, the encryption setting of the target object is set to the default encryption -configuration of the destination bucket. By default, all buckets have a base level of -encryption configuration that uses server-side encryption with Amazon S3 managed keys -(SSE-S3). If the destination bucket has a default encryption configuration that uses -server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a -customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a -customer-provided key to encrypt the target object copy. When you perform a CopyObject -operation, if you want to use a different type of encryption setting for the target object, -you can use other appropriate encryption-related headers to encrypt the target object with -a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side -encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and -decrypts the data when you access it. If the encryption setting in your request is -different from the default encryption configuration of the destination bucket, the -encryption setting in your request takes precedence. If the source object for the copy is -stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in -your request so that Amazon S3 can decrypt the object for copying. For more information -about server-side encryption, see Using Server-Side Encryption. If a target object uses -SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon -S3 Bucket Keys in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request -Headers When copying an object, you can optionally use headers to grant ACL-based -permissions. By default, all objects are private. Only the owner has full access control. -When adding a new object, you can grant permissions to individual Amazon Web Services -accounts or to predefined groups defined by Amazon S3. These permissions are then added to -the ACL on the object. For more information, see Access Control List (ACL) Overview and -Managing ACLs Using the REST API. 
If the bucket that you're copying objects to uses the -bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer -affect permissions. Buckets that use this setting only accept PUT requests that don't -specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the -bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML -format. For more information, see Controlling ownership of objects and disabling ACLs in -the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for Object -Ownership, all objects written to the bucket by any account will be owned by the bucket -owner. Checksums When copying an object, if it has a checksum, that checksum will be -copied to the new object by default. When you copy the object over, you may optionally -specify a different checksum algorithm to use with the x-amz-checksum-algorithm header. -Storage Class Options You can use the CopyObject action to change the storage class of an -object that is already stored in Amazon S3 using the StorageClass parameter. For more -information, see Storage Classes in the Amazon S3 User Guide. If the source object's -storage class is GLACIER, you must restore a copy of this object before you can use it as a -source object for the copy operation. For more information, see RestoreObject. For more -information, see Copying Objects. Versioning By default, x-amz-copy-source identifies the -current version of an object to copy. If the current version is a delete marker, Amazon S3 -behaves as if the object was deleted. To copy a different version, use the versionId -subresource. If you enable versioning on the target bucket, Amazon S3 generates a unique -version ID for the object being copied. This version ID is different from the version ID of -the source object. Amazon S3 returns the version ID of the copied object in the -x-amz-version-id response header in the response. If you do not enable versioning or -suspend it on the target bucket, the version ID that Amazon S3 generates is always null. -The following operations are related to CopyObject: PutObject GetObject +information, see Copy Object Using the REST Multipart Upload API. You can copy individual +objects between general purpose buckets, between directory buckets, and between general +purpose buckets and directory buckets. Directory buckets - For directory buckets, you +must make requests for this API operation to the Zonal endpoint. These endpoints support +virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Both the Region that you want to copy the object from and the Region that you want +to copy the object to must be enabled for your account. For more information about how to +enable a Region for your account, see Enable or disable a Region for standalone accounts in +the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not +support cross-Region copies. If you request a cross-Region copy using a transfer +acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer +Acceleration. Authentication and authorization All CopyObject requests must be +authenticated and signed by using IAM credentials (access key ID and secret access key for +the IAM identities). 
All headers with the x-amz- prefix, including x-amz-copy-source, must
+be signed. For more information, see REST Authentication. Directory buckets - You must use
+the IAM credentials to authenticate and authorize your access to the CopyObject API
+operation, instead of using the temporary security credentials through the CreateSession
+API operation. Amazon Web Services CLI or SDKs handle authentication and authorization on
+your behalf. Permissions You must have read access to the source object and write access
+to the destination bucket. General purpose bucket permissions - You must have
+permissions in an IAM policy based on the source and destination bucket types in a
+CopyObject operation. If the source object is in a general purpose bucket, you must have
+s3:GetObject permission to read the source object that is being copied. If the
+destination bucket is a general purpose bucket, you must have s3:PutObject permission to
+write the object copy to the destination bucket. Directory bucket permissions - You
+must have permissions in a bucket policy or an IAM identity-based policy based on the
+source and destination bucket types in a CopyObject operation. If the source object that
+you want to copy is in a directory bucket, you must have the s3express:CreateSession
+permission in the Action element of a policy to read the object. By default, the session is
+in the ReadWrite mode. If you want to restrict the access, you can explicitly set the
+s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy
+destination is a directory bucket, you must have the s3express:CreateSession permission
+in the Action element of a policy to write the object to the destination. The
+s3express:SessionMode condition key can't be set to ReadOnly on the copy destination
+bucket. For example policies, see Example bucket policies for S3 Express One Zone and
+Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3
+Express One Zone in the Amazon S3 User Guide. Response and special errors When the
+request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an
+HTTP 1.1 request, the response would not contain the Content-Length. You always need to
+read the entire response body to check if the copy succeeds; Amazon S3 streams the
+response this way to keep the connection alive while it copies the data. If the copy is
+successful, you receive a response with information about the copied object. A copy
+request might return an error when Amazon S3
+receives the copy request or while Amazon S3 is copying the files. A 200 OK response can
+contain either a success or an error. If the error occurs before the copy action starts,
+you receive a standard Amazon S3 error. If the error occurs during the copy operation,
+the error response is embedded in the 200 OK response. For example, in a cross-Region copy,
+you may encounter throttling and receive a 200 OK response. For more information, see
+Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code
+means the copy was accepted, but it doesn't mean the copy is complete. Another example: if
+you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy
+and you may receive a 200 OK response. You must stay connected to Amazon S3 until the
+entire response is successfully received and processed. If you call this API operation
+directly, make sure to design your application to parse the content of the response and
+handle it appropriately.
If you use Amazon Web Services SDKs, SDKs handle this condition. +The SDKs detect the embedded error and apply error handling per your configuration settings +(including automatically retrying the request as appropriate). If the condition persists, +the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an +error). Charge The copy request charge is based on the storage class and Region that +you specify for the destination object. The request can also result in a data retrieval +charge for the source if the source storage class bills for data retrieval. If the copy +source is in a different region, the data transfer is billed to the copy source account. +For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory +buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to +CopyObject: PutObject GetObject # Arguments -- `bucket`: The name of the destination bucket. When using this action with an access - point, you must direct requests to the access point hostname. The access point hostname - takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using - this action with an access point through the Amazon Web Services SDKs, you provide the - access point ARN in place of the bucket name. For more information about access point ARNs, - see Using access points in the Amazon S3 User Guide. When you use this action with Amazon - S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts +- `bucket`: The name of the destination bucket. Directory buckets - When you use this + operation with a directory bucket, you must use virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form + AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with + an access point through the Amazon Web Services SDKs, you provide the access point ARN in + place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The key of the destination object. 
-- `x-amz-copy-source`: Specifies the source object for the copy operation. You specify the - value in one of two formats, depending on whether you want to access the source object - through an access point: For objects not accessed through an access point, specify the - name of the source bucket and the key of the source object, separated by a slash (/). For - example, to copy the object reports/january.pdf from the bucket awsexamplebucket, use - awsexamplebucket/reports/january.pdf. The value must be URL-encoded. For objects accessed - through access points, specify the Amazon Resource Name (ARN) of the object as accessed - through the access point, in the format +- `x-amz-copy-source`: Specifies the source object for the copy operation. The source + object can be up to 5 GB. If the source object is an object that was uploaded by using a + multipart upload, the object copy will be a single part object after the source object is + copied to the destination bucket. You specify the value of the copy source in one of two + formats, depending on whether you want to access the source object through an access point: + For objects not accessed through an access point, specify the name of the source bucket + and the key of the source object, separated by a slash (/). For example, to copy the object + reports/january.pdf from the general purpose bucket awsexamplebucket, use + awsexamplebucket/reports/january.pdf. The value must be URL-encoded. To copy the object + reports/january.pdf from the directory bucket awsexamplebucket--use1-az5--x-s3, use + awsexamplebucket--use1-az5--x-s3/reports/january.pdf. The value must be URL-encoded. For + objects accessed through access points, specify the Amazon Resource Name (ARN) of the + object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/&l t;key>. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. - The value must be URL encoded. Amazon S3 supports copy operations using access points only - when the source and destination buckets are in the same Amazon Web Services Region. - Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the - object as accessed in the format + The value must be URL encoded. Amazon S3 supports copy operations using Access points + only when the source and destination buckets are in the same Amazon Web Services Region. + Access points are not supported by directory buckets. Alternatively, for objects + accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the + format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/< ;key>. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. - The value must be URL-encoded. To copy a specific version of an object, append + The value must be URL-encoded. If your source bucket versioning is enabled, the + x-amz-copy-source header by default identifies the current version of an object to copy. If + the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To + copy a different version, use the versionId query parameter. 
Specifically, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you - don't specify a version ID, Amazon S3 copies the latest version of the source object. + don't specify a version ID, Amazon S3 copies the latest version of the source object. If + you enable versioning on the destination bucket, Amazon S3 generates a unique version ID + for the copied object. This version ID is different from the version ID of the source + object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id + response header in the response. If you do not enable versioning or suspend it on the + destination bucket, the version ID that Amazon S3 generates in the x-amz-version-id + response header is always null. Directory buckets - S3 Versioning isn't enabled and + supported for directory buckets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Cache-Control"`: Specifies caching behavior along the request/reply chain. -- `"Content-Disposition"`: Specifies presentational information for the object. +- `"Cache-Control"`: Specifies the caching behavior along the request/reply chain. +- `"Content-Disposition"`: Specifies presentational information for the object. Indicates + whether an object should be displayed in a web browser or downloaded as a file. It allows + specifying the desired filename for the downloaded file. - `"Content-Encoding"`: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by - the Content-Type header field. + the Content-Type header field. For directory buckets, only the aws-chunked value is + supported in this header field. - `"Content-Language"`: The language the content is in. -- `"Content-Type"`: A standard MIME type describing the format of the object data. +- `"Content-Type"`: A standard MIME type that describes the format of the object data. - `"Expires"`: The date and time at which the object is no longer cacheable. -- `"x-amz-acl"`: The canned ACL to apply to the object. This action is not supported by - Amazon S3 on Outposts. -- `"x-amz-checksum-algorithm"`: Indicates the algorithm you want Amazon S3 to use to create - the checksum for the object. For more information, see Checking object integrity in the - Amazon S3 User Guide. +- `"x-amz-acl"`: The canned access control list (ACL) to apply to the object. When you copy + an object, the ACL metadata is not preserved and is set to private by default. Only the + owner has full access control. To override the default ACL setting, specify a new ACL when + you generate a copy request. For more information, see Using ACLs. If the destination + bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object + Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this + setting only accept PUT requests that don't specify an ACL or PUT requests that specify + bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an + equivalent form of this ACL expressed in the XML format. For more information, see + Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. If your + destination bucket uses the bucket owner enforced setting for Object Ownership, all objects + written to the bucket by any account will be owned by the bucket owner. 
This + functionality is not supported for directory buckets. This functionality is not supported + for Amazon S3 on Outposts. +- `"x-amz-checksum-algorithm"`: Indicates the algorithm that you want Amazon S3 to use to + create the checksum for the object. For more information, see Checking object integrity in + the Amazon S3 User Guide. When you copy an object, if the source object has a checksum, + that checksum value will be copied to the new object by default. If the CopyObject request + does not include this x-amz-checksum-algorithm header, the checksum algorithm will be + copied from the source object to the destination object (if it's present on the source + object). You can optionally specify a different checksum algorithm to use with the + x-amz-checksum-algorithm header. Unrecognized or unsupported values will respond with the + HTTP status code 400 Bad Request. For directory buckets, when you use Amazon Web Services + SDKs, CRC32 is the default checksum algorithm that's used for performance. - `"x-amz-copy-source-if-match"`: Copies the object if its entity tag (ETag) matches the - specified tag. + specified tag. If both the x-amz-copy-source-if-match and + x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as + follows, Amazon S3 returns 200 OK and copies the data: x-amz-copy-source-if-match + condition evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates to + false - `"x-amz-copy-source-if-modified-since"`: Copies the object if it has been modified since - the specified time. + the specified time. If both the x-amz-copy-source-if-none-match and + x-amz-copy-source-if-modified-since headers are present in the request and evaluate as + follows, Amazon S3 returns the 412 Precondition Failed response code: + x-amz-copy-source-if-none-match condition evaluates to false + x-amz-copy-source-if-modified-since condition evaluates to true - `"x-amz-copy-source-if-none-match"`: Copies the object if its entity tag (ETag) is - different than the specified ETag. + different than the specified ETag. If both the x-amz-copy-source-if-none-match and + x-amz-copy-source-if-modified-since headers are present in the request and evaluate as + follows, Amazon S3 returns the 412 Precondition Failed response code: + x-amz-copy-source-if-none-match condition evaluates to false + x-amz-copy-source-if-modified-since condition evaluates to true - `"x-amz-copy-source-if-unmodified-since"`: Copies the object if it hasn't been modified - since the specified time. + since the specified time. If both the x-amz-copy-source-if-match and + x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as + follows, Amazon S3 returns 200 OK and copies the data: x-amz-copy-source-if-match + condition evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates to + false - `"x-amz-copy-source-server-side-encryption-customer-algorithm"`: Specifies the algorithm - to use when decrypting the source object (for example, AES256). + to use when decrypting the source object (for example, AES256). If the source object for + the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption + information in your request so that Amazon S3 can decrypt the object for copying. This + functionality is not supported when the source object is in a directory bucket. 
- `"x-amz-copy-source-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The - encryption key provided in this header must be one that was used when the source object was - created. + encryption key provided in this header must be the same one that was used when the source + object was created. If the source object for the copy is stored in Amazon S3 using SSE-C, + you must provide the necessary encryption information in your request so that Amazon S3 can + decrypt the object for copying. This functionality is not supported when the source object + is in a directory bucket. - `"x-amz-copy-source-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a - message integrity check to ensure that the encryption key was transmitted without error. + message integrity check to ensure that the encryption key was transmitted without error. If + the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the + necessary encryption information in your request so that Amazon S3 can decrypt the object + for copying. This functionality is not supported when the source object is in a directory + bucket. - `"x-amz-expected-bucket-owner"`: The account ID of the expected destination bucket owner. - If the destination bucket is owned by a different account, the request fails with the HTTP - status code 403 Forbidden (access denied). + If the account ID that you provide does not match the actual owner of the destination + bucket, the request fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-grant-full-control"`: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions - on the object. This action is not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read"`: Allows grantee to read the object data and its metadata. This - action is not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read-acp"`: Allows grantee to read the object ACL. This action is not - supported by Amazon S3 on Outposts. + on the object. This functionality is not supported for directory buckets. This + functionality is not supported for Amazon S3 on Outposts. +- `"x-amz-grant-read"`: Allows grantee to read the object data and its metadata. This + functionality is not supported for directory buckets. This functionality is not supported + for Amazon S3 on Outposts. +- `"x-amz-grant-read-acp"`: Allows grantee to read the object ACL. This functionality is + not supported for directory buckets. This functionality is not supported for Amazon S3 on + Outposts. - `"x-amz-grant-write-acp"`: Allows grantee to write the ACL for the applicable object. - This action is not supported by Amazon S3 on Outposts. + This functionality is not supported for directory buckets. This functionality is not + supported for Amazon S3 on Outposts. - `"x-amz-meta-"`: A map of metadata to store with the object in S3. - `"x-amz-metadata-directive"`: Specifies whether the metadata is copied from the source - object or replaced with metadata provided in the request. + object or replaced with metadata that's provided in the request. When copying an object, + you can preserve all metadata (the default) or specify new metadata. If this header isn’t + specified, COPY is the default behavior. 
General purpose bucket - For general purpose + buckets, when you grant permissions, you can use the s3:x-amz-metadata-directive condition + key to enforce certain metadata behavior when objects are uploaded. For more information, + see Amazon S3 condition key examples in the Amazon S3 User Guide. + x-amz-website-redirect-location is unique to each object and is not copied when using the + x-amz-metadata-directive header. To copy the value, you must specify + x-amz-website-redirect-location in the request header. - `"x-amz-object-lock-legal-hold"`: Specifies whether you want to apply a legal hold to the - copied object. -- `"x-amz-object-lock-mode"`: The Object Lock mode that you want to apply to the copied - object. -- `"x-amz-object-lock-retain-until-date"`: The date and time when you want the copied - object's Object Lock to expire. + object copy. This functionality is not supported for directory buckets. +- `"x-amz-object-lock-mode"`: The Object Lock mode that you want to apply to the object + copy. This functionality is not supported for directory buckets. +- `"x-amz-object-lock-retain-until-date"`: The date and time when you want the Object Lock + of the object copy to expire. This functionality is not supported for directory buckets. - `"x-amz-request-payer"`: - `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing - this object in Amazon S3 (for example, AES256, aws:kms). -- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the Amazon Web Services KMS - key ID to use for object encryption. All GET and PUT requests for an object protected by - Amazon Web Services KMS will fail if not made via SSL or using SigV4. For information about - configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web + this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or + unsupported values won’t write a destination object and will receive a 400 Bad Request + response. Amazon S3 automatically encrypts all new objects that are copied to an S3 + bucket. When copying an object, if you don't specify encryption information in your copy + request, the encryption setting of the target object is set to the default encryption + configuration of the destination bucket. By default, all buckets have a base level of + encryption configuration that uses server-side encryption with Amazon S3 managed keys + (SSE-S3). If the destination bucket has a default encryption configuration that uses + server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer + server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side + encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding + KMS key, or a customer-provided key to encrypt the target object copy. When you perform a + CopyObject operation, if you want to use a different type of encryption setting for the + target object, you can specify appropriate encryption-related headers to encrypt the target + object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the + encryption setting in your request is different from the default encryption configuration + of the destination bucket, the encryption setting in your request takes precedence. With + server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its + data centers and decrypts the data when you access it. 
For more information about + server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide. For + directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) + (AES256) is supported. +- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the KMS ID (Key ID, Key ARN, + or Key Alias) to use for object encryption. All GET and PUT requests for an object + protected by KMS will fail if they're not made via SSL or using SigV4. For information + about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon - S3 User Guide. + S3 User Guide. This functionality is not supported when the destination bucket is a + directory bucket. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket + Key for the object. Setting this header to true causes Amazon S3 to use an S3 Bucket Key + for object encryption with SSE-KMS. Specifying this header with a COPY action doesn’t + affect bucket-level settings for S3 Bucket Key. For more information, see Amazon S3 Bucket + Keys in the Amazon S3 User Guide. This functionality is not supported when the destination + bucket is a directory bucket. - `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a - base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. -- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use to - when encrypting the object (for example, AES256). + base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This + value must be explicitly added to specify encryption context for CopyObject requests. This + functionality is not supported when the destination bucket is a directory bucket. +- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when + encrypting the object (for example, AES256). When you perform a CopyObject operation, if + you want to use a different type of encryption setting for the target object, you can + specify appropriate encryption-related headers to encrypt the target object with an Amazon + S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your + request is different from the default encryption configuration of the destination bucket, + the encryption setting in your request takes precedence. This functionality is not + supported when the destination bucket is a directory bucket. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and - then it is discarded; Amazon S3 does not store the encryption key. The key must be + then it is discarded. Amazon S3 does not store the encryption key. 
The key must be appropriate for use with the algorithm specified in the - x-amz-server-side-encryption-customer-algorithm header. + x-amz-server-side-encryption-customer-algorithm header. This functionality is not + supported when the destination bucket is a directory bucket. - `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + integrity check to ensure that the encryption key was transmitted without error. This + functionality is not supported when the destination bucket is a directory bucket. - `"x-amz-source-expected-bucket-owner"`: The account ID of the expected source bucket - owner. If the source bucket is owned by a different account, the request fails with the - HTTP status code 403 Forbidden (access denied). -- `"x-amz-storage-class"`: By default, Amazon S3 uses the STANDARD Storage Class to store - newly created objects. The STANDARD storage class provides high durability and high - availability. Depending on performance needs, you can specify a different Storage Class. - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see - Storage Classes in the Amazon S3 User Guide. -- `"x-amz-tagging"`: The tag-set for the object destination object this value must be used - in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query - parameters. -- `"x-amz-tagging-directive"`: Specifies whether the object tag-set are copied from the - source object or replaced with tag-set provided in the request. -- `"x-amz-website-redirect-location"`: If the bucket is configured as a website, redirects - requests for this object to another object in the same bucket or to an external URL. Amazon - S3 stores the value of this header in the object metadata. This value is unique to each - object and is not copied when using the x-amz-metadata-directive header. Instead, you may - opt to provide this header in combination with the directive. + owner. If the account ID that you provide does not match the actual owner of the source + bucket, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-storage-class"`: If the x-amz-storage-class header is not used, the copied object + will be stored in the STANDARD Storage Class by default. The STANDARD storage class + provides high durability and high availability. Depending on performance needs, you can + specify a different Storage Class. Directory buckets - For directory buckets, only + the S3 Express One Zone storage class is supported to store newly created objects. + Unsupported storage class values won't write a destination object and will respond with the + HTTP status code 400 Bad Request. Amazon S3 on Outposts - S3 on Outposts only uses the + OUTPOSTS Storage Class. You can use the CopyObject action to change the storage class of + an object that is already stored in Amazon S3 by using the x-amz-storage-class header. For + more information, see Storage Classes in the Amazon S3 User Guide. Before using an object + as a source object for the copy operation, you must restore a copy of it if it meets any of + the following conditions: The storage class of the source object is GLACIER or + DEEP_ARCHIVE. The storage class of the source object is INTELLIGENT_TIERING and its S3 + Intelligent-Tiering access tier is Archive Access or Deep Archive Access.
For more + information, see RestoreObject and Copying Objects in the Amazon S3 User Guide. +- `"x-amz-tagging"`: The tag-set for the object copy in the destination bucket. This value + must be used in conjunction with the x-amz-tagging-directive if you choose REPLACE for the + x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive, you don't need + to set the x-amz-tagging header, because the tag-set will be copied from the source object + directly. The tag-set must be encoded as URL Query parameters. The default value is the + empty value. Directory buckets - For directory buckets in a CopyObject operation, only + the empty tag-set is supported. Any requests that attempt to write non-empty tags into + directory buckets will receive a 501 Not Implemented status code. When the destination + bucket is a directory bucket, you will receive a 501 Not Implemented response in any of the + following situations: When you attempt to COPY the tag-set from an S3 source object that + has non-empty tags. When you attempt to REPLACE the tag-set of a source object and set a + non-empty value to x-amz-tagging. When you don't set the x-amz-tagging-directive header + and the source object has non-empty tags. This is because the default value of + x-amz-tagging-directive is COPY. Because only the empty tag-set is supported for + directory buckets in a CopyObject operation, the following situations are allowed: When + you attempt to COPY the tag-set from a directory bucket source object that has no tags to a + general purpose bucket. It copies an empty tag-set to the destination object. When you + attempt to REPLACE the tag-set of a directory bucket source object and set the + x-amz-tagging value of the directory bucket destination object to empty. When you attempt + to REPLACE the tag-set of a general purpose bucket source object that has non-empty tags + and set the x-amz-tagging value of the directory bucket destination object to empty. When + you attempt to REPLACE the tag-set of a directory bucket source object and don't set the + x-amz-tagging value of the directory bucket destination object. This is because the default + value of x-amz-tagging is the empty value. +- `"x-amz-tagging-directive"`: Specifies whether the object tag-set is copied from the + source object or replaced with the tag-set that's provided in the request. The default + value is COPY. Directory buckets - For directory buckets in a CopyObject operation, only + the empty tag-set is supported. Any requests that attempt to write non-empty tags into + directory buckets will receive a 501 Not Implemented status code. When the destination + bucket is a directory bucket, you will receive a 501 Not Implemented response in any of the + following situations: When you attempt to COPY the tag-set from an S3 source object that + has non-empty tags. When you attempt to REPLACE the tag-set of a source object and set a + non-empty value to x-amz-tagging. When you don't set the x-amz-tagging-directive header + and the source object has non-empty tags. This is because the default value of + x-amz-tagging-directive is COPY. Because only the empty tag-set is supported for + directory buckets in a CopyObject operation, the following situations are allowed: When + you attempt to COPY the tag-set from a directory bucket source object that has no tags to a + general purpose bucket. It copies an empty tag-set to the destination object. 
When you + attempt to REPLACE the tag-set of a directory bucket source object and set the + x-amz-tagging value of the directory bucket destination object to empty. When you attempt + to REPLACE the tag-set of a general purpose bucket source object that has non-empty tags + and set the x-amz-tagging value of the directory bucket destination object to empty. When + you attempt to REPLACE the tag-set of a directory bucket source object and don't set the + x-amz-tagging value of the directory bucket destination object. This is because the default + value of x-amz-tagging is the empty value. +- `"x-amz-website-redirect-location"`: If the destination bucket is configured as a + website, redirects requests for this object copy to another object in the same bucket or to + an external URL. Amazon S3 stores the value of this header in the object metadata. This + value is unique to each object and is not copied when using the x-amz-metadata-directive + header. Instead, you may opt to provide this header in combination with the + x-amz-metadata-directive header. This functionality is not supported for directory + buckets. """ function copy_object( Bucket, Key, x_amz_copy_source; aws_config::AbstractAWSConfig=global_aws_config() @@ -480,77 +678,96 @@ end create_bucket(bucket) create_bucket(bucket, params::Dict{String,<:Any}) -Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a -valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are -never allowed to create buckets. By creating the bucket, you become the bucket owner. Not -every string is an acceptable bucket name. For information about bucket naming -restrictions, see Bucket naming rules. If you want to create an Amazon S3 on Outposts -bucket, see Create Bucket. By default, the bucket is created in the US East (N. Virginia) -Region. You can optionally specify a Region in the request body. You might choose a Region -to optimize latency, minimize costs, or address regulatory requirements. For example, if -you reside in Europe, you will probably find it advantageous to create buckets in the -Europe (Ireland) Region. For more information, see Accessing a bucket. If you send your -create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 -Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 + This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see +CreateBucket . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and +have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests +are never allowed to create buckets. By creating the bucket, you become the bucket owner. +There are two types of buckets: general purpose buckets and directory buckets. For more +information about these bucket types, see Creating, configuring, and working with Amazon S3 +buckets in the Amazon S3 User Guide. General purpose buckets - If you send your +CreateBucket request to the s3.amazonaws.com global endpoint, the request goes to the +us-east-1 Region. So the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, -see Virtual hosting of buckets. 
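As a concrete illustration of the copy_object wrapper whose definition closes above, the following is a minimal sketch of a basic copy and of a copy that overrides the storage class and metadata directive through the optional params dictionary. It assumes the high-level @service interface of AWS.jl and a configured default aws_config; the bucket and key names are placeholders, and the x-amz-copy-source value follows the URL-encoded bucket/key form described in the argument list.

```julia
using AWS
@service S3

# Placeholder names, for illustration only.
dest_bucket = "doc-example-destination-bucket"
dest_key    = "reports/january-copy.pdf"
copy_source = "doc-example-source-bucket/reports/january.pdf"  # URL-encoded "bucket/key" form

# Basic copy: positional arguments map to Bucket, Key, and x-amz-copy-source.
S3.copy_object(dest_bucket, dest_key, copy_source)

# Copy that replaces the metadata and changes the storage class by passing the
# documented headers through the optional params dictionary.
S3.copy_object(
    dest_bucket,
    dest_key,
    copy_source,
    Dict(
        "x-amz-storage-class" => "STANDARD_IA",
        "x-amz-metadata-directive" => "REPLACE",
    ),
)
```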
Access control lists (ACLs) When creating a bucket using -this operation, you can optionally configure the bucket ACL to specify the accounts or -groups that should be granted specific permissions on the bucket. If your CreateBucket -request sets bucket owner enforced for S3 Object Ownership and specifies a bucket ACL that -provides access to an external Amazon Web Services account, your request fails with a 400 -error and returns the InvalidBucketAclWithObjectOwnership error code. For more information, -see Controlling object ownership in the Amazon S3 User Guide. There are two ways to grant -the appropriate permissions using the request headers. Specify a canned ACL using the -x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned -ACLs. Each canned ACL has a predefined set of grantees and permissions. For more -information, see Canned ACL. Specify access permissions explicitly using the -x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and -x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 -supports in an ACL. For more information, see Access control list (ACL) overview. You -specify each grantee as a type=value pair, where the type is one of the following: id -– if the value specified is the canonical user ID of an Amazon Web Services account -uri – if you are granting permissions to a predefined group emailAddress – if the -value specified is the email address of an Amazon Web Services account Using email -addresses to specify a grantee is only supported in the following Amazon Web Services -Regions: US East (N. Virginia) US West (N. California) US West (Oregon) Asia -Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -South America (São Paulo) For a list of all the Amazon S3 supported Regions and -endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For -example, the following x-amz-grant-read header grants the Amazon Web Services accounts -identified by account IDs permissions to read object data and its metadata: -x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" You can use either a canned -ACL or specify access permissions explicitly. You cannot do both. Permissions In -addition to s3:CreateBucket, the following permissions are required when your CreateBucket -includes specific headers: ACLs - If your CreateBucket request specifies ACL permissions -and the ACL is public-read, public-read-write, authenticated-read, or if you specify access -permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl -permissions are needed. If the ACL the CreateBucket request is private or doesn't specify -any ACLs, only s3:CreateBucket permission is needed. Object Lock - If -ObjectLockEnabledForBucket is set to true in your CreateBucket request, +see Virtual hosting of buckets in the Amazon S3 User Guide. Directory buckets - For +directory buckets, you must make requests for this API operation to the Regional endpoint. +These endpoints support path-style requests in the format +https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style +requests aren't supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. 
Permissions General purpose bucket permissions - In addition +to the s3:CreateBucket permission, the following permissions are required in a policy when +your CreateBucket request includes specific headers: Access control lists (ACLs) - In +your CreateBucket request, if you specify an access control list (ACL) and set it to +public-read, public-read-write, authenticated-read, or if you explicitly specify any other +custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your +CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only +the s3:CreateBucket permission is required. Object Lock - In your CreateBucket request, +if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required. S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership -header, s3:PutBucketOwnershipControls permission is required. The following operations -are related to CreateBucket: PutObject DeleteBucket +header, then the s3:PutBucketOwnershipControls permission is required. To set an ACL on a +bucket as part of a CreateBucket request, you must explicitly set S3 Object Ownership for +the bucket to a different value than the default, BucketOwnerEnforced. Additionally, if +your desired bucket ACL grants public access, you must first create the bucket (without the +bucket ACL) and then explicitly disable Block Public Access on the bucket before using +PutBucketAcl to set the ACL. If you try to create a bucket with a public ACL, the request +will fail. For the majority of modern use cases in S3, we recommend that you keep all +Block Public Access settings enabled and keep ACLs disabled. If you would like to share +data with users outside of your account, you can use bucket policies as needed. For more +information, see Controlling ownership of objects and disabling ACLs for your bucket and +Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide. S3 +Block Public Access - If your specific use case requires granting public access to your S3 +resources, you can disable Block Public Access. Specifically, you can create a new bucket +with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. +To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more +information about S3 Block Public Access, see Blocking public access to your Amazon S3 +storage in the Amazon S3 User Guide. Directory bucket permissions - You must have +the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket +policy. Cross-account access to this API operation isn't supported. This operation can only +be performed by the Amazon Web Services account that owns the resource. For more +information about directory bucket policies and permissions, see Amazon Web Services +Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. +The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are +not supported for directory buckets. For directory buckets, all Block Public Access +settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner +enforced (ACLs disabled). These settings can't be modified. For more information about +permissions for creating and working with directory buckets, see Directory buckets in the +Amazon S3 User Guide. 
For more information about supported S3 features for directory +buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide. HTTP Host +header syntax Directory buckets - The HTTP Host header syntax is +s3express-control.region.amazonaws.com. The following operations are related to +CreateBucket: PutObject DeleteBucket # Arguments -- `bucket`: The name of the bucket to create. +- `bucket`: The name of the bucket to create. General purpose buckets - For information + about bucket naming restrictions, see Bucket naming rules in the Amazon S3 User Guide. + Directory buckets - When you use this operation with a directory bucket, you must use + path-style requests in the format + https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style + requests aren't supported. Directory bucket names must be unique in the chosen Availability + Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CreateBucketConfiguration"`: The configuration information for the bucket. -- `"x-amz-acl"`: The canned ACL to apply to the bucket. +- `"x-amz-acl"`: The canned ACL to apply to the bucket. This functionality is not + supported for directory buckets. - `"x-amz-bucket-object-lock-enabled"`: Specifies whether you want S3 Object Lock to be - enabled for the new bucket. + enabled for the new bucket. This functionality is not supported for directory buckets. - `"x-amz-grant-full-control"`: Allows grantee the read, write, read ACP, and write ACP - permissions on the bucket. -- `"x-amz-grant-read"`: Allows grantee to list the objects in the bucket. -- `"x-amz-grant-read-acp"`: Allows grantee to read the bucket ACL. + permissions on the bucket. This functionality is not supported for directory buckets. +- `"x-amz-grant-read"`: Allows grantee to list the objects in the bucket. This + functionality is not supported for directory buckets. +- `"x-amz-grant-read-acp"`: Allows grantee to read the bucket ACL. This functionality is + not supported for directory buckets. - `"x-amz-grant-write"`: Allows grantee to create new objects in the bucket. For the bucket and object owners of existing objects, also allows deletions and overwrites of those - objects. + objects. This functionality is not supported for directory buckets. - `"x-amz-grant-write-acp"`: Allows grantee to write the ACL for the applicable bucket. + This functionality is not supported for directory buckets. - `"x-amz-object-ownership"`: """ function create_bucket(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) @@ -572,25 +789,48 @@ This action initiates a multipart upload and returns an upload ID. This upload I to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. -For more information about multipart uploads, see Multipart Upload Overview. If you have -configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete -within the number of days specified in the bucket lifecycle configuration. 
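Before the CreateMultipartUpload description continues below, here is a minimal sketch of calling the create_bucket wrapper whose definition closes above. The bucket name and Region are placeholders, and the nested Dict used for CreateBucketConfiguration is an assumption about how AWS.jl serializes the request body rather than a documented contract.

```julia
using AWS
@service S3

# Placeholder bucket name; it must satisfy the bucket naming rules referenced above.
bucket = "doc-example-bucket-123456"

# Simplest form: the bucket is created in the client's default Region.
S3.create_bucket(bucket)

# Create the bucket in a specific Region by passing CreateBucketConfiguration through
# the optional params dictionary. The nested Dict shape is assumed, not documented here.
S3.create_bucket(
    bucket,
    Dict("CreateBucketConfiguration" => Dict("LocationConstraint" => "eu-west-1")),
)
```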
Otherwise, the -incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the -multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a -Bucket Lifecycle Configuration. For information about the permissions required to use the -multipart upload API, see Multipart Upload and Permissions. For request signing, multipart -upload is just a series of regular requests. You initiate a multipart upload, send one or -more requests to upload parts, and then complete the multipart upload process. You sign -each request individually. There is nothing special about signing multipart upload -requests. For more information about signing, see Authenticating Requests (Amazon Web -Services Signature Version 4). After you initiate a multipart upload and upload one or -more parts, to stop being charged for storing the uploaded parts, you must either complete -or abort the multipart upload. Amazon S3 frees up the space used to store the parts and -stop charging you for storing them only after you either complete or abort a multipart -upload. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your -data as it writes it to disks in its data centers and decrypts it when you access it. -Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When -doing a multipart upload, if you don't specify encryption information in your request, the +For more information about multipart uploads, see Multipart Upload Overview in the Amazon +S3 User Guide. After you initiate a multipart upload and upload one or more parts, to stop +being charged for storing the uploaded parts, you must either complete or abort the +multipart upload. Amazon S3 frees up the space used to store the parts and stops charging +you for storing them only after you either complete or abort a multipart upload. If you +have configured a lifecycle rule to abort incomplete multipart uploads, the created +multipart upload must be completed within the number of days specified in the bucket +lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an +abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting +Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration. Directory buckets +- S3 Lifecycle is not supported by directory buckets. Directory buckets - For directory +buckets, you must make requests for this API operation to the Zonal endpoint. These +endpoints support virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Request signing For request signing, multipart upload is just a series of +regular requests. You initiate a multipart upload, send one or more requests to upload +parts, and then complete the multipart upload process. You sign each request individually. +There is nothing special about signing multipart upload requests. For more information +about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the +Amazon S3 User Guide. Permissions General purpose bucket permissions - For information +about the permissions required to use the multipart upload API, see Multipart upload and +permissions in the Amazon S3 User Guide. 
To perform a multipart upload with encryption by +using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt +and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon +S3 must decrypt and read data from the encrypted file parts before it completes the +multipart upload. For more information, see Multipart upload API and permissions and +Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 +User Guide. Directory bucket permissions - To grant access to this API operation on a +directory bucket, we recommend that you use the CreateSession API operation for +session-based authorization. Specifically, you grant the s3express:CreateSession permission +to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make +the CreateSession API call on the bucket to obtain a session token. With the session token +in your request header, you can make API requests to this operation. After the session +token expires, you make another CreateSession API call to generate a new session token for +use. Amazon Web Services CLI or SDKs create session and refresh the session token +automatically to avoid service interruptions when a session expires. For more information +about authorization, see CreateSession . Encryption General purpose buckets - +Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it +writes it to disks in its data centers and decrypts it when you access it. Amazon S3 +automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a +multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the @@ -599,99 +839,71 @@ with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encry (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon -S3 encrypts the object with a KMS key, an Amazon S3 managed key, or a customer-provided -key. If the encryption setting in your request is different from the default encryption -configuration of the destination bucket, the encryption setting in your request takes -precedence. If you choose to provide your own encryption key, the request headers you -provide in UploadPart and UploadPartCopy requests must match the headers you used in the -request to initiate the upload by using CreateMultipartUpload. You can request that Amazon -S3 save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed -key (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided -encryption key (SSE-C). To perform a multipart upload with encryption by using an Amazon -Web Services KMS key, the requester must have permission to the kms:Decrypt and -kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 -must decrypt and read data from the encrypted file parts before it completes the multipart -upload. 
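To make the encryption requirements above concrete, the following hedged sketch initiates a multipart upload that requests SSE-KMS through the optional params dictionary, using header names from the parameter list of this operation. The bucket, key, and KMS key ARN are placeholders, and reading UploadId from the result assumes that AWS.jl exposes the parsed InitiateMultipartUploadResult as a dictionary-like object.

```julia
using AWS
@service S3

bucket      = "doc-example-bucket"                                     # placeholder
key         = "backups/archive-2024-06.tar.gz"                         # placeholder
kms_key_arn = "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"  # placeholder

# Initiate the multipart upload and request SSE-KMS for the uploaded parts. As noted
# above, the caller also needs kms:Decrypt and kms:GenerateDataKey* on the key.
resp = S3.create_multipart_upload(
    bucket,
    key,
    Dict(
        "x-amz-server-side-encryption" => "aws:kms",
        "x-amz-server-side-encryption-aws-kms-key-id" => kms_key_arn,
    ),
)

# The returned UploadId ties together the subsequent UploadPart requests and the final
# CompleteMultipartUpload (or AbortMultipartUpload) call. Dict-style access is assumed.
upload_id = resp["UploadId"]
```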
For more information, see Multipart upload API and permissions and Protecting data -using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide. If -your Identity and Access Management (IAM) user or role is in the same Amazon Web Services -account as the KMS key, then you must have these permissions on the key policy. If your IAM -user or role belongs to a different account than the key, then you must have the -permissions on both the key policy and your IAM user or role. For more information, see -Protecting Data Using Server-Side Encryption. Access Permissions When copying an object, -you can optionally specify the accounts or groups that should be granted specific -permissions on the new object. There are two ways to grant the permissions using the -request headers: Specify a canned ACL with the x-amz-acl request header. For more -information, see Canned ACL. Specify access permissions explicitly with the -x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control -headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. -For more information, see Access Control List (ACL) Overview. You can use either a canned -ACL or specify access permissions explicitly. You cannot do both. Server-Side- -Encryption-Specific Request Headers Amazon S3 encrypts data by using server-side -encryption with an Amazon S3 managed key (SSE-S3) by default. Server-side encryption is for -data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data -centers and decrypts it when you access it. You can request that Amazon S3 encrypts data at -rest by using server-side encryption with other key options. The option you use depends on -whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys (SSE-C). -Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS -customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web -Services to manage the keys used to encrypt data, specify the following headers in the -request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id - x-amz-server-side-encryption-context If you specify +S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a +KMS key, or a customer-provided key). When the encryption setting in your request is +different from the default encryption configuration of the destination bucket, the +encryption setting in your request takes precedence. If you choose to provide your own +encryption key, the request headers you provide in UploadPart and UploadPartCopy requests +must match the headers you used in the CreateMultipartUpload request. Use KMS keys +(SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer +managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to +manage the keys used to encrypt data, specify the following headers in the request. +x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id +x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed -key (aws/s3 key) in KMS to protect the data. All GET and PUT requests for an object -protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport -Layer Security (TLS), or Signature Version 4. 
For more information about server-side -encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with -KMS keys. Use customer-provided encryption keys (SSE-C) – If you want to manage your -own encryption keys, provide all the following headers in the request. +key (aws/s3 key) in KMS to protect the data. To perform a multipart upload with +encryption by using an Amazon Web Services KMS key, the requester must have permission to +the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required +because Amazon S3 must decrypt and read data from the encrypted file parts before it +completes the multipart upload. For more information, see Multipart upload API and +permissions and Protecting data using server-side encryption with Amazon Web Services KMS +in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user or role is +in the same Amazon Web Services account as the KMS key, then you must have these +permissions on the key policy. If your IAM user or role is in a different account from the +key, then you must have the permissions on both the key policy and your IAM user or role. +All GET and PUT requests for an object protected by KMS fail if you don't make them by +using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. +For information about configuring any of the officially supported Amazon Web Services SDKs +and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication +in the Amazon S3 User Guide. For more information about server-side encryption with KMS +keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the +Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) – If you want to +manage your own encryption keys, provide all the following headers in the request. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided -encryption keys (SSE-C). Access-Control-List (ACL)-Specific Request Headers You also -can use the following access control–related headers with this operation. By default, all -objects are private. Only the owner has full access control. When adding a new object, you -can grant permissions to individual Amazon Web Services accounts or to predefined groups -defined by Amazon S3. These permissions are then added to the access control list (ACL) on -the object. For more information, see Using ACLs. With this operation, you can grant access -permissions using one of the following two methods: Specify a canned ACL (x-amz-acl) — -Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -predefined set of grantees and permissions. For more information, see Canned ACL. Specify -access permissions explicitly — To explicitly grant access permissions to specific Amazon -Web Services accounts or groups, use the following headers. Each header maps to specific -permissions that Amazon S3 supports in an ACL. For more information, see Access Control -List (ACL) Overview. In the header, you specify a list of grantees who get the specific -permission. 
To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write - x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -specify each grantee as a type=value pair, where the type is one of the following: id -– if the value specified is the canonical user ID of an Amazon Web Services account -uri – if you are granting permissions to a predefined group emailAddress – if the -value specified is the email address of an Amazon Web Services account Using email -addresses to specify a grantee is only supported in the following Amazon Web Services -Regions: US East (N. Virginia) US West (N. California) US West (Oregon) Asia -Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -South America (São Paulo) For a list of all the Amazon S3 supported Regions and -endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For -example, the following x-amz-grant-read header grants the Amazon Web Services accounts -identified by account IDs permissions to read object data and its metadata: -x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" The following operations -are related to CreateMultipartUpload: UploadPart CompleteMultipartUpload +encryption keys (SSE-C) in the Amazon S3 User Guide. Directory buckets -For directory +buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is +supported. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is + Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related +to CreateMultipartUpload: UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads # Arguments -- `bucket`: The name of the bucket to which to initiate the upload When using this action - with an access point, you must direct requests to the access point hostname. The access - point hostname takes the form +- `bucket`: The name of the bucket where the multipart upload is initiated and where the + object is uploaded. Directory buckets - When you use this operation with a directory + bucket, you must use virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload is to be initiated. # Optional Parameters @@ -700,70 +912,154 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Content-Disposition"`: Specifies presentational information for the object. - `"Content-Encoding"`: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by - the Content-Type header field. -- `"Content-Language"`: The language the content is in. + the Content-Type header field. For directory buckets, only the aws-chunked value is + supported in this header field. +- `"Content-Language"`: The language that the content is in. - `"Content-Type"`: A standard MIME type describing the format of the object data. - `"Expires"`: The date and time at which the object is no longer cacheable. -- `"x-amz-acl"`: The canned ACL to apply to the object. This action is not supported by - Amazon S3 on Outposts. -- `"x-amz-checksum-algorithm"`: Indicates the algorithm you want Amazon S3 to use to create - the checksum for the object. For more information, see Checking object integrity in the - Amazon S3 User Guide. +- `"x-amz-acl"`: The canned ACL to apply to the object. Amazon S3 supports a set of + predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and + permissions. For more information, see Canned ACL in the Amazon S3 User Guide. By default, + all objects are private. Only the owner has full access control. When uploading an object, + you can grant access permissions to individual Amazon Web Services accounts or to + predefined groups defined by Amazon S3. These permissions are then added to the access + control list (ACL) on the new object. For more information, see Using ACLs. One way to + grant the permissions using the request headers is to specify a canned ACL with the + x-amz-acl request header. This functionality is not supported for directory buckets. + This functionality is not supported for Amazon S3 on Outposts. +- `"x-amz-checksum-algorithm"`: Indicates the algorithm that you want Amazon S3 to use to + create the checksum for the object. For more information, see Checking object integrity in + the Amazon S3 User Guide. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). -- `"x-amz-grant-full-control"`: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions - on the object. 
This action is not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read"`: Allows grantee to read the object data and its metadata. This - action is not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read-acp"`: Allows grantee to read the object ACL. This action is not - supported by Amazon S3 on Outposts. -- `"x-amz-grant-write-acp"`: Allows grantee to write the ACL for the applicable object. - This action is not supported by Amazon S3 on Outposts. + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-grant-full-control"`: Specify access permissions explicitly to give the grantee + READ, READ_ACP, and WRITE_ACP permissions on the object. By default, all objects are + private. Only the owner has full access control. When uploading an object, you can use this + header to explicitly grant access permissions to specific Amazon Web Services accounts or + groups. This header maps to specific permissions that Amazon S3 supports in an ACL. For + more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide. You + specify each grantee as a type=value pair, where the type is one of the following: id + – if the value specified is the canonical user ID of an Amazon Web Services account + uri – if you are granting permissions to a predefined group emailAddress – if the + value specified is the email address of an Amazon Web Services account Using email + addresses to specify a grantee is only supported in the following Amazon Web Services + Regions: US East (N. Virginia) US West (N. California) US West (Oregon) Asia + Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) + South America (São Paulo) For a list of all the Amazon S3 supported Regions and + endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For + example, the following x-amz-grant-read header grants the Amazon Web Services accounts + identified by account IDs permissions to read object data and its metadata: + x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" This functionality is not + supported for directory buckets. This functionality is not supported for Amazon S3 on + Outposts. +- `"x-amz-grant-read"`: Specify access permissions explicitly to allow grantee to read the + object data and its metadata. By default, all objects are private. Only the owner has full + access control. When uploading an object, you can use this header to explicitly grant + access permissions to specific Amazon Web Services accounts or groups. This header maps to + specific permissions that Amazon S3 supports in an ACL. For more information, see Access + Control List (ACL) Overview in the Amazon S3 User Guide. You specify each grantee as a + type=value pair, where the type is one of the following: id – if the value specified + is the canonical user ID of an Amazon Web Services account uri – if you are granting + permissions to a predefined group emailAddress – if the value specified is the email + address of an Amazon Web Services account Using email addresses to specify a grantee is + only supported in the following Amazon Web Services Regions: US East (N. Virginia) US + West (N. 
California) US West (Oregon) Asia Pacific (Singapore) Asia Pacific
+ (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São Paulo) For a
+ list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the
+ Amazon Web Services General Reference. For example, the following x-amz-grant-read
+ header grants the Amazon Web Services accounts identified by account IDs permissions to
+ read object data and its metadata: x-amz-grant-read: id=\"11112222333\",
+ id=\"444455556666\" This functionality is not supported for directory buckets. This
+ functionality is not supported for Amazon S3 on Outposts.
+- `\"x-amz-grant-read-acp\"`: Specify access permissions explicitly to allow grantee to read
+ the object ACL. By default, all objects are private. Only the owner has full access
+ control. When uploading an object, you can use this header to explicitly grant access
+ permissions to specific Amazon Web Services accounts or groups. This header maps to
+ specific permissions that Amazon S3 supports in an ACL. For more information, see Access
+ Control List (ACL) Overview in the Amazon S3 User Guide. You specify each grantee as a
+ type=value pair, where the type is one of the following: id – if the value specified
+ is the canonical user ID of an Amazon Web Services account uri – if you are granting
+ permissions to a predefined group emailAddress – if the value specified is the email
+ address of an Amazon Web Services account Using email addresses to specify a grantee is
+ only supported in the following Amazon Web Services Regions: US East (N. Virginia) US
+ West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia Pacific
+ (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São Paulo) For a
+ list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the
+ Amazon Web Services General Reference. For example, the following x-amz-grant-read
+ header grants the Amazon Web Services accounts identified by account IDs permissions to
+ read object data and its metadata: x-amz-grant-read: id=\"11112222333\",
+ id=\"444455556666\" This functionality is not supported for directory buckets. This
+ functionality is not supported for Amazon S3 on Outposts.
+- `\"x-amz-grant-write-acp\"`: Specify access permissions explicitly to allow grantee to
+ write the ACL for the applicable object. By default, all objects are
+ private. Only the owner has full access control. When uploading an object, you can use this
+ header to explicitly grant access permissions to specific Amazon Web Services accounts or
+ groups. This header maps to specific permissions that Amazon S3 supports in an ACL. For
+ more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide. You
+ specify each grantee as a type=value pair, where the type is one of the following: id
+ – if the value specified is the canonical user ID of an Amazon Web Services account
+ uri – if you are granting permissions to a predefined group emailAddress – if the
+ value specified is the email address of an Amazon Web Services account Using email
+ addresses to specify a grantee is only supported in the following Amazon Web Services
+ Regions: US East (N. Virginia) US West (N. 
California) US West (Oregon) Asia + Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) + South America (São Paulo) For a list of all the Amazon S3 supported Regions and + endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For + example, the following x-amz-grant-read header grants the Amazon Web Services accounts + identified by account IDs permissions to read object data and its metadata: + x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" This functionality is not + supported for directory buckets. This functionality is not supported for Amazon S3 on + Outposts. - `"x-amz-meta-"`: A map of metadata to store with the object in S3. - `"x-amz-object-lock-legal-hold"`: Specifies whether you want to apply a legal hold to the - uploaded object. + uploaded object. This functionality is not supported for directory buckets. - `"x-amz-object-lock-mode"`: Specifies the Object Lock mode that you want to apply to the - uploaded object. + uploaded object. This functionality is not supported for directory buckets. - `"x-amz-object-lock-retain-until-date"`: Specifies the date and time when you want the - Object Lock to expire. + Object Lock to expire. This functionality is not supported for directory buckets. - `"x-amz-request-payer"`: -- `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing - this object in Amazon S3 (for example, AES256, aws:kms). -- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the ID of the symmetric - encryption customer managed key to use for object encryption. All GET and PUT requests for - an object protected by Amazon Web Services KMS will fail if not made via SSL or using - SigV4. For information about configuring using any of the officially supported Amazon Web - Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request - Authentication in the Amazon S3 User Guide. +- `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when you + store this object in Amazon S3 (for example, AES256, aws:kms). For directory buckets, only + server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the ID (Key ID, Key ARN, or + Key Alias) of the symmetric encryption customer managed key to use for object encryption. + This functionality is not supported for directory buckets. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with an object action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 + Bucket Key for object encryption with SSE-KMS. Specifying this header with an object action + doesn’t affect bucket-level settings for S3 Bucket Key. This functionality is not + supported for directory buckets. - `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a - base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. 
-- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use to - when encrypting the object (for example, AES256). + base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This + functionality is not supported for directory buckets. +- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when + encrypting the object (for example, AES256). This functionality is not supported for + directory buckets. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the - x-amz-server-side-encryption-customer-algorithm header. + x-amz-server-side-encryption-customer-algorithm header. This functionality is not + supported for directory buckets. - `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of - the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for + a message integrity check to ensure that the encryption key was transmitted without error. + This functionality is not supported for directory buckets. - `"x-amz-storage-class"`: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see - Storage Classes in the Amazon S3 User Guide. + For more information, see Storage Classes in the Amazon S3 User Guide. For directory + buckets, only the S3 Express One Zone storage class is supported to store newly created + objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. - `"x-amz-tagging"`: The tag-set for the object. The tag-set must be encoded as URL Query - parameters. + parameters. This functionality is not supported for directory buckets. - `"x-amz-website-redirect-location"`: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon - S3 stores the value of this header in the object metadata. + S3 stores the value of this header in the object metadata. This functionality is not + supported for directory buckets. """ function create_multipart_upload( Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config() @@ -790,22 +1086,124 @@ function create_multipart_upload( ) end +""" + create_session(bucket) + create_session(bucket, params::Dict{String,<:Any}) + +Creates a session that establishes temporary security credentials to support fast +authentication and authorization for the Zonal endpoint APIs on directory buckets. For more +information about Zonal endpoint APIs that include the Availability Zone in the request +endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide. To make Zonal endpoint +API requests on a directory bucket, use the CreateSession API operation. Specifically, you +grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM +identity-based policy. 
Then, you use IAM credentials to make the CreateSession API request +on the bucket, which returns temporary security credentials that include the access key ID, +secret access key, session token, and expiration. These credentials have associated +permissions to access the Zonal endpoint APIs. After the session is created, you don’t +need to use other policies to grant permissions to each Zonal endpoint API individually. +Instead, in your Zonal endpoint API requests, you sign your requests by applying the +temporary security credentials of the session to the request headers and following the +SigV4 protocol for authentication. You also apply the session token to the +x-amz-s3session-token request header for authorization. Temporary security credentials are +scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that +you make with those credentials will fail. You must use IAM credentials again to make a +CreateSession API request that generates a new set of temporary credentials for use. +Temporary credentials cannot be extended or refreshed beyond the original specified +interval. If you use Amazon Web Services SDKs, SDKs handle the session token refreshes +automatically to avoid service interruptions when a session expires. We recommend that you +use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. +For more information, see Performance guidelines and design patterns in the Amazon S3 User +Guide. You must make requests for this API operation to the Zonal endpoint. These +endpoints support virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not +supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject API +operation doesn't use the temporary security credentials returned from the CreateSession +API operation for authentication and authorization. For information about authentication +and authorization of the CopyObject API operation on directory buckets, see CopyObject. +HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket API operation +doesn't use the temporary security credentials returned from the CreateSession API +operation for authentication and authorization. For information about authentication and +authorization of the HeadBucket API operation on directory buckets, see HeadBucket. +Permissions To obtain temporary security credentials, you must create a bucket policy or +an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. +In a policy, you can have the s3express:SessionMode condition key to control who can create +a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, +see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 +Express One Zone and Amazon Web Services Identity and Access Management (IAM) +identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. To grant +cross-account access to Zonal endpoint APIs, the bucket policy should also grant both +accounts the s3express:CreateSession permission. HTTP Host header syntax Directory +buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. + +# Arguments +- `bucket`: The name of the bucket that you create a session for. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"x-amz-create-session-mode"`: Specifies the mode of the session that will be created, + either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite + session is capable of executing all the Zonal endpoint APIs on a directory bucket. A + ReadOnly session is constrained to execute the following Zonal endpoint APIs: GetObject, + HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads. +""" +function create_session(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) + return s3( + "GET", "/$(Bucket)?session"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function create_session( + Bucket, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3( + "GET", + "/$(Bucket)?session", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_bucket(bucket) delete_bucket(bucket, params::Dict{String,<:Any}) Deletes the S3 bucket. All objects (including all object versions and delete markers) in -the bucket must be deleted before the bucket itself can be deleted. The following +the bucket must be deleted before the bucket itself can be deleted. Directory buckets - +If multipart uploads in a directory bucket are in progress, you can't delete the bucket +until all the in-progress multipart uploads are aborted or completed. Directory buckets +- For directory buckets, you must make requests for this API operation to the Regional +endpoint. These endpoints support path-style requests in the format +https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style +requests aren't supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. Permissions General purpose bucket permissions - You must +have the s3:DeleteBucket permission on the specified bucket in a policy. Directory +bucket permissions - You must have the s3express:DeleteBucket permission in an IAM +identity-based policy instead of a bucket policy. Cross-account access to this API +operation isn't supported. This operation can only be performed by the Amazon Web Services +account that owns the resource. For more information about directory bucket policies and +permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express +One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to DeleteBucket: CreateBucket DeleteObject # Arguments -- `bucket`: Specifies the bucket being deleted. +- `bucket`: Specifies the bucket being deleted. Directory buckets - When you use this + operation with a directory bucket, you must use path-style requests in the format + https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style + requests aren't supported. Directory bucket names must be unique in the chosen Availability + Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. 
If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this + header is not supported in this API operation. If you specify this header, the request + fails with the HTTP status code 501 Not Implemented. """ function delete_bucket(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -828,15 +1226,16 @@ end delete_bucket_analytics_configuration(bucket, id) delete_bucket_analytics_configuration(bucket, id, params::Dict{String,<:Any}) -Deletes an analytics configuration for the bucket (specified by the analytics configuration -ID). To use this operation, you must have permissions to perform the -s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources. For information about the Amazon S3 analytics feature, see Amazon -S3 Analytics – Storage Class Analysis. The following operations are related to -DeleteBucketAnalyticsConfiguration: GetBucketAnalyticsConfiguration -ListBucketAnalyticsConfigurations PutBucketAnalyticsConfiguration + This operation is not supported by directory buckets. Deletes an analytics configuration +for the bucket (specified by the analytics configuration ID). To use this operation, you +must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner +has this permission by default. The bucket owner can grant this permission to others. For +more information about permissions, see Permissions Related to Bucket Subresource +Operations and Managing Access Permissions to Your Amazon S3 Resources. For information +about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis. +The following operations are related to DeleteBucketAnalyticsConfiguration: +GetBucketAnalyticsConfiguration ListBucketAnalyticsConfigurations +PutBucketAnalyticsConfiguration # Arguments - `bucket`: The name of the bucket from which an analytics configuration is deleted. @@ -845,8 +1244,8 @@ ListBucketAnalyticsConfigurations PutBucketAnalyticsConfiguration # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_analytics_configuration( Bucket, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -878,11 +1277,12 @@ end delete_bucket_cors(bucket) delete_bucket_cors(bucket, params::Dict{String,<:Any}) -Deletes the cors configuration information set for the bucket. To use this operation, you -must have permission to perform the s3:PutBucketCORS action. The bucket owner has this -permission by default and can grant this permission to others. For information about cors, -see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide. 
The following -operations are related to DeleteBucketCors: PutBucketCors RESTOPTIONSobject + This operation is not supported by directory buckets. Deletes the cors configuration +information set for the bucket. To use this operation, you must have permission to perform +the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant +this permission to others. For information about cors, see Enabling Cross-Origin Resource +Sharing in the Amazon S3 User Guide. Related Resources PutBucketCors +RESTOPTIONSobject # Arguments - `bucket`: Specifies the bucket whose cors configuration is being deleted. @@ -890,8 +1290,8 @@ operations are related to DeleteBucketCors: PutBucketCors RESTOPTIONSobje # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_cors(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -914,15 +1314,16 @@ end delete_bucket_encryption(bucket) delete_bucket_encryption(bucket, params::Dict{String,<:Any}) -This implementation of the DELETE action resets the default encryption for the bucket as -server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the -bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 -User Guide. To use this operation, you must have permissions to perform the -s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related -to DeleteBucketEncryption: PutBucketEncryption GetBucketEncryption + This operation is not supported by directory buckets. This implementation of the DELETE +action resets the default encryption for the bucket as server-side encryption with Amazon +S3 managed keys (SSE-S3). For information about the bucket default encryption feature, see +Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. To use this operation, you +must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner +has this permission by default. The bucket owner can grant this permission to others. For +more information about permissions, see Permissions Related to Bucket Subresource +Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 +User Guide. The following operations are related to DeleteBucketEncryption: +PutBucketEncryption GetBucketEncryption # Arguments - `bucket`: The name of the bucket containing the server-side encryption configuration to @@ -931,8 +1332,8 @@ to DeleteBucketEncryption: PutBucketEncryption GetBucketEncryption # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). 
+ account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_encryption(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -958,21 +1359,22 @@ end delete_bucket_intelligent_tiering_configuration(bucket, id) delete_bucket_intelligent_tiering_configuration(bucket, id, params::Dict{String,<:Any}) -Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The S3 -Intelligent-Tiering storage class is designed to optimize storage costs by automatically -moving data to the most cost-effective storage access tier, without performance impact or -operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low -latency and high throughput access tiers. To get the lowest storage cost on data that can -be accessed in minutes to hours, you can choose to activate additional archiving -capabilities. The S3 Intelligent-Tiering storage class is the ideal storage class for data -with unknown, changing, or unpredictable access patterns, independent of object size or -retention period. If the size of an object is less than 128 KB, it is not monitored and not -eligible for auto-tiering. Smaller objects can be stored, but they are always charged at -the Frequent Access tier rates in the S3 Intelligent-Tiering storage class. For more -information, see Storage class for automatically optimizing frequently and infrequently -accessed objects. Operations related to DeleteBucketIntelligentTieringConfiguration -include: GetBucketIntelligentTieringConfiguration -PutBucketIntelligentTieringConfiguration ListBucketIntelligentTieringConfigurations + This operation is not supported by directory buckets. Deletes the S3 Intelligent-Tiering +configuration from the specified bucket. The S3 Intelligent-Tiering storage class is +designed to optimize storage costs by automatically moving data to the most cost-effective +storage access tier, without performance impact or operational overhead. S3 +Intelligent-Tiering delivers automatic cost savings in three low latency and high +throughput access tiers. To get the lowest storage cost on data that can be accessed in +minutes to hours, you can choose to activate additional archiving capabilities. The S3 +Intelligent-Tiering storage class is the ideal storage class for data with unknown, +changing, or unpredictable access patterns, independent of object size or retention period. +If the size of an object is less than 128 KB, it is not monitored and not eligible for +auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent +Access tier rates in the S3 Intelligent-Tiering storage class. For more information, see +Storage class for automatically optimizing frequently and infrequently accessed objects. +Operations related to DeleteBucketIntelligentTieringConfiguration include: +GetBucketIntelligentTieringConfiguration PutBucketIntelligentTieringConfiguration +ListBucketIntelligentTieringConfigurations # Arguments - `bucket`: The name of the Amazon S3 bucket whose configuration you want to modify or @@ -1010,14 +1412,15 @@ end delete_bucket_inventory_configuration(bucket, id) delete_bucket_inventory_configuration(bucket, id, params::Dict{String,<:Any}) -Deletes an inventory configuration (identified by the inventory ID) from the bucket. To use -this operation, you must have permissions to perform the s3:PutInventoryConfiguration -action. 
The bucket owner has this permission by default. The bucket owner can grant this -permission to others. For more information about permissions, see Permissions Related to -Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. -For information about the Amazon S3 inventory feature, see Amazon S3 Inventory. Operations -related to DeleteBucketInventoryConfiguration include: GetBucketInventoryConfiguration - PutBucketInventoryConfiguration ListBucketInventoryConfigurations + This operation is not supported by directory buckets. Deletes an inventory configuration +(identified by the inventory ID) from the bucket. To use this operation, you must have +permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this +permission by default. The bucket owner can grant this permission to others. For more +information about permissions, see Permissions Related to Bucket Subresource Operations and +Managing Access Permissions to Your Amazon S3 Resources. For information about the Amazon +S3 inventory feature, see Amazon S3 Inventory. Operations related to +DeleteBucketInventoryConfiguration include: GetBucketInventoryConfiguration +PutBucketInventoryConfiguration ListBucketInventoryConfigurations # Arguments - `bucket`: The name of the bucket containing the inventory configuration to delete. @@ -1026,8 +1429,8 @@ related to DeleteBucketInventoryConfiguration include: GetBucketInventoryCon # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_inventory_configuration( Bucket, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -1059,16 +1462,17 @@ end delete_bucket_lifecycle(bucket) delete_bucket_lifecycle(bucket, params::Dict{String,<:Any}) -Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the -lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your -objects never expire, and Amazon S3 no longer automatically deletes any objects on the -basis of rules contained in the deleted lifecycle configuration. To use this operation, you -must have permission to perform the s3:PutLifecycleConfiguration action. By default, the -bucket owner has this permission and the bucket owner can grant this permission to others. -There is usually some time lag before lifecycle configuration deletion is fully propagated -to all the Amazon S3 systems. For more information about the object expiration, see -Elements to Describe Lifecycle Actions. Related actions include: -PutBucketLifecycleConfiguration GetBucketLifecycleConfiguration + This operation is not supported by directory buckets. Deletes the lifecycle configuration +from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the +lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 +no longer automatically deletes any objects on the basis of rules contained in the deleted +lifecycle configuration. To use this operation, you must have permission to perform the +s3:PutLifecycleConfiguration action. 
By default, the bucket owner has this permission and +the bucket owner can grant this permission to others. There is usually some time lag before +lifecycle configuration deletion is fully propagated to all the Amazon S3 systems. For more +information about the object expiration, see Elements to Describe Lifecycle Actions. +Related actions include: PutBucketLifecycleConfiguration +GetBucketLifecycleConfiguration # Arguments - `bucket`: The bucket name of the lifecycle to delete. @@ -1076,8 +1480,8 @@ PutBucketLifecycleConfiguration GetBucketLifecycleConfiguration # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_lifecycle(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1103,17 +1507,17 @@ end delete_bucket_metrics_configuration(bucket, id) delete_bucket_metrics_configuration(bucket, id, params::Dict{String,<:Any}) -Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the -metrics configuration ID) from the bucket. Note that this doesn't include the daily storage -metrics. To use this operation, you must have permissions to perform the -s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources. For information about CloudWatch request metrics for Amazon S3, -see Monitoring Metrics with Amazon CloudWatch. The following operations are related to -DeleteBucketMetricsConfiguration: GetBucketMetricsConfiguration -PutBucketMetricsConfiguration ListBucketMetricsConfigurations Monitoring Metrics -with Amazon CloudWatch + This operation is not supported by directory buckets. Deletes a metrics configuration for +the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the +bucket. Note that this doesn't include the daily storage metrics. To use this operation, +you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket +owner has this permission by default. The bucket owner can grant this permission to others. +For more information about permissions, see Permissions Related to Bucket Subresource +Operations and Managing Access Permissions to Your Amazon S3 Resources. For information +about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon +CloudWatch. The following operations are related to DeleteBucketMetricsConfiguration: +GetBucketMetricsConfiguration PutBucketMetricsConfiguration +ListBucketMetricsConfigurations Monitoring Metrics with Amazon CloudWatch # Arguments - `bucket`: The name of the bucket containing the metrics configuration to delete. @@ -1123,8 +1527,8 @@ with Amazon CloudWatch # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. 
If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_metrics_configuration( Bucket, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -1156,12 +1560,12 @@ end delete_bucket_ownership_controls(bucket) delete_bucket_ownership_controls(bucket, params::Dict{String,<:Any}) -Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the -s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, -see Specifying Permissions in a Policy. For information about Amazon S3 Object Ownership, -see Using Object Ownership. The following operations are related to -DeleteBucketOwnershipControls: GetBucketOwnershipControls PutBucketOwnershipControls - + This operation is not supported by directory buckets. Removes OwnershipControls for an +Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls +permission. For more information about Amazon S3 permissions, see Specifying Permissions in +a Policy. For information about Amazon S3 Object Ownership, see Using Object Ownership. +The following operations are related to DeleteBucketOwnershipControls: +GetBucketOwnershipControls PutBucketOwnershipControls # Arguments - `bucket`: The Amazon S3 bucket whose OwnershipControls you want to delete. @@ -1169,8 +1573,8 @@ DeleteBucketOwnershipControls: GetBucketOwnershipControls PutBucketOwners # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_ownership_controls( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -1198,30 +1602,52 @@ end delete_bucket_policy(bucket) delete_bucket_policy(bucket, params::Dict{String,<:Any}) -This implementation of the DELETE action uses the policy subresource to delete the policy -of a specified bucket. If you are using an identity other than the root user of the Amazon -Web Services account that owns the bucket, the calling identity must have the -DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's -account to use this operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 -returns a 403 Access Denied error. If you have the correct permissions, but you're not -using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -Method Not Allowed error. To ensure that bucket owners don't inadvertently lock -themselves out of their own buckets, the root principal in a bucket owner's Amazon Web -Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy -API actions, even if their bucket policy explicitly denies the root principal's access. -Bucket owner root principals can only be blocked from performing these API actions by VPC -endpoint policies and Amazon Web Services Organizations policies. For more information -about bucket policies, see Using Bucket Policies and UserPolicies. 
The following -operations are related to DeleteBucketPolicy CreateBucket DeleteObject +Deletes the policy of a specified bucket. Directory buckets - For directory buckets, you +must make requests for this API operation to the Regional endpoint. These endpoints support +path-style requests in the format +https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style +requests aren't supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. Permissions If you are using an identity other than the root user +of the Amazon Web Services account that owns the bucket, the calling identity must both +have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket +owner's account in order to use this operation. If you don't have DeleteBucketPolicy +permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct +permissions, but you're not using an identity that belongs to the bucket owner's account, +Amazon S3 returns a 405 Method Not Allowed error. To ensure that bucket owners don't +inadvertently lock themselves out of their own buckets, the root principal in a bucket +owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and +DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root +principal's access. Bucket owner root principals can only be blocked from performing these +API actions by VPC endpoint policies and Amazon Web Services Organizations policies. +General purpose bucket permissions - The s3:DeleteBucketPolicy permission is required in a +policy. For more information about general purpose buckets bucket policies, see Using +Bucket Policies and User Policies in the Amazon S3 User Guide. Directory bucket +permissions - To grant access to this API operation, you must have the +s3express:DeleteBucketPolicy permission in an IAM identity-based policy instead of a bucket +policy. Cross-account access to this API operation isn't supported. This operation can only +be performed by the Amazon Web Services account that owns the resource. For more +information about directory bucket policies and permissions, see Amazon Web Services +Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. + HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +s3express-control.region.amazonaws.com. The following operations are related to +DeleteBucketPolicy CreateBucket DeleteObject # Arguments -- `bucket`: The bucket name. +- `bucket`: The bucket name. Directory buckets - When you use this operation with a + directory bucket, you must use path-style requests in the format + https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style + requests aren't supported. Directory bucket names must be unique in the chosen Availability + Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). 
+ account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this + header is not supported in this API operation. If you specify this header, the request + fails with the HTTP status code 501 Not Implemented. """ function delete_bucket_policy(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1247,14 +1673,15 @@ end delete_bucket_replication(bucket) delete_bucket_replication(bucket, params::Dict{String,<:Any}) - Deletes the replication configuration from the bucket. To use this operation, you must -have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has -these permissions by default and can grant it to others. For more information about -permissions, see Permissions Related to Bucket Subresource Operations and Managing Access -Permissions to Your Amazon S3 Resources. It can take a while for the deletion of a -replication configuration to fully propagate. For information about replication -configuration, see Replication in the Amazon S3 User Guide. The following operations are -related to DeleteBucketReplication: PutBucketReplication GetBucketReplication + This operation is not supported by directory buckets. Deletes the replication +configuration from the bucket. To use this operation, you must have permissions to perform +the s3:PutReplicationConfiguration action. The bucket owner has these permissions by +default and can grant it to others. For more information about permissions, see Permissions +Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 +Resources. It can take a while for the deletion of a replication configuration to fully +propagate. For information about replication configuration, see Replication in the Amazon +S3 User Guide. The following operations are related to DeleteBucketReplication: +PutBucketReplication GetBucketReplication # Arguments - `bucket`: The bucket name. @@ -1262,8 +1689,8 @@ related to DeleteBucketReplication: PutBucketReplication GetBucketReplica # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_replication( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -1291,10 +1718,11 @@ end delete_bucket_tagging(bucket) delete_bucket_tagging(bucket, params::Dict{String,<:Any}) -Deletes the tags from the bucket. To use this operation, you must have permission to -perform the s3:PutBucketTagging action. By default, the bucket owner has this permission -and can grant this permission to others. The following operations are related to -DeleteBucketTagging: GetBucketTagging PutBucketTagging + This operation is not supported by directory buckets. Deletes the tags from the bucket. +To use this operation, you must have permission to perform the s3:PutBucketTagging action. +By default, the bucket owner has this permission and can grant this permission to others. 
+The following operations are related to DeleteBucketTagging: GetBucketTagging +PutBucketTagging # Arguments - `bucket`: The bucket that has the tag set to be removed. @@ -1302,8 +1730,8 @@ DeleteBucketTagging: GetBucketTagging PutBucketTagging # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_bucket_tagging(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1329,17 +1757,17 @@ end delete_bucket_website(bucket) delete_bucket_website(bucket, params::Dict{String,<:Any}) -This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK -response upon successfully deleting a website configuration on the specified bucket. You -will get a 200 OK response if the website configuration you are trying to delete does not -exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the -request does not exist. This DELETE action requires the S3:DeleteBucketWebsite permission. -By default, only the bucket owner can delete the website configuration attached to a -bucket. However, bucket owners can grant other users permission to delete the website -configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite -permission. For more information about hosting websites, see Hosting Websites on Amazon -S3. The following operations are related to DeleteBucketWebsite: GetBucketWebsite -PutBucketWebsite + This operation is not supported by directory buckets. This action removes the website +configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting +a website configuration on the specified bucket. You will get a 200 OK response if the +website configuration you are trying to delete does not exist on the bucket. Amazon S3 +returns a 404 response if the bucket specified in the request does not exist. This DELETE +action requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner +can delete the website configuration attached to a bucket. However, bucket owners can grant +other users permission to delete the website configuration by writing a bucket policy +granting them the S3:DeleteBucketWebsite permission. For more information about hosting +websites, see Hosting Websites on Amazon S3. The following operations are related to +DeleteBucketWebsite: GetBucketWebsite PutBucketWebsite # Arguments - `bucket`: The bucket name for which you want to remove the website configuration. @@ -1347,8 +1775,8 @@ PutBucketWebsite # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
""" function delete_bucket_website(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1374,49 +1802,97 @@ end delete_object(bucket, key) delete_object(bucket, key, params::Dict{String,<:Any}) -Removes the null version (if there is one) of an object and inserts a delete marker, which -becomes the latest version of the object. If there isn't a null version, Amazon S3 does not -remove any objects but will still respond that the command was successful. To remove a -specific version, you must use the version Id subresource. Using this subresource -permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets -the response header, x-amz-delete-marker, to true. If the object you want to delete is in -a bucket where the bucket versioning configuration is MFA Delete enabled, you must include -the x-amz-mfa request header in the DELETE versionId request. Requests that include -x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete. To -see sample requests that use versioning, see Sample Request. You can delete objects by -explicitly calling DELETE Object or configure its lifecycle (PutBucketLifecycle) to enable -Amazon S3 to remove them for you. If you want to block users or accounts from removing or -deleting objects from your bucket, you must deny them the s3:DeleteObject, -s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions. The following action is -related to DeleteObject: PutObject +Removes an object from a bucket. The behavior depends on the bucket's versioning state: +If bucket versioning is not enabled, the operation permanently deletes the object. If +bucket versioning is enabled, the operation inserts a delete marker, which becomes the +current version of the object. To permanently delete an object in a versioned bucket, you +must include the object’s versionId in the request. For more information about +versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket. + If bucket versioning is suspended, the operation removes the object that has a null +versionId, if there is one, and inserts a delete marker that becomes the current version of +the object. If there isn't an object with a null versionId, and all versions of the object +have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To +permanently delete an object that has a versionId, you must include the object’s +versionId in the request. For more information about versioning-suspended buckets, see +Deleting objects from versioning-suspended buckets. Directory buckets - S3 Versioning +isn't enabled and supported for directory buckets. For this API operation, only the null +value of the version ID is supported by directory buckets. You can only specify null to the +versionId query parameter in the request. Directory buckets - For directory buckets, you +must make requests for this API operation to the Zonal endpoint. These endpoints support +virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. To remove a specific version, you must use the versionId query parameter. Using +this query parameter permanently deletes the version. If the object deleted is a delete +marker, Amazon S3 sets the response header x-amz-delete-marker to true. 
If the object you +want to delete is in a bucket where the bucket versioning configuration is MFA Delete +enabled, you must include the x-amz-mfa request header in the DELETE versionId request. +Requests that include x-amz-mfa must use HTTPS. For more information about MFA Delete, see +Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, +see Sample Request. Directory buckets - MFA delete is not supported by directory +buckets. You can delete objects by explicitly calling DELETE Object or calling +(PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users +or accounts from removing or deleting objects from your bucket, you must deny them the +s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions. +Directory buckets - S3 Lifecycle is not supported by directory buckets. Permissions +General purpose bucket permissions - The following permissions are required in your +policies when your DeleteObjects request includes specific headers. s3:DeleteObject - +To delete an object from a bucket, you must always have the s3:DeleteObject permission. +s3:DeleteObjectVersion - To delete a specific version of an object from a +versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. +Directory bucket permissions - To grant access to this API operation on a directory bucket, +we recommend that you use the CreateSession API operation for session-based +authorization. Specifically, you grant the s3express:CreateSession permission to the +directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the +CreateSession API call on the bucket to obtain a session token. With the session token in +your request header, you can make API requests to this operation. After the session token +expires, you make another CreateSession API call to generate a new session token for use. +Amazon Web Services CLI or SDKs create session and refresh the session token automatically +to avoid service interruptions when a session expires. For more information about +authorization, see CreateSession . HTTP Host header syntax Directory buckets - The +HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The +following action is related to DeleteObject: PutObject # Arguments -- `bucket`: The bucket name of the bucket containing the object. When using this action - with an access point, you must direct requests to the access point hostname. The access - point hostname takes the form +- `bucket`: The bucket name of the bucket containing the object. Directory buckets - When + you use this operation with a directory bucket, you must use virtual-hosted-style requests + in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are + not supported. Directory bucket names must be unique in the chosen Availability Zone. + Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Key name of the object to delete. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"versionId"`: VersionId used to reference a specific version of the object. +- `"versionId"`: Version ID used to reference a specific version of the object. For + directory buckets in this API operation, only the null value of the version ID is + supported. - `"x-amz-bypass-governance-retention"`: Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation. To use this header, you must have - the s3:BypassGovernanceRetention permission. + the s3:BypassGovernanceRetention permission. This functionality is not supported for + directory buckets. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-mfa"`: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently - delete a versioned object if versioning is configured with MFA delete enabled. + delete a versioned object if versioning is configured with MFA delete enabled. This + functionality is not supported for directory buckets. - `"x-amz-request-payer"`: """ function delete_object(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1446,34 +1922,37 @@ end delete_object_tagging(bucket, key) delete_object_tagging(bucket, key, params::Dict{String,<:Any}) -Removes the entire tag set from the specified object. For more information about managing -object tags, see Object Tagging. To use this operation, you must have permission to -perform the s3:DeleteObjectTagging action. To delete tags of a specific object version, add -the versionId query parameter in the request. You will need permission for the -s3:DeleteObjectVersionTagging action. 
The following operations are related to -DeleteObjectTagging: PutObjectTagging GetObjectTagging + This operation is not supported by directory buckets. Removes the entire tag set from the +specified object. For more information about managing object tags, see Object Tagging. To +use this operation, you must have permission to perform the s3:DeleteObjectTagging action. +To delete tags of a specific object version, add the versionId query parameter in the +request. You will need permission for the s3:DeleteObjectVersionTagging action. The +following operations are related to DeleteObjectTagging: PutObjectTagging +GetObjectTagging # Arguments -- `bucket`: The bucket name containing the objects from which to remove the tags. When - using this action with an access point, you must direct requests to the access point - hostname. The access point hostname takes the form - AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with - an access point through the Amazon Web Services SDKs, you provide the access point ARN in - place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. +- `bucket`: The bucket name containing the objects from which to remove the tags. Access + points - When you use this action with an access point, you must provide the alias of the + access point in place of the bucket name or specify the access point ARN. When using the + access point ARN, you must direct requests to the access point hostname. The access point + hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When + using this action with an access point through the Amazon Web Services SDKs, you provide + the access point ARN in place of the bucket name. For more information about access point + ARNs, see Using access points in the Amazon S3 User Guide. S3 on Outposts - When you use + this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + hostname. The S3 on Outposts hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The key that identifies the object in the bucket from which to remove all tags. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"versionId"`: The versionId of the object that the tag-set will be removed from. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
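A minimal usage sketch, assuming the high-level `S3` module generated by AWS.jl's `@service S3`; the bucket, key, and version ID are placeholders:

```julia
using AWS
@service S3

# Remove the entire tag set from the current version of an object.
S3.delete_object_tagging("my-bucket", "my-key")

# Remove the tag set from a specific object version (requires
# s3:DeleteObjectVersionTagging; placeholder version ID).
S3.delete_object_tagging(
    "my-bucket", "my-key", Dict("versionId" => "example-version-id")
)
```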
""" function delete_object_tagging( Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config() @@ -1504,64 +1983,113 @@ end delete_objects(bucket, delete) delete_objects(bucket, delete, params::Dict{String,<:Any}) -This action enables you to delete multiple objects from a bucket using a single HTTP -request. If you know the object keys that you want to delete, then this action provides a -suitable alternative to sending individual delete requests, reducing per-request overhead. -The request contains a list of up to 1000 keys that you want to delete. In the XML, you -provide the object key names, and optionally, version IDs if you want to delete a specific -version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a -delete action and returns the result of that delete, success, or failure, in the response. -Note that if the object specified in the request is not found, Amazon S3 returns the result -as deleted. The action supports two modes for the response: verbose and quiet. By default, -the action uses verbose mode in which the response includes the result of deletion of each -key in your request. In quiet mode the response includes only keys where the delete action -encountered an error. For a successful deletion, the action does not return any information -about the delete in the response body. When performing this action on an MFA Delete enabled -bucket, that attempts to delete any versioned objects, you must include an MFA token. If -you do not provide one, the entire request will fail, even if there are non-versioned -objects you are trying to delete. If you provide an invalid token, whether there are -versioned keys in the request or not, the entire Multi-Object Delete request will fail. For -information about MFA Delete, see MFA Delete. Finally, the Content-MD5 header is required -for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your -request body has not been altered in transit. The following operations are related to +This operation enables you to delete multiple objects from a bucket using a single HTTP +request. If you know the object keys that you want to delete, then this operation provides +a suitable alternative to sending individual delete requests, reducing per-request +overhead. The request can contain a list of up to 1000 keys that you want to delete. In the +XML, you provide the object key names, and optionally, version IDs if you want to delete a +specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 +performs a delete operation and returns the result of that delete, success or failure, in +the response. Note that if the object specified in the request is not found, Amazon S3 +returns the result as deleted. Directory buckets - S3 Versioning isn't enabled and +supported for directory buckets. Directory buckets - For directory buckets, you must +make requests for this API operation to the Zonal endpoint. These endpoints support +virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. The operation supports two modes for the response: verbose and quiet. By default, +the operation uses verbose mode in which the response includes the result of deletion of +each key in your request. In quiet mode the response includes only keys where the delete +operation encountered an error. 
For a successful deletion in quiet mode, the operation +does not return any information about the delete in the response body. When performing this +action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you +must include an MFA token. If you do not provide one, the entire request will fail, even if +there are non-versioned objects you are trying to delete. If you provide an invalid token, +whether there are versioned keys in the request or not, the entire Multi-Object Delete +request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User +Guide. Directory buckets - MFA delete is not supported by directory buckets. +Permissions General purpose bucket permissions - The following permissions are required +in your policies when your DeleteObjects request includes specific headers. +s3:DeleteObject - To delete an object from a bucket, you must always specify the +s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of +an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion +permission. Directory bucket permissions - To grant access to this API operation on a +directory bucket, we recommend that you use the CreateSession API operation for +session-based authorization. Specifically, you grant the s3express:CreateSession permission +to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make +the CreateSession API call on the bucket to obtain a session token. With the session token +in your request header, you can make API requests to this operation. After the session +token expires, you make another CreateSession API call to generate a new session token for +use. Amazon Web Services CLI or SDKs create session and refresh the session token +automatically to avoid service interruptions when a session expires. For more information +about authorization, see CreateSession . Content-MD5 request header General purpose +bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. +Amazon S3 uses the header value to ensure that your request body has not been altered in +transit. Directory bucket - The Content-MD5 request header or an additional checksum +request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, +or x-amz-checksum-sha256) is required for all Multi-Object Delete requests. HTTP Host +header syntax Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to DeleteObjects: CreateMultipartUpload UploadPart CompleteMultipartUpload ListParts AbortMultipartUpload # Arguments -- `bucket`: The bucket name containing the objects to delete.
Directory buckets - When + you use this operation with a directory bucket, you must use virtual-hosted-style requests + in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are + not supported. Directory bucket names must be unique in the chosen Availability Zone. + Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form + AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with + an access point through the Amazon Web Services SDKs, you provide the access point ARN in + place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `delete`: Container for the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-bypass-governance-retention"`: Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have - the s3:BypassGovernanceRetention permission. + the s3:BypassGovernanceRetention permission. This functionality is not supported for + directory buckets. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-mfa"`: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently - delete a versioned object if versioning is configured with MFA delete enabled. + delete a versioned object if versioning is configured with MFA delete enabled. When + performing the DeleteObjects operation on an MFA delete enabled bucket, which attempts to + delete the specified versioned objects, you must include an MFA token. If you don't provide + an MFA token, the entire request will fail, even if there are non-versioned objects that + you are trying to delete. If you provide an invalid token, whether there are versioned + object keys in the request or not, the entire Multi-Object Delete request will fail. For + information about MFA Delete, see MFA Delete in the Amazon S3 User Guide. 
This + functionality is not supported for directory buckets. - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. This checksum algorithm must be the same for all parts and it - match the checksum value supplied in the CreateMultipartUpload request. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm + header, replace algorithm with the supported algorithm from the following list: CRC32 + CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon + S3 User Guide. If the individual checksum value you provide through + x-amz-checksum-algorithm doesn't match the checksum algorithm you set through + x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter + and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm + . If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + parameter. """ function delete_objects(Bucket, Delete; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1591,12 +2119,13 @@ end delete_public_access_block(bucket) delete_public_access_block(bucket, params::Dict{String,<:Any}) -Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -you must have the s3:PutBucketPublicAccessBlock permission. For more information about -permissions, see Permissions Related to Bucket Subresource Operations and Managing Access -Permissions to Your Amazon S3 Resources. The following operations are related to -DeletePublicAccessBlock: Using Amazon S3 Block Public Access GetPublicAccessBlock - PutPublicAccessBlock GetBucketPolicyStatus + This operation is not supported by directory buckets. Removes the PublicAccessBlock +configuration for an Amazon S3 bucket. To use this operation, you must have the +s3:PutBucketPublicAccessBlock permission. For more information about permissions, see +Permissions Related to Bucket Subresource Operations and Managing Access Permissions to +Your Amazon S3 Resources. The following operations are related to DeletePublicAccessBlock: + Using Amazon S3 Block Public Access GetPublicAccessBlock PutPublicAccessBlock +GetBucketPolicyStatus # Arguments - `bucket`: The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. @@ -1604,8 +2133,8 @@ DeletePublicAccessBlock: Using Amazon S3 Block Public Access GetPublicAcc # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). 
+ account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function delete_public_access_block( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -1633,15 +2162,16 @@ end get_bucket_accelerate_configuration(bucket) get_bucket_accelerate_configuration(bucket, params::Dict{String,<:Any}) -This implementation of the GET action uses the accelerate subresource to return the -Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 -Transfer Acceleration is a bucket-level feature that enables you to perform faster data -transfers to and from Amazon S3. To use this operation, you must have permission to perform -the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. -The bucket owner can grant this permission to others. For more information about -permissions, see Permissions Related to Bucket Subresource Operations and Managing Access -Permissions to your Amazon S3 Resources in the Amazon S3 User Guide. You set the Transfer -Acceleration state of an existing bucket to Enabled or Suspended by using the + This operation is not supported by directory buckets. This implementation of the GET +action uses the accelerate subresource to return the Transfer Acceleration state of a +bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a +bucket-level feature that enables you to perform faster data transfers to and from Amazon +S3. To use this operation, you must have permission to perform the +s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The +bucket owner can grant this permission to others. For more information about permissions, +see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to +your Amazon S3 Resources in the Amazon S3 User Guide. You set the Transfer Acceleration +state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation. A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket. For more information about @@ -1655,8 +2185,9 @@ PutBucketAccelerateConfiguration # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-request-payer"`: """ function get_bucket_accelerate_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -1684,34 +2215,36 @@ end get_bucket_acl(bucket) get_bucket_acl(bucket, params::Dict{String,<:Any}) -This implementation of the GET action uses the acl subresource to return the access control -list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP -access to the bucket. If READ_ACP permission is granted to the anonymous user, you can -return the ACL of the bucket without using an authorization header. To use this API -operation against an access point, provide the alias of the access point in place of the -bucket name. 
To use this API operation against an Object Lambda access point, provide the -alias of the Object Lambda access point in place of the bucket name. If the Object Lambda -access point alias in a request is not valid, the error code InvalidAccessPointAliasError -is returned. For more information about InvalidAccessPointAliasError, see List of Error -Codes. If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -requests to read ACLs are still supported and return the bucket-owner-full-control ACL with -the owner being the account that created the bucket. For more information, see Controlling -object ownership and disabling ACLs in the Amazon S3 User Guide. The following operations -are related to GetBucketAcl: ListObjects + This operation is not supported by directory buckets. This implementation of the GET +action uses the acl subresource to return the access control list (ACL) of a bucket. To use +GET to return the ACL of the bucket, you must have the READ_ACP access to the bucket. If +READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket +without using an authorization header. When you use this API operation with an access +point, provide the alias of the access point in place of the bucket name. When you use this +API operation with an Object Lambda access point, provide the alias of the Object Lambda +access point in place of the bucket name. If the Object Lambda access point alias in a +request is not valid, the error code InvalidAccessPointAliasError is returned. For more +information about InvalidAccessPointAliasError, see List of Error Codes. If your bucket +uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are +still supported and return the bucket-owner-full-control ACL with the owner being the +account that created the bucket. For more information, see Controlling object ownership +and disabling ACLs in the Amazon S3 User Guide. The following operations are related to +GetBucketAcl: ListObjects # Arguments -- `bucket`: Specifies the S3 bucket whose ACL is being requested. To use this API operation - against an access point, provide the alias of the access point in place of the bucket name. - To use this API operation against an Object Lambda access point, provide the alias of the - Object Lambda access point in place of the bucket name. If the Object Lambda access point - alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. - For more information about InvalidAccessPointAliasError, see List of Error Codes. +- `bucket`: Specifies the S3 bucket whose ACL is being requested. When you use this API + operation with an access point, provide the alias of the access point in place of the + bucket name. When you use this API operation with an Object Lambda access point, provide + the alias of the Object Lambda access point in place of the bucket name. If the Object + Lambda access point alias in a request is not valid, the error code + InvalidAccessPointAliasError is returned. For more information about + InvalidAccessPointAliasError, see List of Error Codes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). 
+ account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_acl(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1734,16 +2267,17 @@ end get_bucket_analytics_configuration(bucket, id) get_bucket_analytics_configuration(bucket, id, params::Dict{String,<:Any}) -This implementation of the GET action returns an analytics configuration (identified by the -analytics configuration ID) from the bucket. To use this operation, you must have -permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this -permission by default. The bucket owner can grant this permission to others. For more -information about permissions, see Permissions Related to Bucket Subresource Operations -and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. -For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage -Class Analysis in the Amazon S3 User Guide. The following operations are related to -GetBucketAnalyticsConfiguration: DeleteBucketAnalyticsConfiguration -ListBucketAnalyticsConfigurations PutBucketAnalyticsConfiguration + This operation is not supported by directory buckets. This implementation of the GET +action returns an analytics configuration (identified by the analytics configuration ID) +from the bucket. To use this operation, you must have permissions to perform the +s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The +bucket owner can grant this permission to others. For more information about permissions, +see Permissions Related to Bucket Subresource Operations and Managing Access Permissions +to Your Amazon S3 Resources in the Amazon S3 User Guide. For information about Amazon S3 +analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon S3 User +Guide. The following operations are related to GetBucketAnalyticsConfiguration: +DeleteBucketAnalyticsConfiguration ListBucketAnalyticsConfigurations +PutBucketAnalyticsConfiguration # Arguments - `bucket`: The name of the bucket from which an analytics configuration is retrieved. @@ -1752,8 +2286,8 @@ ListBucketAnalyticsConfigurations PutBucketAnalyticsConfiguration # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_analytics_configuration( Bucket, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -1785,32 +2319,32 @@ end get_bucket_cors(bucket) get_bucket_cors(bucket, params::Dict{String,<:Any}) -Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the -bucket. To use this operation, you must have permission to perform the s3:GetBucketCORS -action. By default, the bucket owner has this permission and can grant it to others. To use -this API operation against an access point, provide the alias of the access point in place -of the bucket name. To use this API operation against an Object Lambda access point, -provide the alias of the Object Lambda access point in place of the bucket name. 
If the -Object Lambda access point alias in a request is not valid, the error code -InvalidAccessPointAliasError is returned. For more information about -InvalidAccessPointAliasError, see List of Error Codes. For more information about CORS, -see Enabling Cross-Origin Resource Sharing. The following operations are related to -GetBucketCors: PutBucketCors DeleteBucketCors + This operation is not supported by directory buckets. Returns the Cross-Origin Resource +Sharing (CORS) configuration information set for the bucket. To use this operation, you +must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner +has this permission and can grant it to others. When you use this API operation with an +access point, provide the alias of the access point in place of the bucket name. When you +use this API operation with an Object Lambda access point, provide the alias of the Object +Lambda access point in place of the bucket name. If the Object Lambda access point alias in +a request is not valid, the error code InvalidAccessPointAliasError is returned. For more +information about InvalidAccessPointAliasError, see List of Error Codes. For more +information about CORS, see Enabling Cross-Origin Resource Sharing. The following +operations are related to GetBucketCors: PutBucketCors DeleteBucketCors # Arguments -- `bucket`: The bucket name for which to get the cors configuration. To use this API - operation against an access point, provide the alias of the access point in place of the - bucket name. To use this API operation against an Object Lambda access point, provide the - alias of the Object Lambda access point in place of the bucket name. If the Object Lambda - access point alias in a request is not valid, the error code InvalidAccessPointAliasError - is returned. For more information about InvalidAccessPointAliasError, see List of Error - Codes. +- `bucket`: The bucket name for which to get the cors configuration. When you use this API + operation with an access point, provide the alias of the access point in place of the + bucket name. When you use this API operation with an Object Lambda access point, provide + the alias of the Object Lambda access point in place of the bucket name. If the Object + Lambda access point alias in a request is not valid, the error code + InvalidAccessPointAliasError is returned. For more information about + InvalidAccessPointAliasError, see List of Error Codes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_cors(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1833,16 +2367,16 @@ end get_bucket_encryption(bucket) get_bucket_encryption(bucket, params::Dict{String,<:Any}) -Returns the default encryption configuration for an Amazon S3 bucket. By default, all -buckets have a default encryption configuration that uses server-side encryption with -Amazon S3 managed keys (SSE-S3). For information about the bucket default encryption -feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. 
To use this -operation, you must have permission to perform the s3:GetEncryptionConfiguration action. -The bucket owner has this permission by default. The bucket owner can grant this permission -to others. For more information about permissions, see Permissions Related to Bucket -Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. The -following operations are related to GetBucketEncryption: PutBucketEncryption -DeleteBucketEncryption + This operation is not supported by directory buckets. Returns the default encryption +configuration for an Amazon S3 bucket. By default, all buckets have a default encryption +configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). For +information about the bucket default encryption feature, see Amazon S3 Bucket Default +Encryption in the Amazon S3 User Guide. To use this operation, you must have permission to +perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by +default. The bucket owner can grant this permission to others. For more information about +permissions, see Permissions Related to Bucket Subresource Operations and Managing Access +Permissions to Your Amazon S3 Resources. The following operations are related to +GetBucketEncryption: PutBucketEncryption DeleteBucketEncryption # Arguments - `bucket`: The name of the bucket from which the server-side encryption configuration is @@ -1851,8 +2385,8 @@ DeleteBucketEncryption # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_encryption(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -1878,21 +2412,22 @@ end get_bucket_intelligent_tiering_configuration(bucket, id) get_bucket_intelligent_tiering_configuration(bucket, id, params::Dict{String,<:Any}) -Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3 -Intelligent-Tiering storage class is designed to optimize storage costs by automatically -moving data to the most cost-effective storage access tier, without performance impact or -operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low -latency and high throughput access tiers. To get the lowest storage cost on data that can -be accessed in minutes to hours, you can choose to activate additional archiving -capabilities. The S3 Intelligent-Tiering storage class is the ideal storage class for data -with unknown, changing, or unpredictable access patterns, independent of object size or -retention period. If the size of an object is less than 128 KB, it is not monitored and not -eligible for auto-tiering. Smaller objects can be stored, but they are always charged at -the Frequent Access tier rates in the S3 Intelligent-Tiering storage class. For more -information, see Storage class for automatically optimizing frequently and infrequently -accessed objects. 
Operations related to GetBucketIntelligentTieringConfiguration include: - DeleteBucketIntelligentTieringConfiguration PutBucketIntelligentTieringConfiguration - ListBucketIntelligentTieringConfigurations + This operation is not supported by directory buckets. Gets the S3 Intelligent-Tiering +configuration from the specified bucket. The S3 Intelligent-Tiering storage class is +designed to optimize storage costs by automatically moving data to the most cost-effective +storage access tier, without performance impact or operational overhead. S3 +Intelligent-Tiering delivers automatic cost savings in three low latency and high +throughput access tiers. To get the lowest storage cost on data that can be accessed in +minutes to hours, you can choose to activate additional archiving capabilities. The S3 +Intelligent-Tiering storage class is the ideal storage class for data with unknown, +changing, or unpredictable access patterns, independent of object size or retention period. +If the size of an object is less than 128 KB, it is not monitored and not eligible for +auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent +Access tier rates in the S3 Intelligent-Tiering storage class. For more information, see +Storage class for automatically optimizing frequently and infrequently accessed objects. +Operations related to GetBucketIntelligentTieringConfiguration include: +DeleteBucketIntelligentTieringConfiguration PutBucketIntelligentTieringConfiguration + ListBucketIntelligentTieringConfigurations # Arguments - `bucket`: The name of the Amazon S3 bucket whose configuration you want to modify or @@ -1930,15 +2465,15 @@ end get_bucket_inventory_configuration(bucket, id) get_bucket_inventory_configuration(bucket, id, params::Dict{String,<:Any}) -Returns an inventory configuration (identified by the inventory configuration ID) from the -bucket. To use this operation, you must have permissions to perform the -s3:GetInventoryConfiguration action. The bucket owner has this permission by default and -can grant this permission to others. For more information about permissions, see -Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources. For information about the Amazon S3 inventory feature, see Amazon -S3 Inventory. The following operations are related to GetBucketInventoryConfiguration: -DeleteBucketInventoryConfiguration ListBucketInventoryConfigurations -PutBucketInventoryConfiguration + This operation is not supported by directory buckets. Returns an inventory configuration +(identified by the inventory configuration ID) from the bucket. To use this operation, you +must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner +has this permission by default and can grant this permission to others. For more +information about permissions, see Permissions Related to Bucket Subresource Operations and +Managing Access Permissions to Your Amazon S3 Resources. For information about the Amazon +S3 inventory feature, see Amazon S3 Inventory. The following operations are related to +GetBucketInventoryConfiguration: DeleteBucketInventoryConfiguration +ListBucketInventoryConfigurations PutBucketInventoryConfiguration # Arguments - `bucket`: The name of the bucket containing the inventory configuration to retrieve. @@ -1947,8 +2482,8 @@ PutBucketInventoryConfiguration # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_inventory_configuration( Bucket, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -1982,17 +2517,18 @@ end For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this -topic. This topic is provided for backward compatibility. Returns the lifecycle -configuration information set on the bucket. For information about lifecycle configuration, -see Object Lifecycle Management. To use this operation, you must have permission to -perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by -default. The bucket owner can grant this permission to others. For more information about -permissions, see Permissions Related to Bucket Subresource Operations and Managing Access -Permissions to Your Amazon S3 Resources. GetBucketLifecycle has the following special -error: Error code: NoSuchLifecycleConfiguration Description: The lifecycle -configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: -Client The following operations are related to GetBucketLifecycle: -GetBucketLifecycleConfiguration PutBucketLifecycle DeleteBucketLifecycle +topic. This topic is provided for backward compatibility. This operation is not supported +by directory buckets. Returns the lifecycle configuration information set on the bucket. +For information about lifecycle configuration, see Object Lifecycle Management. To use +this operation, you must have permission to perform the s3:GetLifecycleConfiguration +action. The bucket owner has this permission by default. The bucket owner can grant this +permission to others. For more information about permissions, see Permissions Related to +Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. +GetBucketLifecycle has the following special error: Error code: +NoSuchLifecycleConfiguration Description: The lifecycle configuration does not exist. +HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client The following +operations are related to GetBucketLifecycle: GetBucketLifecycleConfiguration +PutBucketLifecycle DeleteBucketLifecycle # Arguments - `bucket`: The name of the bucket for which to get the lifecycle information. @@ -2000,8 +2536,8 @@ GetBucketLifecycleConfiguration PutBucketLifecycle DeleteBucketLifecycle # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
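A minimal usage sketch, assuming the high-level `S3` module generated by AWS.jl's `@service S3`; the bucket name and account ID are placeholders:

```julia
using AWS
@service S3

# Fetch the prefix-based lifecycle configuration of a bucket.
S3.get_bucket_lifecycle("my-bucket")

# Optionally assert the expected bucket owner; a mismatch fails with 403 Forbidden.
S3.get_bucket_lifecycle(
    "my-bucket", Dict("x-amz-expected-bucket-owner" => "111122223333")
)
```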
""" function get_bucket_lifecycle(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2027,22 +2563,26 @@ end get_bucket_lifecycle_configuration(bucket) get_bucket_lifecycle_configuration(bucket, params::Dict{String,<:Any}) - Bucket lifecycle configuration now supports specifying a lifecycle rule using an object -key name prefix, one or more object tags, or a combination of both. Accordingly, this -section describes the latest API. The response describes the new filter element that you -can use to specify a filter to select a subset of objects to which the rule applies. If you -are using a previous version of the lifecycle configuration, it still works. For the -earlier action, see GetBucketLifecycle. Returns the lifecycle configuration information -set on the bucket. For information about lifecycle configuration, see Object Lifecycle -Management. To use this operation, you must have permission to perform the -s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources. GetBucketLifecycleConfiguration has the following special error: - Error code: NoSuchLifecycleConfiguration Description: The lifecycle configuration does -not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client The -following operations are related to GetBucketLifecycleConfiguration: GetBucketLifecycle - PutBucketLifecycle DeleteBucketLifecycle + This operation is not supported by directory buckets. Bucket lifecycle configuration now +supports specifying a lifecycle rule using an object key name prefix, one or more object +tags, object size, or any combination of these. Accordingly, this section describes the +latest API. The previous version of the API supported filtering based only on an object key +name prefix, which is supported for backward compatibility. For the related API +description, see GetBucketLifecycle. Accordingly, this section describes the latest API. +The response describes the new filter element that you can use to specify a filter to +select a subset of objects to which the rule applies. If you are using a previous version +of the lifecycle configuration, it still works. For the earlier action, Returns the +lifecycle configuration information set on the bucket. For information about lifecycle +configuration, see Object Lifecycle Management. To use this operation, you must have +permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this +permission, by default. The bucket owner can grant this permission to others. For more +information about permissions, see Permissions Related to Bucket Subresource Operations and +Managing Access Permissions to Your Amazon S3 Resources. GetBucketLifecycleConfiguration +has the following special error: Error code: NoSuchLifecycleConfiguration Description: +The lifecycle configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +Code Prefix: Client The following operations are related to +GetBucketLifecycleConfiguration: GetBucketLifecycle PutBucketLifecycle +DeleteBucketLifecycle # Arguments - `bucket`: The name of the bucket for which to get the lifecycle information. 
@@ -2050,8 +2590,8 @@ following operations are related to GetBucketLifecycleConfiguration: GetBucke # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_lifecycle_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -2079,31 +2619,33 @@ end get_bucket_location(bucket) get_bucket_location(bucket, params::Dict{String,<:Any}) -Returns the Region the bucket resides in. You set the bucket's Region using the -LocationConstraint request parameter in a CreateBucket request. For more information, see -CreateBucket. To use this API operation against an access point, provide the alias of the -access point in place of the bucket name. To use this API operation against an Object -Lambda access point, provide the alias of the Object Lambda access point in place of the -bucket name. If the Object Lambda access point alias in a request is not valid, the error -code InvalidAccessPointAliasError is returned. For more information about + This operation is not supported by directory buckets. Returns the Region the bucket +resides in. You set the bucket's Region using the LocationConstraint request parameter in a +CreateBucket request. For more information, see CreateBucket. When you use this API +operation with an access point, provide the alias of the access point in place of the +bucket name. When you use this API operation with an Object Lambda access point, provide +the alias of the Object Lambda access point in place of the bucket name. If the Object +Lambda access point alias in a request is not valid, the error code +InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes. We recommend that you use HeadBucket to return the Region that a bucket resides in. For backward compatibility, Amazon S3 continues to support GetBucketLocation. The following operations are related to GetBucketLocation: GetObject CreateBucket # Arguments -- `bucket`: The name of the bucket for which to get the location. To use this API operation - against an access point, provide the alias of the access point in place of the bucket name. - To use this API operation against an Object Lambda access point, provide the alias of the - Object Lambda access point in place of the bucket name. If the Object Lambda access point - alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. - For more information about InvalidAccessPointAliasError, see List of Error Codes. +- `bucket`: The name of the bucket for which to get the location. When you use this API + operation with an access point, provide the alias of the access point in place of the + bucket name. When you use this API operation with an Object Lambda access point, provide + the alias of the Object Lambda access point in place of the bucket name. If the Object + Lambda access point alias in a request is not valid, the error code + InvalidAccessPointAliasError is returned. For more information about + InvalidAccessPointAliasError, see List of Error Codes. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_location(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2126,9 +2668,9 @@ end get_bucket_logging(bucket) get_bucket_logging(bucket, params::Dict{String,<:Any}) -Returns the logging status of a bucket and the permissions users have to view and modify -that status. The following operations are related to GetBucketLogging: CreateBucket -PutBucketLogging + This operation is not supported by directory buckets. Returns the logging status of a +bucket and the permissions users have to view and modify that status. The following +operations are related to GetBucketLogging: CreateBucket PutBucketLogging # Arguments - `bucket`: The bucket name for which to get the logging information. @@ -2136,8 +2678,8 @@ PutBucketLogging # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_logging(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2160,16 +2702,17 @@ end get_bucket_metrics_configuration(bucket, id) get_bucket_metrics_configuration(bucket, id, params::Dict{String,<:Any}) -Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. -Note that this doesn't include the daily storage metrics. To use this operation, you must -have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has -this permission by default. The bucket owner can grant this permission to others. For more -information about permissions, see Permissions Related to Bucket Subresource Operations and -Managing Access Permissions to Your Amazon S3 Resources. For information about CloudWatch -request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch. The following -operations are related to GetBucketMetricsConfiguration: PutBucketMetricsConfiguration - DeleteBucketMetricsConfiguration ListBucketMetricsConfigurations Monitoring -Metrics with Amazon CloudWatch + This operation is not supported by directory buckets. Gets a metrics configuration +(specified by the metrics configuration ID) from the bucket. Note that this doesn't include +the daily storage metrics. To use this operation, you must have permissions to perform the +s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The +bucket owner can grant this permission to others. For more information about permissions, +see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to +Your Amazon S3 Resources. For information about CloudWatch request metrics for Amazon S3, +see Monitoring Metrics with Amazon CloudWatch. 
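A minimal usage sketch, assuming the high-level `S3` module generated by AWS.jl's `@service S3`; the bucket name and metrics configuration ID are placeholders:

```julia
using AWS
@service S3

# Retrieve a single metrics configuration by its ID
# (daily storage metrics are not included in the result).
S3.get_bucket_metrics_configuration("my-bucket", "example-metrics-config-id")
```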
The following operations are related to +GetBucketMetricsConfiguration: PutBucketMetricsConfiguration +DeleteBucketMetricsConfiguration ListBucketMetricsConfigurations Monitoring Metrics +with Amazon CloudWatch # Arguments - `bucket`: The name of the bucket containing the metrics configuration to retrieve. @@ -2179,8 +2722,8 @@ Metrics with Amazon CloudWatch # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_metrics_configuration( Bucket, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -2212,22 +2755,23 @@ end get_bucket_notification(bucket) get_bucket_notification(bucket, params::Dict{String,<:Any}) - No longer used, see GetBucketNotificationConfiguration. + This operation is not supported by directory buckets. No longer used, see +GetBucketNotificationConfiguration. # Arguments -- `bucket`: The name of the bucket for which to get the notification configuration. To use - this API operation against an access point, provide the alias of the access point in place - of the bucket name. To use this API operation against an Object Lambda access point, - provide the alias of the Object Lambda access point in place of the bucket name. If the - Object Lambda access point alias in a request is not valid, the error code +- `bucket`: The name of the bucket for which to get the notification configuration. When + you use this API operation with an access point, provide the alias of the access point in + place of the bucket name. When you use this API operation with an Object Lambda access + point, provide the alias of the Object Lambda access point in place of the bucket name. If + the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_notification(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2253,35 +2797,35 @@ end get_bucket_notification_configuration(bucket) get_bucket_notification_configuration(bucket, params::Dict{String,<:Any}) -Returns the notification configuration of a bucket. If notifications are not enabled on the -bucket, the action returns an empty NotificationConfiguration element. By default, you must -be the bucket owner to read the notification configuration of a bucket. However, the bucket -owner can use a bucket policy to grant permission to other users to read this configuration -with the s3:GetBucketNotification permission. To use this API operation against an access -point, provide the alias of the access point in place of the bucket name. 
To use this API -operation against an Object Lambda access point, provide the alias of the Object Lambda -access point in place of the bucket name. If the Object Lambda access point alias in a -request is not valid, the error code InvalidAccessPointAliasError is returned. For more -information about InvalidAccessPointAliasError, see List of Error Codes. For more -information about setting and reading the notification configuration on a bucket, see -Setting Up Notification of Bucket Events. For more information about bucket policies, see -Using Bucket Policies. The following action is related to GetBucketNotification: -PutBucketNotification + This operation is not supported by directory buckets. Returns the notification +configuration of a bucket. If notifications are not enabled on the bucket, the action +returns an empty NotificationConfiguration element. By default, you must be the bucket +owner to read the notification configuration of a bucket. However, the bucket owner can use +a bucket policy to grant permission to other users to read this configuration with the +s3:GetBucketNotification permission. When you use this API operation with an access point, +provide the alias of the access point in place of the bucket name. When you use this API +operation with an Object Lambda access point, provide the alias of the Object Lambda access +point in place of the bucket name. If the Object Lambda access point alias in a request is +not valid, the error code InvalidAccessPointAliasError is returned. For more information +about InvalidAccessPointAliasError, see List of Error Codes. For more information about +setting and reading the notification configuration on a bucket, see Setting Up Notification +of Bucket Events. For more information about bucket policies, see Using Bucket Policies. +The following action is related to GetBucketNotification: PutBucketNotification # Arguments -- `bucket`: The name of the bucket for which to get the notification configuration. To use - this API operation against an access point, provide the alias of the access point in place - of the bucket name. To use this API operation against an Object Lambda access point, - provide the alias of the Object Lambda access point in place of the bucket name. If the - Object Lambda access point alias in a request is not valid, the error code +- `bucket`: The name of the bucket for which to get the notification configuration. When + you use this API operation with an access point, provide the alias of the access point in + place of the bucket name. When you use this API operation with an Object Lambda access + point, provide the alias of the Object Lambda access point in place of the bucket name. If + the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
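A minimal usage sketch (not part of the generated definitions; the bucket name and account ID below are placeholders, and the `@service`-generated `S3` wrapper from AWS.jl is assumed):

    using AWS: @service
    @service S3

    # Read the bucket's notification configuration; an empty
    # NotificationConfiguration element is returned when notifications
    # are not enabled on the bucket.
    S3.get_bucket_notification_configuration("my-bucket")

    # The same call with the optional expected-bucket-owner check passed
    # through the params Dict, using the header name listed under Valid keys.
    S3.get_bucket_notification_configuration(
        "my-bucket", Dict("x-amz-expected-bucket-owner" => "111122223333")
    )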
""" function get_bucket_notification_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -2309,12 +2853,12 @@ end get_bucket_ownership_controls(bucket) get_bucket_ownership_controls(bucket, params::Dict{String,<:Any}) -Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -the s3:GetBucketOwnershipControls permission. For more information about Amazon S3 -permissions, see Specifying permissions in a policy. For information about Amazon S3 -Object Ownership, see Using Object Ownership. The following operations are related to -GetBucketOwnershipControls: PutBucketOwnershipControls DeleteBucketOwnershipControls - + This operation is not supported by directory buckets. Retrieves OwnershipControls for an +Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls +permission. For more information about Amazon S3 permissions, see Specifying permissions in +a policy. For information about Amazon S3 Object Ownership, see Using Object Ownership. +The following operations are related to GetBucketOwnershipControls: +PutBucketOwnershipControls DeleteBucketOwnershipControls # Arguments - `bucket`: The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. @@ -2322,8 +2866,8 @@ GetBucketOwnershipControls: PutBucketOwnershipControls DeleteBucketOwners # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_ownership_controls( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -2351,8 +2895,13 @@ end get_bucket_policy(bucket) get_bucket_policy(bucket, params::Dict{String,<:Any}) -Returns the policy of a specified bucket. If you are using an identity other than the root -user of the Amazon Web Services account that owns the bucket, the calling identity must +Returns the policy of a specified bucket. Directory buckets - For directory buckets, you +must make requests for this API operation to the Regional endpoint. These endpoints support +path-style requests in the format +https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style +requests aren't supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. Permissions If you are using an identity other than the root user +of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct @@ -2362,29 +2911,46 @@ inadvertently lock themselves out of their own buckets, the root principal in a owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these -API actions by VPC endpoint policies and Amazon Web Services Organizations policies. 
To -use this API operation against an access point, provide the alias of the access point in -place of the bucket name. To use this API operation against an Object Lambda access point, -provide the alias of the Object Lambda access point in place of the bucket name. If the -Object Lambda access point alias in a request is not valid, the error code -InvalidAccessPointAliasError is returned. For more information about -InvalidAccessPointAliasError, see List of Error Codes. For more information about bucket -policies, see Using Bucket Policies and User Policies. The following action is related to +API actions by VPC endpoint policies and Amazon Web Services Organizations policies. +General purpose bucket permissions - The s3:GetBucketPolicy permission is required in a +policy. For more information about general purpose buckets bucket policies, see Using +Bucket Policies and User Policies in the Amazon S3 User Guide. Directory bucket +permissions - To grant access to this API operation, you must have the +s3express:GetBucketPolicy permission in an IAM identity-based policy instead of a bucket +policy. Cross-account access to this API operation isn't supported. This operation can only +be performed by the Amazon Web Services account that owns the resource. For more +information about directory bucket policies and permissions, see Amazon Web Services +Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. + Example bucket policies General purpose buckets example bucket policies - See Bucket +policy examples in the Amazon S3 User Guide. Directory bucket example bucket policies - +See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host +header syntax Directory buckets - The HTTP Host header syntax is +s3express-control.region.amazonaws.com. The following action is related to GetBucketPolicy: GetObject # Arguments -- `bucket`: The bucket name for which to get the bucket policy. To use this API operation - against an access point, provide the alias of the access point in place of the bucket name. - To use this API operation against an Object Lambda access point, provide the alias of the - Object Lambda access point in place of the bucket name. If the Object Lambda access point - alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. - For more information about InvalidAccessPointAliasError, see List of Error Codes. +- `bucket`: The bucket name to get the bucket policy for. Directory buckets - When you + use this operation with a directory bucket, you must use path-style requests in the format + https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style + requests aren't supported. Directory bucket names must be unique in the chosen Availability + Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide Access points - When you use + this API operation with an access point, provide the alias of the access point in place of + the bucket name. Object Lambda access points - When you use this API operation with an + Object Lambda access point, provide the alias of the Object Lambda access point in place of + the bucket name. If the Object Lambda access point alias in a request is not valid, the + error code InvalidAccessPointAliasError is returned. 
For more information about + InvalidAccessPointAliasError, see List of Error Codes. Access points and Object Lambda + access points are not supported by directory buckets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this + header is not supported in this API operation. If you specify this header, the request + fails with the HTTP status code 501 Not Implemented. """ function get_bucket_policy(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2407,13 +2973,13 @@ end get_bucket_policy_status(bucket) get_bucket_policy_status(bucket, params::Dict{String,<:Any}) -Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is -public. In order to use this operation, you must have the s3:GetBucketPolicyStatus -permission. For more information about Amazon S3 permissions, see Specifying Permissions in -a Policy. For more information about when Amazon S3 considers a bucket public, see The -Meaning of \"Public\". The following operations are related to GetBucketPolicyStatus: -Using Amazon S3 Block Public Access GetPublicAccessBlock PutPublicAccessBlock -DeletePublicAccessBlock + This operation is not supported by directory buckets. Retrieves the policy status for an +Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, +you must have the s3:GetBucketPolicyStatus permission. For more information about Amazon S3 +permissions, see Specifying Permissions in a Policy. For more information about when +Amazon S3 considers a bucket public, see The Meaning of \"Public\". The following +operations are related to GetBucketPolicyStatus: Using Amazon S3 Block Public Access +GetPublicAccessBlock PutPublicAccessBlock DeletePublicAccessBlock # Arguments - `bucket`: The name of the Amazon S3 bucket whose policy status you want to retrieve. @@ -2421,8 +2987,8 @@ DeletePublicAccessBlock # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_policy_status(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2448,17 +3014,17 @@ end get_bucket_replication(bucket) get_bucket_replication(bucket, params::Dict{String,<:Any}) -Returns the replication configuration of a bucket. It can take a while to propagate the -put or delete a replication configuration to all Amazon S3 systems. Therefore, a get -request soon after put or delete can return a wrong result. For information about -replication configuration, see Replication in the Amazon S3 User Guide. This action -requires permissions for the s3:GetReplicationConfiguration action. For more information -about permissions, see Using Bucket Policies and User Policies. 
If you include the Filter -element in a replication configuration, you must also include the DeleteMarkerReplication -and Priority elements. The response also returns those elements. For information about -GetBucketReplication errors, see List of replication-related error codes The following -operations are related to GetBucketReplication: PutBucketReplication -DeleteBucketReplication + This operation is not supported by directory buckets. Returns the replication +configuration of a bucket. It can take a while to propagate the put or delete a +replication configuration to all Amazon S3 systems. Therefore, a get request soon after put +or delete can return a wrong result. For information about replication configuration, +see Replication in the Amazon S3 User Guide. This action requires permissions for the +s3:GetReplicationConfiguration action. For more information about permissions, see Using +Bucket Policies and User Policies. If you include the Filter element in a replication +configuration, you must also include the DeleteMarkerReplication and Priority elements. The +response also returns those elements. For information about GetBucketReplication errors, +see List of replication-related error codes The following operations are related to +GetBucketReplication: PutBucketReplication DeleteBucketReplication # Arguments - `bucket`: The bucket name for which to get the replication information. @@ -2466,8 +3032,8 @@ DeleteBucketReplication # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_replication(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2493,9 +3059,10 @@ end get_bucket_request_payment(bucket) get_bucket_request_payment(bucket, params::Dict{String,<:Any}) -Returns the request payment configuration of a bucket. To use this version of the -operation, you must be the bucket owner. For more information, see Requester Pays Buckets. -The following operations are related to GetBucketRequestPayment: ListObjects + This operation is not supported by directory buckets. Returns the request payment +configuration of a bucket. To use this version of the operation, you must be the bucket +owner. For more information, see Requester Pays Buckets. The following operations are +related to GetBucketRequestPayment: ListObjects # Arguments - `bucket`: The name of the bucket for which to get the payment request configuration @@ -2503,8 +3070,8 @@ The following operations are related to GetBucketRequestPayment: ListObjects # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
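For illustration only (a sketch; the bucket name is a placeholder and the `@service`-generated `S3` wrapper from AWS.jl is assumed):

    using AWS: @service
    @service S3

    # Retrieve the Requester Pays configuration for a bucket you own.
    S3.get_bucket_request_payment("my-bucket")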
""" function get_bucket_request_payment( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -2532,12 +3099,13 @@ end get_bucket_tagging(bucket) get_bucket_tagging(bucket, params::Dict{String,<:Any}) -Returns the tag set associated with the bucket. To use this operation, you must have -permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this -permission and can grant this permission to others. GetBucketTagging has the following -special error: Error code: NoSuchTagSet Description: There is no tag set associated -with the bucket. The following operations are related to GetBucketTagging: -PutBucketTagging DeleteBucketTagging + This operation is not supported by directory buckets. Returns the tag set associated with +the bucket. To use this operation, you must have permission to perform the +s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant +this permission to others. GetBucketTagging has the following special error: Error code: +NoSuchTagSet Description: There is no tag set associated with the bucket. The +following operations are related to GetBucketTagging: PutBucketTagging +DeleteBucketTagging # Arguments - `bucket`: The name of the bucket for which to get the tagging information. @@ -2545,8 +3113,8 @@ PutBucketTagging DeleteBucketTagging # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_tagging(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2569,12 +3137,12 @@ end get_bucket_versioning(bucket) get_bucket_versioning(bucket, params::Dict{String,<:Any}) -Returns the versioning state of a bucket. To retrieve the versioning state of a bucket, you -must be the bucket owner. This implementation also returns the MFA Delete status of the -versioning state. If the MFA Delete status is enabled, the bucket owner must use an -authentication device to change the versioning state of the bucket. The following -operations are related to GetBucketVersioning: GetObject PutObject DeleteObject - + This operation is not supported by directory buckets. Returns the versioning state of a +bucket. To retrieve the versioning state of a bucket, you must be the bucket owner. This +implementation also returns the MFA Delete status of the versioning state. If the MFA +Delete status is enabled, the bucket owner must use an authentication device to change the +versioning state of the bucket. The following operations are related to +GetBucketVersioning: GetObject PutObject DeleteObject # Arguments - `bucket`: The name of the bucket for which to get the versioning information. @@ -2582,8 +3150,8 @@ operations are related to GetBucketVersioning: GetObject PutObject De # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). 
+ account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_versioning(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2609,14 +3177,14 @@ end get_bucket_website(bucket) get_bucket_website(bucket, params::Dict{String,<:Any}) -Returns the website configuration for a bucket. To host website on Amazon S3, you can -configure a bucket as website by adding a website configuration. For more information about -hosting websites, see Hosting Websites on Amazon S3. This GET action requires the -S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket -website configuration. However, bucket owners can allow other users to read the website -configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission. -The following operations are related to GetBucketWebsite: DeleteBucketWebsite -PutBucketWebsite + This operation is not supported by directory buckets. Returns the website configuration +for a bucket. To host website on Amazon S3, you can configure a bucket as website by adding +a website configuration. For more information about hosting websites, see Hosting Websites +on Amazon S3. This GET action requires the S3:GetBucketWebsite permission. By default, +only the bucket owner can read the bucket website configuration. However, bucket owners can +allow other users to read the website configuration by writing a bucket policy granting +them the S3:GetBucketWebsite permission. The following operations are related to +GetBucketWebsite: DeleteBucketWebsite PutBucketWebsite # Arguments - `bucket`: The bucket name for which to get the website configuration. @@ -2624,8 +3192,8 @@ PutBucketWebsite # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_bucket_website(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2648,128 +3216,189 @@ end get_object(bucket, key) get_object(bucket, key, params::Dict{String,<:Any}) -Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If -you grant READ access to the anonymous user, you can return the object without using an -authorization header. An Amazon S3 bucket has no directory hierarchy such as you would find -in a typical computer file system. You can, however, create a logical hierarchy by using -object key names that imply a folder structure. For example, instead of naming an object -sample.jpg, you can name it photos/2006/February/sample.jpg. To get an object from such a -logical hierarchy, specify the full key name for the object in the GET operation. For a -virtual hosted-style request example, if you have the object -photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. -For a path-style request example, if you have the object photos/2006/February/sample.jpg in -the bucket named examplebucket, specify the resource as -/examplebucket/photos/2006/February/sample.jpg. For more information about request types, -see HTTP Host Header Bucket Specification. 
For more information about returning the ACL of -an object, see GetObjectAcl. If the object you are retrieving is stored in the S3 Glacier -or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 -Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first -restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectState -error. For information about restoring archived objects, see Restoring Archived Objects. -Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET -requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side -encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these -types of keys, you’ll get an HTTP 400 BadRequest error. If you encrypt an object by using -server-side encryption with customer-provided encryption keys (SSE-C) when you store the -object in Amazon S3, then when you GET the object, you must use the following headers: -x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key - x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see -Server-Side Encryption (Using Customer-Provided Encryption Keys). Assuming you have the -relevant permission to read object tags, the response also returns the x-amz-tagging-count -header that provides the count of number of tags associated with the object. You can use -GetObjectTagging to retrieve the tag set associated with an object. Permissions You need -the relevant read object (or version) permission for this operation. For more information, -see Specifying Permissions in a Policy. If the object you request does not exist, the error -Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you -have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code -404 (\"no such key\") error. If you don’t have the s3:ListBucket permission, Amazon S3 -will return an HTTP status code 403 (\"access denied\") error. Versioning By default, -the GET action returns the current version of an object. To return a different version, use -the versionId subresource. If you supply a versionId, you need the s3:GetObjectVersion -permission to access a specific version of an object. If you request a specific version, -you do not need to have the s3:GetObject permission. If you request the current version -without a specific version ID, only s3:GetObject permission is required. -s3:GetObjectVersion permission won't be required. If the current version of the object is -a delete marker, Amazon S3 behaves as if the object was deleted and includes -x-amz-delete-marker: true in the response. For more information about versioning, see -PutBucketVersioning. Overriding Response Header Values There are times when you want to -override certain response header values in a GET response. For example, you might override -the Content-Disposition response header value in your GET request. You can override values -for a set of response headers using the following query parameters. These response header -values are sent only on a successful request, that is, when status code 200 OK is returned. -The set of headers you can override using these parameters is a subset of the headers that -Amazon S3 accepts when you create an object. 
The response headers that you can override for -the GET response are Content-Type, Content-Language, Expires, Cache-Control, -Content-Disposition, and Content-Encoding. To override these header values in the GET -response, you use the following request parameters. You must sign the request, either -using an Authorization header or a presigned URL, when using these parameters. They cannot -be used with an unsigned (anonymous) request. response-content-type -response-content-language response-expires response-cache-control -response-content-disposition response-content-encoding Overriding Response Header -Values If both of the If-Match and If-Unmodified-Since headers are present in the request -as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition -evaluates to false; then, S3 returns 200 OK and the data requested. If both of the -If-None-Match and If-Modified-Since headers are present in the request as follows: -If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to -true; then, S3 returns 304 Not Modified response code. For more information about -conditional requests, see RFC 7232. The following operations are related to GetObject: -ListBuckets GetObjectAcl +Retrieves an object from Amazon S3. In the GetObject request, specify the full key name for +the object. General purpose buckets - Both the virtual-hosted-style requests and the +path-style requests are supported. For a virtual hosted-style request example, if you have +the object photos/2006/February/sample.jpg, specify the object key name as +/photos/2006/February/sample.jpg. For a path-style request example, if you have the object +photos/2006/February/sample.jpg in the bucket named examplebucket, specify the object key +name as /examplebucket/photos/2006/February/sample.jpg. For more information about request +types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide. Directory +buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style +request example, if you have the object photos/2006/February/sample.jpg in the bucket named +examplebucket--use1-az5--x-s3, specify the object key name as +/photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your +requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style +requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +Path-style requests are not supported. For more information, see Regional and Zonal +endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions +- You must have the required permissions in a policy. To use GetObject, you must have the +READ access to the object (or version). If you grant READ access to the anonymous user, the +GetObject operation returns the object without using an authorization header. For more +information, see Specifying permissions in a policy in the Amazon S3 User Guide. If you +include a versionId in your request header, you must have the s3:GetObjectVersion +permission to access a specific version of an object. The s3:GetObject permission is not +required in this scenario. If you request the current version of an object without a +specific versionId in the request header, only the s3:GetObject permission is required. The +s3:GetObjectVersion permission is not required in this scenario. 
If the object that you +request doesn’t exist, the error that Amazon S3 returns depends on whether you also have +the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, +Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the +s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Access Denied error. + Directory bucket permissions - To grant access to this API operation on a directory +bucket, we recommend that you use the CreateSession API operation for session-based +authorization. Specifically, you grant the s3express:CreateSession permission to the +directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the +CreateSession API call on the bucket to obtain a session token. With the session token in +your request header, you can make API requests to this operation. After the session token +expires, you make another CreateSession API call to generate a new session token for use. +Amazon Web Services CLI or SDKs create session and refresh the session token automatically +to avoid service interruptions when a session expires. For more information about +authorization, see CreateSession . Storage classes If the object you are retrieving is +stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive +storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 +Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must +first restore a copy using RestoreObject. Otherwise, this operation returns an +InvalidObjectState error. For information about restoring archived objects, see Restoring +Archived Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, +only the S3 Express One Zone storage class is supported to store newly created objects. +Unsupported storage class values won't write a destination object and will respond with the +HTTP status code 400 Bad Request. Encryption Encryption request headers, like +x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object +uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side +encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side +encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your +GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 +Bad Request error. Overriding response header values through the request There are times +when you want to override certain response header values of a GetObject response. For +example, you might override the Content-Disposition response header value through your +GetObject request. You can override values for a set of response headers. These modified +response header values are included only in a successful response, that is, when the HTTP +status code 200 OK is returned. The headers you can override using the following query +parameters in the request are a subset of the headers that Amazon S3 accepts when you +create an object. The response headers that you can override for the GetObject response +are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, +and Expires. To override values for a set of response headers in the GetObject response, +you can use the following query parameters in the request. 
response-cache-control +response-content-disposition response-content-encoding response-content-language + response-content-type response-expires When you use these parameters, you must +sign the request by using either an Authorization header or a presigned URL. These +parameters cannot be used with an unsigned (anonymous) request. HTTP Host header syntax +Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to +GetObject: ListBuckets GetObjectAcl # Arguments -- `bucket`: The bucket name containing the object. When using this action with an access - point, you must direct requests to the access point hostname. The access point hostname - takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using - this action with an access point through the Amazon Web Services SDKs, you provide the - access point ARN in place of the bucket name. For more information about access point ARNs, - see Using access points in the Amazon S3 User Guide. When using an Object Lambda access - point the hostname takes the form - AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. When you use this action - with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 - on Outposts hostname takes the form +- `bucket`: The bucket name containing the object. Directory buckets - When you use this + operation with a directory bucket, you must use virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form + AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with + an access point through the Amazon Web Services SDKs, you provide the access point ARN in + place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. Object Lambda access points - When you use this action + with an Object Lambda access point, you must direct requests to the Object Lambda access + point hostname. The Object Lambda access point hostname takes the form + AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. Access points and Object + Lambda access points are not supported by directory buckets. S3 on Outposts - When you + use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Key of the object to get. 
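A short usage sketch (the bucket name, key, and byte range are placeholders, and the `@service`-generated `S3` wrapper from AWS.jl is assumed rather than being part of this file):

    using AWS: @service
    @service S3

    # Basic GET of an object by bucket and key; the returned value holds
    # the object data.
    resp = S3.get_object("my-bucket", "photos/2006/February/sample.jpg")

    # Ranged GET using the Range key documented under Optional Parameters
    # below, passed via the params Dict.
    first_kb = S3.get_object(
        "my-bucket",
        "photos/2006/February/sample.jpg",
        Dict("Range" => "bytes=0-1023"),
    )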
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"If-Match"`: Return the object only if its entity tag (ETag) is the same as the one - specified; otherwise, return a 412 (precondition failed) error. + specified in this header; otherwise, return a 412 Precondition Failed error. If both of the + If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match + condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, + S3 returns 200 OK and the data requested. For more information about conditional requests, + see RFC 7232. - `"If-Modified-Since"`: Return the object only if it has been modified since the specified - time; otherwise, return a 304 (not modified) error. + time; otherwise, return a 304 Not Modified error. If both of the If-None-Match and + If-Modified-Since headers are present in the request as follows: If-None-Match condition + evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns + 304 Not Modified status code. For more information about conditional requests, see RFC 7232. - `"If-None-Match"`: Return the object only if its entity tag (ETag) is different from the - one specified; otherwise, return a 304 (not modified) error. + one specified in this header; otherwise, return a 304 Not Modified error. If both of the + If-None-Match and If-Modified-Since headers are present in the request as follows: + If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to + true; then, S3 returns 304 Not Modified HTTP status code. For more information about + conditional requests, see RFC 7232. - `"If-Unmodified-Since"`: Return the object only if it has not been modified since the - specified time; otherwise, return a 412 (precondition failed) error. -- `"Range"`: Downloads the specified range bytes of an object. For more information about + specified time; otherwise, return a 412 Precondition Failed error. If both of the If-Match + and If-Unmodified-Since headers are present in the request as follows: If-Match condition + evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns + 200 OK and the data requested. For more information about conditional requests, see RFC + 7232. +- `"Range"`: Downloads the specified byte range of an object. For more information about the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range. Amazon S3 doesn't support retrieving multiple ranges of data per GET request. - `"partNumber"`: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object. - `"response-cache-control"`: Sets the Cache-Control header of the response. -- `"response-content-disposition"`: Sets the Content-Disposition header of the response +- `"response-content-disposition"`: Sets the Content-Disposition header of the response. - `"response-content-encoding"`: Sets the Content-Encoding header of the response. - `"response-content-language"`: Sets the Content-Language header of the response. - `"response-content-type"`: Sets the Content-Type header of the response. - `"response-expires"`: Sets the Expires header of the response. -- `"versionId"`: VersionId used to reference a specific version of the object. +- `"versionId"`: Version ID used to reference a specific version of the object. 
By default, + the GetObject operation returns the current version of an object. To return a different + version, use the versionId subresource. If you include a versionId in your request + header, you must have the s3:GetObjectVersion permission to access a specific version of an + object. The s3:GetObject permission is not required in this scenario. If you request the + current version of an object without a specific versionId in the request header, only the + s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in + this scenario. Directory buckets - S3 Versioning isn't enabled and supported for + directory buckets. For this API operation, only the null value of the version ID is + supported by directory buckets. You can only specify null to the versionId query parameter + in the request. For more information about versioning, see PutBucketVersioning. - `"x-amz-checksum-mode"`: To retrieve the checksum, this mode must be enabled. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: -- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use to - when decrypting the object (for example, AES256). +- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when + decrypting the object (for example, AES256). If you encrypt an object by using server-side + encryption with customer-provided encryption keys (SSE-C) when you store the object in + Amazon S3, then when you GET the object, you must use the following headers: + x-amz-server-side-encryption-customer-algorithm + x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 + For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + Encryption Keys) in the Amazon S3 User Guide. This functionality is not supported for + directory buckets. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption - key for Amazon S3 used to encrypt the data. This value is used to decrypt the object when - recovering it and must match the one used when storing the data. The key must be - appropriate for use with the algorithm specified in the - x-amz-server-side-encryption-customer-algorithm header. + key that you originally provided for Amazon S3 to encrypt the data before storing it. This + value is used to decrypt the object when recovering it and must match the one used when + storing the data. The key must be appropriate for use with the algorithm specified in the + x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object by using + server-side encryption with customer-provided encryption keys (SSE-C) when you store the + object in Amazon S3, then when you GET the object, you must use the following headers: + x-amz-server-side-encryption-customer-algorithm + x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 + For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + Encryption Keys) in the Amazon S3 User Guide. This functionality is not supported for + directory buckets. 
- `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of - the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for + a message integrity check to ensure that the encryption key was transmitted without error. + If you encrypt an object by using server-side encryption with customer-provided encryption + keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must + use the following headers: x-amz-server-side-encryption-customer-algorithm + x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 + For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + Encryption Keys) in the Amazon S3 User Guide. This functionality is not supported for + directory buckets. """ function get_object(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -2795,13 +3424,14 @@ end get_object_acl(bucket, key) get_object_acl(bucket, key, params::Dict{String,<:Any}) -Returns the access control list (ACL) of an object. To use this operation, you must have -s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see -Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide This -action is not supported by Amazon S3 on Outposts. By default, GET returns ACL information -about the current version of an object. To return ACL information about a different -version, use the versionId subresource. If your bucket uses the bucket owner enforced -setting for S3 Object Ownership, requests to read ACLs are still supported and return the + This operation is not supported by directory buckets. Returns the access control list +(ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or +READ_ACP access to the object. For more information, see Mapping of ACL permissions and +access policy permissions in the Amazon S3 User Guide This functionality is not supported +for Amazon S3 on Outposts. By default, GET returns ACL information about the current +version of an object. To return ACL information about a different version, use the +versionId subresource. If your bucket uses the bucket owner enforced setting for S3 Object +Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide. The following operations are related to GetObjectAcl: GetObject @@ -2809,8 +3439,10 @@ GetObjectAttributes DeleteObject PutObject # Arguments - `bucket`: The bucket name that contains the object for which to get the ACL information. - When using this action with an access point, you must direct requests to the access point - hostname. The access point hostname takes the form + Access points - When you use this action with an access point, you must provide the alias + of the access point in place of the bucket name or specify the access point ARN. When using + the access point ARN, you must direct requests to the access point hostname. The access + point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access @@ -2819,10 +3451,11 @@ GetObjectAttributes DeleteObject PutObject # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"versionId"`: VersionId used to reference a specific version of the object. +- `"versionId"`: Version ID used to reference a specific version of the object. This + functionality is not supported for directory buckets. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: """ function get_object_acl(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2852,80 +3485,122 @@ end get_object_attributes(bucket, key, x-amz-object-attributes) get_object_attributes(bucket, key, x-amz-object-attributes, params::Dict{String,<:Any}) -Retrieves all the metadata from an object without returning the object itself. This action -is useful if you're interested only in an object's metadata. To use GetObjectAttributes, -you must have READ access to the object. GetObjectAttributes combines the functionality of -HeadObject and ListParts. All of the data returned with each of those individual calls can -be returned with a single call to GetObjectAttributes. If you encrypt an object by using -server-side encryption with customer-provided encryption keys (SSE-C) when you store the -object in Amazon S3, then when you retrieve the metadata from the object, you must use the -following headers: x-amz-server-side-encryption-customer-algorithm +Retrieves all the metadata from an object without returning the object itself. This +operation is useful if you're interested only in an object's metadata. +GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data +returned with each of those individual calls can be returned with a single call to +GetObjectAttributes. Directory buckets - For directory buckets, you must make requests +for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +Path-style requests are not supported. For more information, see Regional and Zonal +endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions +- To use GetObjectAttributes, you must have READ access to the object. The permissions that +you need to use this operation with depend on whether the bucket is versioned. If the +bucket is versioned, you need both the s3:GetObjectVersion and +s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not +versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more +information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the +object that you request does not exist, the error Amazon S3 returns depends on whether you +also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the +bucket, Amazon S3 returns an HTTP status code 404 Not Found (\"no such key\") error. 
If +you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 +Forbidden (\"access denied\") error. Directory bucket permissions - To grant access to +this API operation on a directory bucket, we recommend that you use the CreateSession API +operation for session-based authorization. Specifically, you grant the +s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM +identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a +session token. With the session token in your request header, you can make API requests to +this operation. After the session token expires, you make another CreateSession API call to +generate a new session token for use. Amazon Web Services CLI or SDKs create session and +refresh the session token automatically to avoid service interruptions when a session +expires. For more information about authorization, see CreateSession . Encryption +Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD +requests if your object uses server-side encryption with Key Management Service (KMS) keys +(SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), +or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The +x-amz-server-side-encryption header is used when you PUT an object to S3 and want to +specify the encryption method. If you include this header in a GET request for an object +that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the +encryption method can't be changed when you retrieve the object. If you encrypt an object +by using server-side encryption with customer-provided encryption keys (SSE-C) when you +store the object in Amazon S3, then when you retrieve the metadata from the object, you +must use the following headers to provide the encryption key for the server to be able to +retrieve the object's metadata. The headers are: +x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -Encryption Keys) in the Amazon S3 User Guide. Encryption request headers, such as -x-amz-server-side-encryption, should not be sent for GET requests if your object uses -server-side encryption with Amazon Web Services KMS keys stored in Amazon Web Services Key -Management Service (SSE-KMS) or server-side encryption with Amazon S3 managed keys -(SSE-S3). If your object does use these types of keys, you'll get an HTTP 400 Bad Request -error. The last modified property in this case is the creation date of the object. -Consider the following when using request headers: If both of the If-Match and -If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns -the HTTP status code 200 OK and the data requested: If-Match condition evaluates to -true. If-Unmodified-Since condition evaluates to false. If both of the If-None-Match -and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns -the HTTP status code 304 Not Modified: If-None-Match condition evaluates to false. -If-Modified-Since condition evaluates to true. For more information about conditional -requests, see RFC 7232. Permissions The permissions that you need to use this operation -depend on whether the bucket is versioned. 
If the bucket is versioned, you need both the -s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If -the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes -permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 -User Guide. If the object that you request does not exist, the error Amazon S3 returns -depends on whether you also have the s3:ListBucket permission. If you have the -s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found -(\"no such key\") error. If you don't have the s3:ListBucket permission, Amazon S3 -returns an HTTP status code 403 Forbidden (\"access denied\") error. The following -actions are related to GetObjectAttributes: GetObject GetObjectAcl -GetObjectLegalHold GetObjectLockConfiguration GetObjectRetention -GetObjectTagging HeadObject ListParts +Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For +directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) +(AES256) is supported. Versioning Directory buckets - S3 Versioning isn't enabled and +supported for directory buckets. For this API operation, only the null value of the version +ID is supported by directory buckets. You can only specify null to the versionId query +parameter in the request. Conditional request headers Consider the following when using +request headers: If both of the If-Match and If-Unmodified-Since headers are present in +the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data +requested: If-Match condition evaluates to true. If-Unmodified-Since condition +evaluates to false. For more information about conditional requests, see RFC 7232. If +both of the If-None-Match and If-Modified-Since headers are present in the request as +follows, then Amazon S3 returns the HTTP status code 304 Not Modified: If-None-Match +condition evaluates to false. If-Modified-Since condition evaluates to true. For more +information about conditional requests, see RFC 7232. HTTP Host header syntax +Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to +GetObjectAttributes: GetObject GetObjectAcl GetObjectLegalHold +GetObjectLockConfiguration GetObjectRetention GetObjectTagging HeadObject +ListParts # Arguments -- `bucket`: The name of the bucket that contains the object. When using this action with an - access point, you must direct requests to the access point hostname. The access point - hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When - using this action with an access point through the Amazon Web Services SDKs, you provide - the access point ARN in place of the bucket name. For more information about access point - ARNs, see Using access points in the Amazon S3 User Guide. When you use this action with - Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - Outposts hostname takes the form +- `bucket`: The name of the bucket that contains the object. Directory buckets - When you + use this operation with a directory bucket, you must use virtual-hosted-style requests in + the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not + supported. Directory bucket names must be unique in the chosen Availability Zone. 
Bucket + names must follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form + AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with + an access point through the Amazon Web Services SDKs, you provide the access point ARN in + place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The object key. -- `x-amz-object-attributes`: An XML header that specifies the fields at the root level that - you want returned in the response. Fields that you do not specify are not returned. +- `x-amz-object-attributes`: Specifies the fields at the root level that you want returned + in the response. Fields that you do not specify are not returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"versionId"`: The version ID used to reference a specific version of the object. +- `"versionId"`: The version ID used to reference a specific version of the object. S3 + Versioning isn't enabled and supported for directory buckets. For this API operation, only + the null value of the version ID is supported by directory buckets. You can only specify + null to the versionId query parameter in the request. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-max-parts"`: Sets the maximum number of parts to return. - `"x-amz-part-number-marker"`: Specifies the part after which listing should begin. Only parts with higher part numbers will be listed. - `"x-amz-request-payer"`: - `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when - encrypting the object (for example, AES256). + encrypting the object (for example, AES256). This functionality is not supported for + directory buckets. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. 
The key must be appropriate for use with the algorithm specified in the - x-amz-server-side-encryption-customer-algorithm header. + x-amz-server-side-encryption-customer-algorithm header. This functionality is not + supported for directory buckets. - `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + integrity check to ensure that the encryption key was transmitted without error. This + functionality is not supported for directory buckets. """ function get_object_attributes( Bucket, Key, x_amz_object_attributes; aws_config::AbstractAWSConfig=global_aws_config() @@ -2971,14 +3646,17 @@ end get_object_legal_hold(bucket, key) get_object_legal_hold(bucket, key, params::Dict{String,<:Any}) -Gets an object's current legal hold status. For more information, see Locking Objects. This -action is not supported by Amazon S3 on Outposts. The following action is related to -GetObjectLegalHold: GetObjectAttributes + This operation is not supported by directory buckets. Gets an object's current legal hold +status. For more information, see Locking Objects. This functionality is not supported for +Amazon S3 on Outposts. The following action is related to GetObjectLegalHold: +GetObjectAttributes # Arguments - `bucket`: The bucket name containing the object whose legal hold status you want to - retrieve. When using this action with an access point, you must direct requests to the - access point hostname. The access point hostname takes the form + retrieve. Access points - When you use this action with an access point, you must provide + the alias of the access point in place of the bucket name or specify the access point ARN. + When using the access point ARN, you must direct requests to the access point hostname. The + access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access @@ -2989,8 +3667,8 @@ GetObjectLegalHold: GetObjectAttributes Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"versionId"`: The version ID of the object whose legal hold status you want to retrieve. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: """ function get_object_legal_hold( @@ -3022,25 +3700,27 @@ end get_object_lock_configuration(bucket) get_object_lock_configuration(bucket, params::Dict{String,<:Any}) -Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock -configuration will be applied by default to every new object placed in the specified -bucket. For more information, see Locking Objects. The following action is related to -GetObjectLockConfiguration: GetObjectAttributes + This operation is not supported by directory buckets. Gets the Object Lock configuration +for a bucket. 
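Before moving on to the Object Lock reads, here is a minimal sketch of calling the GetObjectAttributes binding documented above through AWS.jl's `@service` macro. The bucket name, object key, attribute list, and version ID are placeholders, the comma-separated encoding of the `x-amz-object-attributes` value is an assumption, and the four-argument params-Dict method is assumed to follow the same generated pattern as the other operations in this file.

```julia
using AWS
@service S3

# Placeholder bucket and key; the third positional argument feeds the
# x-amz-object-attributes header (assumed to be a comma-separated field list).
attrs = S3.get_object_attributes(
    "my-example-bucket",
    "reports/2024/summary.csv",
    "ETag,StorageClass,ObjectSize",
)

# Optional parameters from the list above are passed as a flat params Dict,
# for example to read the attributes of a specific object version.
versioned = S3.get_object_attributes(
    "my-example-bucket",
    "reports/2024/summary.csv",
    "ETag,StorageClass,ObjectSize",
    Dict{String,Any}("versionId" => "example-version-id"),  # placeholder version ID
)
```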
The rule specified in the Object Lock configuration will be applied by +default to every new object placed in the specified bucket. For more information, see +Locking Objects. The following action is related to GetObjectLockConfiguration: +GetObjectAttributes # Arguments -- `bucket`: The bucket whose Object Lock configuration you want to retrieve. When using - this action with an access point, you must direct requests to the access point hostname. - The access point hostname takes the form - AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with - an access point through the Amazon Web Services SDKs, you provide the access point ARN in - place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. +- `bucket`: The bucket whose Object Lock configuration you want to retrieve. Access points + - When you use this action with an access point, you must provide the alias of the access + point in place of the bucket name or specify the access point ARN. When using the access + point ARN, you must direct requests to the access point hostname. The access point hostname + takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using + this action with an access point through the Amazon Web Services SDKs, you provide the + access point ARN in place of the bucket name. For more information about access point ARNs, + see Using access points in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_object_lock_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -3068,14 +3748,17 @@ end get_object_retention(bucket, key) get_object_retention(bucket, key, params::Dict{String,<:Any}) -Retrieves an object's retention settings. For more information, see Locking Objects. This -action is not supported by Amazon S3 on Outposts. The following action is related to -GetObjectRetention: GetObjectAttributes + This operation is not supported by directory buckets. Retrieves an object's retention +settings. For more information, see Locking Objects. This functionality is not supported +for Amazon S3 on Outposts. The following action is related to GetObjectRetention: +GetObjectAttributes # Arguments - `bucket`: The bucket name containing the object whose retention settings you want to - retrieve. When using this action with an access point, you must direct requests to the - access point hostname. The access point hostname takes the form + retrieve. Access points - When you use this action with an access point, you must provide + the alias of the access point in place of the bucket name or specify the access point ARN. + When using the access point ARN, you must direct requests to the access point hostname. The + access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. 
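As a companion to the two Object Lock reads already documented in this hunk (GetObjectLegalHold and GetObjectLockConfiguration), the sketch below calls both generated Julia functions; the bucket, key, and version ID are hypothetical placeholders, not values from the patch.

```julia
using AWS
@service S3

bucket = "my-lock-enabled-bucket"   # placeholder bucket name

# Bucket-level Object Lock configuration (default retention rule, if any).
lock_config = S3.get_object_lock_configuration(bucket)

# Legal hold status of one object version; versionId is optional and is shown
# here only to illustrate the params-Dict form listed above.
legal_hold = S3.get_object_legal_hold(
    bucket,
    "contracts/master-agreement.pdf",
    Dict{String,Any}("versionId" => "example-version-id"),
)
```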
For more information about access point ARNs, see Using access @@ -3087,8 +3770,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"versionId"`: The version ID for the object whose retention settings you want to retrieve. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: """ function get_object_retention( @@ -3120,37 +3803,41 @@ end get_object_tagging(bucket, key) get_object_tagging(bucket, key, params::Dict{String,<:Any}) -Returns the tag-set of an object. You send the GET request against the tagging subresource -associated with the object. To use this operation, you must have permission to perform the -s3:GetObjectTagging action. By default, the GET action returns information about current -version of an object. For a versioned bucket, you can have multiple versions of an object -in your bucket. To retrieve tags of any other version, use the versionId query parameter. -You also need permission for the s3:GetObjectVersionTagging action. By default, the bucket -owner has this permission and can grant this permission to others. For information about -the Amazon S3 object tagging feature, see Object Tagging. The following actions are related -to GetObjectTagging: DeleteObjectTagging GetObjectAttributes PutObjectTagging + This operation is not supported by directory buckets. Returns the tag-set of an object. +You send the GET request against the tagging subresource associated with the object. To use +this operation, you must have permission to perform the s3:GetObjectTagging action. By +default, the GET action returns information about current version of an object. For a +versioned bucket, you can have multiple versions of an object in your bucket. To retrieve +tags of any other version, use the versionId query parameter. You also need permission for +the s3:GetObjectVersionTagging action. By default, the bucket owner has this permission +and can grant this permission to others. For information about the Amazon S3 object +tagging feature, see Object Tagging. The following actions are related to GetObjectTagging: + DeleteObjectTagging GetObjectAttributes PutObjectTagging # Arguments - `bucket`: The bucket name containing the object for which to get the tagging information. - When using this action with an access point, you must direct requests to the access point - hostname. The access point hostname takes the form + Access points - When you use this action with an access point, you must provide the alias + of the access point in place of the bucket name or specify the access point ARN. When using + the access point ARN, you must direct requests to the access point hostname. The access + point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. S3 on Outposts - When you use this action with Amazon + S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which to get the tagging information. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"versionId"`: The versionId of the object for which to get the tagging information. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: """ function get_object_tagging(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) @@ -3180,12 +3867,12 @@ end get_object_torrent(bucket, key) get_object_torrent(bucket, key, params::Dict{String,<:Any}) -Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're -distributing large files. You can get torrent only for objects that are less than 5 GB in -size, and that are not encrypted using server-side encryption with a customer-provided -encryption key. To use GET, you must have READ access to the object. This action is not -supported by Amazon S3 on Outposts. The following action is related to GetObjectTorrent: -GetObject + This operation is not supported by directory buckets. Returns torrent files from a +bucket. BitTorrent can save you bandwidth when you're distributing large files. You can +get torrent only for objects that are less than 5 GB in size, and that are not encrypted +using server-side encryption with a customer-provided encryption key. To use GET, you must +have READ access to the object. This functionality is not supported for Amazon S3 on +Outposts. The following action is related to GetObjectTorrent: GetObject # Arguments - `bucket`: The name of the bucket containing the object for which to get the torrent files. @@ -3194,8 +3881,8 @@ GetObject # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
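GetObjectRetention and GetObjectTagging, documented earlier in this hunk, follow the same bucket/key calling pattern; here is a short placeholder sketch that uses the params Dict only to pass the optional versionId described in the parameter lists above.

```julia
using AWS
@service S3

bucket, key = "my-example-bucket", "contracts/master-agreement.pdf"  # placeholders

# Retention settings for the current object version.
retention = S3.get_object_retention(bucket, key)

# Tag set of a specific non-current version; per the notes above, this path
# needs s3:GetObjectVersionTagging rather than s3:GetObjectTagging.
tags = S3.get_object_tagging(
    bucket, key, Dict{String,Any}("versionId" => "example-version-id")
)
```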
- `"x-amz-request-payer"`: """ function get_object_torrent(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) @@ -3225,17 +3912,18 @@ end get_public_access_block(bucket) get_public_access_block(bucket, params::Dict{String,<:Any}) -Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this -operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information -about Amazon S3 permissions, see Specifying Permissions in a Policy. When Amazon S3 -evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the -PublicAccessBlock configuration for both the bucket (or the bucket that contains the -object) and the bucket owner's account. If the PublicAccessBlock settings are different -between the bucket and the account, Amazon S3 uses the most restrictive combination of the -bucket-level and account-level settings. For more information about when Amazon S3 -considers a bucket or an object public, see The Meaning of \"Public\". The following -operations are related to GetPublicAccessBlock: Using Amazon S3 Block Public Access -PutPublicAccessBlock GetPublicAccessBlock DeletePublicAccessBlock + This operation is not supported by directory buckets. Retrieves the PublicAccessBlock +configuration for an Amazon S3 bucket. To use this operation, you must have the +s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, +see Specifying Permissions in a Policy. When Amazon S3 evaluates the PublicAccessBlock +configuration for a bucket or an object, it checks the PublicAccessBlock configuration for +both the bucket (or the bucket that contains the object) and the bucket owner's account. If +the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 +uses the most restrictive combination of the bucket-level and account-level settings. For +more information about when Amazon S3 considers a bucket or an object public, see The +Meaning of \"Public\". The following operations are related to GetPublicAccessBlock: +Using Amazon S3 Block Public Access PutPublicAccessBlock GetPublicAccessBlock +DeletePublicAccessBlock # Arguments - `bucket`: The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want @@ -3244,8 +3932,8 @@ PutPublicAccessBlock GetPublicAccessBlock DeletePublicAccessBlock # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function get_public_access_block(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3271,49 +3959,67 @@ end head_bucket(bucket) head_bucket(bucket, params::Dict{String,<:Any}) -This action is useful to determine if a bucket exists and you have permission to access it. -The action returns a 200 OK if the bucket exists and you have permission to access it. If -the bucket does not exist or you do not have permission to access it, the HEAD request -returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A message body is -not included, so you cannot determine the exception beyond these error codes. 
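For the GetPublicAccessBlock binding above, a minimal sketch follows; the bucket name and account ID are placeholders, and the expected-bucket-owner key is taken directly from the optional-parameter list.

```julia
using AWS
@service S3

# Read the bucket's PublicAccessBlock configuration. The expected-bucket-owner
# header makes the call fail with 403 Forbidden if the bucket is owned by a
# different account, as the parameter description above notes.
pab = S3.get_public_access_block(
    "my-example-bucket",
    Dict{String,Any}("x-amz-expected-bucket-owner" => "111122223333"),
)
```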
To use this -operation, you must have permissions to perform the s3:ListBucket action. The bucket owner -has this permission by default and can grant this permission to others. For more -information about permissions, see Permissions Related to Bucket Subresource Operations and -Managing Access Permissions to Your Amazon S3 Resources. To use this API operation against -an access point, you must provide the alias of the access point in place of the bucket name -or specify the access point ARN. When using the access point ARN, you must direct requests -to the access point hostname. The access point hostname takes the form -AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the Amazon Web -Services SDKs, you provide the ARN in place of the bucket name. For more information, see -Using access points. To use this API operation against an Object Lambda access point, -provide the alias of the Object Lambda access point in place of the bucket name. If the -Object Lambda access point alias in a request is not valid, the error code -InvalidAccessPointAliasError is returned. For more information about -InvalidAccessPointAliasError, see List of Error Codes. +You can use this operation to determine if a bucket exists and if you have permission to +access it. The action returns a 200 OK if the bucket exists and you have permission to +access it. If the bucket does not exist or you do not have permission to access it, the +HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A +message body is not included, so you cannot determine the exception beyond these HTTP +response codes. Directory buckets - You must make requests for this API operation to the +Zonal endpoint. These endpoints support virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not +supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Authentication and authorization All HeadBucket requests must be authenticated +and signed by using IAM credentials (access key ID and secret access key for the IAM +identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be +signed. For more information, see REST Authentication. Directory bucket - You must use IAM +credentials to authenticate and authorize your access to the HeadBucket API operation, +instead of using the temporary security credentials through the CreateSession API +operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your +behalf. Permissions General purpose bucket permissions - To use this operation, you +must have permissions to perform the s3:ListBucket action. The bucket owner has this +permission by default and can grant this permission to others. For more information about +permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 +User Guide. Directory bucket permissions - You must have the s3express:CreateSession +permission in the Action element of a policy. By default, the session is in the ReadWrite +mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode +condition key to ReadOnly on the bucket. For more information about example bucket +policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services +Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the +Amazon S3 User Guide. 
HTTP Host header syntax Directory buckets - The HTTP Host +header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. # Arguments -- `bucket`: The bucket name. When using this action with an access point, you must direct - requests to the access point hostname. The access point hostname takes the form +- `bucket`: The bucket name. Directory buckets - When you use this operation with a + directory bucket, you must use virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with an Object Lambda access - point, provide the alias of the Object Lambda access point in place of the bucket name. If - the Object Lambda access point alias in a request is not valid, the error code - InvalidAccessPointAliasError is returned. For more information about - InvalidAccessPointAliasError, see List of Error Codes. When you use this action with Amazon - S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - hostname takes the form + points in the Amazon S3 User Guide. Object Lambda access points - When you use this API + operation with an Object Lambda access point, provide the alias of the Object Lambda access + point in place of the bucket name. If the Object Lambda access point alias in a request is + not valid, the error code InvalidAccessPointAliasError is returned. For more information + about InvalidAccessPointAliasError, see List of Error Codes. Access points and Object + Lambda access points are not supported by directory buckets. S3 on Outposts - When you + use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
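Since HeadBucket returns no body, the only signal is the HTTP status code, which AWS.jl surfaces as a thrown exception for error responses. The sketch below is a coarse accessibility probe under that assumption; the bucket name is a placeholder and the blanket catch deliberately treats every failure the same way.

```julia
using AWS
@service S3

# Returns true when the bucket exists and the caller may access it; any
# failure (400/403/404 per the description above) is treated as "not usable".
function bucket_accessible(bucket::AbstractString)
    try
        S3.head_bucket(bucket)
        return true
    catch
        return false   # coarse: swallows every failure mode, by design
    end
end

bucket_accessible("my-example-bucket")   # placeholder bucket name
```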
""" function head_bucket(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3("HEAD", "/$(Bucket)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -3330,90 +4036,144 @@ end head_object(bucket, key) head_object(bucket, key, params::Dict{String,<:Any}) -The HEAD action retrieves metadata from an object without returning the object itself. This -action is useful if you're only interested in an object's metadata. To use HEAD, you must -have READ access to the object. A HEAD request has the same options as a GET action on an -object. The response is identical to the GET response except that there is no response -body. Because of this, if the HEAD request generates an error, it returns a generic 400 Bad -Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve the exact -exception beyond these error codes. If you encrypt an object by using server-side -encryption with customer-provided encryption keys (SSE-C) when you store the object in -Amazon S3, then when you retrieve the metadata from the object, you must use the following -headers: x-amz-server-side-encryption-customer-algorithm -x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 -For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -Encryption Keys). Encryption request headers, like x-amz-server-side-encryption, should -not be sent for GET requests if your object uses server-side encryption with KMS keys -(SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If -your object does use these types of keys, you’ll get an HTTP 400 BadRequest error. The -last modified property in this case is the creation date of the object. Request headers -are limited to 8 KB in size. For more information, see Common Request Headers. Consider the -following when using request headers: Consideration 1 – If both of the If-Match and -If-Unmodified-Since headers are present in the request as follows: If-Match condition -evaluates to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon -S3 returns 200 OK and the data requested. Consideration 2 – If both of the -If-None-Match and If-Modified-Since headers are present in the request as follows: -If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates -to true; Then Amazon S3 returns the 304 Not Modified response code. For more -information about conditional requests, see RFC 7232. Permissions You need the relevant -read object (or version) permission for this operation. For more information, see Actions, -resources, and condition keys for Amazon S3. If the object you request does not exist, the -error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If -you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code -404 (\"no such key\") error. If you don’t have the s3:ListBucket permission, Amazon S3 -returns an HTTP status code 403 (\"access denied\") error. The following actions are -related to HeadObject: GetObject GetObjectAttributes +The HEAD operation retrieves metadata from an object without returning the object itself. +This operation is useful if you're interested only in an object's metadata. A HEAD request +has the same options as a GET operation on an object. The response is identical to the GET +response except that there is no response body. 
Because of this, if the HEAD request +generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 +Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not +possible to retrieve the exact exception of these error codes. Request headers are limited +to 8 KB in size. For more information, see Common Request Headers. Directory buckets - +For directory buckets, you must make requests for this API operation to the Zonal endpoint. +These endpoints support virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Permissions General purpose bucket permissions - To use HEAD, you must have +the s3:GetObject permission. You need the relevant read object (or version) permission for +this operation. For more information, see Actions, resources, and condition keys for Amazon +S3 in the Amazon S3 User Guide. If the object you request doesn't exist, the error that +Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you +have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 +Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an +HTTP status code 403 Forbidden error. Directory bucket permissions - To grant access +to this API operation on a directory bucket, we recommend that you use the CreateSession +API operation for session-based authorization. Specifically, you grant the +s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM +identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a +session token. With the session token in your request header, you can make API requests to +this operation. After the session token expires, you make another CreateSession API call to +generate a new session token for use. Amazon Web Services CLI or SDKs create session and +refresh the session token automatically to avoid service interruptions when a session +expires. For more information about authorization, see CreateSession . Encryption +Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD +requests if your object uses server-side encryption with Key Management Service (KMS) keys +(SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), +or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The +x-amz-server-side-encryption header is used when you PUT an object to S3 and want to +specify the encryption method. If you include this header in a HEAD request for an object +that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the +encryption method can't be changed when you retrieve the object. If you encrypt an object +by using server-side encryption with customer-provided encryption keys (SSE-C) when you +store the object in Amazon S3, then when you retrieve the metadata from the object, you +must use the following headers to provide the encryption key for the server to be able to +retrieve the object's metadata. 
The headers are: +x-amz-server-side-encryption-customer-algorithm +x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 + For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For +directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) +(AES256) is supported. Versioning If the current version of the object is a delete +marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: +true in the response. If the specified version is a delete marker, the response returns a +405 Method Not Allowed error and the Last-Modified: timestamp response header. +Directory buckets - Delete marker is not supported by directory buckets. Directory +buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API +operation, only the null value of the version ID is supported by directory buckets. You can +only specify null to the versionId query parameter in the request. HTTP Host header +syntax Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to +HeadObject: GetObject GetObjectAttributes # Arguments -- `bucket`: The name of the bucket containing the object. When using this action with an - access point, you must direct requests to the access point hostname. The access point - hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When - using this action with an access point through the Amazon Web Services SDKs, you provide - the access point ARN in place of the bucket name. For more information about access point - ARNs, see Using access points in the Amazon S3 User Guide. When you use this action with - Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - Outposts hostname takes the form +- `bucket`: The name of the bucket that contains the object. Directory buckets - When you + use this operation with a directory bucket, you must use virtual-hosted-style requests in + the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not + supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket + names must follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form + AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with + an access point through the Amazon Web Services SDKs, you provide the access point ARN in + place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The object key. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"If-Match"`: Return the object only if its entity tag (ETag) is the same as the one - specified; otherwise, return a 412 (precondition failed) error. + specified; otherwise, return a 412 (precondition failed) error. If both of the If-Match and + If-Unmodified-Since headers are present in the request as follows: If-Match condition + evaluates to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon + S3 returns 200 OK and the data requested. For more information about conditional requests, + see RFC 7232. - `"If-Modified-Since"`: Return the object only if it has been modified since the specified - time; otherwise, return a 304 (not modified) error. + time; otherwise, return a 304 (not modified) error. If both of the If-None-Match and + If-Modified-Since headers are present in the request as follows: If-None-Match condition + evaluates to false, and; If-Modified-Since condition evaluates to true; Then Amazon S3 + returns the 304 Not Modified response code. For more information about conditional + requests, see RFC 7232. - `"If-None-Match"`: Return the object only if its entity tag (ETag) is different from the - one specified; otherwise, return a 304 (not modified) error. + one specified; otherwise, return a 304 (not modified) error. If both of the If-None-Match + and If-Modified-Since headers are present in the request as follows: If-None-Match + condition evaluates to false, and; If-Modified-Since condition evaluates to true; Then + Amazon S3 returns the 304 Not Modified response code. For more information about + conditional requests, see RFC 7232. - `"If-Unmodified-Since"`: Return the object only if it has not been modified since the - specified time; otherwise, return a 412 (precondition failed) error. + specified time; otherwise, return a 412 (precondition failed) error. If both of the + If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match + condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; + Then Amazon S3 returns 200 OK and the data requested. For more information about + conditional requests, see RFC 7232. - `"Range"`: HeadObject returns only the metadata for an object. If the Range is satisfiable, only the ContentLength is affected in the response. If the Range is not satisfiable, S3 returns a 416 - Requested Range Not Satisfiable error. - `"partNumber"`: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object. -- `"versionId"`: VersionId used to reference a specific version of the object. +- `"versionId"`: Version ID used to reference a specific version of the object. For + directory buckets in this API operation, only the null value of the version ID is + supported. - `"x-amz-checksum-mode"`: To retrieve the checksum, this parameter must be enabled. 
In addition, if you enable ChecksumMode and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must have permission to use the kms:Decrypt action for the request to succeed. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: -- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use to - when encrypting the object (for example, AES256). +- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when + encrypting the object (for example, AES256). This functionality is not supported for + directory buckets. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the - x-amz-server-side-encryption-customer-algorithm header. + x-amz-server-side-encryption-customer-algorithm header. This functionality is not + supported for directory buckets. - `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + integrity check to ensure that the encryption key was transmitted without error. This + functionality is not supported for directory buckets. """ function head_object(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3439,21 +4199,22 @@ end list_bucket_analytics_configurations(bucket) list_bucket_analytics_configurations(bucket, params::Dict{String,<:Any}) -Lists the analytics configurations for the bucket. You can have up to 1,000 analytics -configurations per bucket. This action supports list pagination and does not return more -than 100 configurations at a time. You should always check the IsTruncated element in the -response. If there are no more configurations to list, IsTruncated is set to false. If -there are more configurations to list, IsTruncated is set to true, and there will be a -value in NextContinuationToken. You use the NextContinuationToken value to continue the -pagination of the list by passing the value in continuation-token in the request to GET the -next page. To use this operation, you must have permissions to perform the -s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources. For information about Amazon S3 analytics feature, see Amazon S3 -Analytics – Storage Class Analysis. The following operations are related to -ListBucketAnalyticsConfigurations: GetBucketAnalyticsConfiguration -DeleteBucketAnalyticsConfiguration PutBucketAnalyticsConfiguration + This operation is not supported by directory buckets. Lists the analytics configurations +for the bucket. You can have up to 1,000 analytics configurations per bucket. 
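Before the configuration-listing operations, here is a hedged HeadObject sketch tying together the conditional headers and checksum mode documented above. The bucket, key, and ETag are placeholders; the parameter names come straight from the optional-parameter list.

```julia
using AWS
@service S3

bucket, key = "my-example-bucket", "images/logo.png"   # placeholder names

# Plain metadata lookup; the response carries headers only, no body.
meta = S3.head_object(bucket, key)

# Conditional variant with checksum retrieval enabled; the ETag value is a
# placeholder, and SSE-C objects would additionally need the three
# customer-key headers described above.
conditional = S3.head_object(
    bucket,
    key,
    Dict{String,Any}(
        "If-None-Match" => "\"example-etag\"",
        "x-amz-checksum-mode" => "ENABLED",
    ),
)
```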
This action +supports list pagination and does not return more than 100 configurations at a time. You +should always check the IsTruncated element in the response. If there are no more +configurations to list, IsTruncated is set to false. If there are more configurations to +list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You +use the NextContinuationToken value to continue the pagination of the list by passing the +value in continuation-token in the request to GET the next page. To use this operation, you +must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner +has this permission by default. The bucket owner can grant this permission to others. For +more information about permissions, see Permissions Related to Bucket Subresource +Operations and Managing Access Permissions to Your Amazon S3 Resources. For information +about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis. The +following operations are related to ListBucketAnalyticsConfigurations: +GetBucketAnalyticsConfiguration DeleteBucketAnalyticsConfiguration +PutBucketAnalyticsConfiguration # Arguments - `bucket`: The name of the bucket from which analytics configurations are retrieved. @@ -3463,8 +4224,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"continuation-token"`: The ContinuationToken that represents a placeholder from where this request should begin. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function list_bucket_analytics_configurations( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -3492,21 +4253,22 @@ end list_bucket_intelligent_tiering_configurations(bucket) list_bucket_intelligent_tiering_configurations(bucket, params::Dict{String,<:Any}) -Lists the S3 Intelligent-Tiering configuration from the specified bucket. The S3 -Intelligent-Tiering storage class is designed to optimize storage costs by automatically -moving data to the most cost-effective storage access tier, without performance impact or -operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low -latency and high throughput access tiers. To get the lowest storage cost on data that can -be accessed in minutes to hours, you can choose to activate additional archiving -capabilities. The S3 Intelligent-Tiering storage class is the ideal storage class for data -with unknown, changing, or unpredictable access patterns, independent of object size or -retention period. If the size of an object is less than 128 KB, it is not monitored and not -eligible for auto-tiering. Smaller objects can be stored, but they are always charged at -the Frequent Access tier rates in the S3 Intelligent-Tiering storage class. For more -information, see Storage class for automatically optimizing frequently and infrequently -accessed objects. Operations related to ListBucketIntelligentTieringConfigurations include: - DeleteBucketIntelligentTieringConfiguration -PutBucketIntelligentTieringConfiguration GetBucketIntelligentTieringConfiguration + This operation is not supported by directory buckets. Lists the S3 Intelligent-Tiering +configuration from the specified bucket. 
The S3 Intelligent-Tiering storage class is +designed to optimize storage costs by automatically moving data to the most cost-effective +storage access tier, without performance impact or operational overhead. S3 +Intelligent-Tiering delivers automatic cost savings in three low latency and high +throughput access tiers. To get the lowest storage cost on data that can be accessed in +minutes to hours, you can choose to activate additional archiving capabilities. The S3 +Intelligent-Tiering storage class is the ideal storage class for data with unknown, +changing, or unpredictable access patterns, independent of object size or retention period. +If the size of an object is less than 128 KB, it is not monitored and not eligible for +auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent +Access tier rates in the S3 Intelligent-Tiering storage class. For more information, see +Storage class for automatically optimizing frequently and infrequently accessed objects. +Operations related to ListBucketIntelligentTieringConfigurations include: +DeleteBucketIntelligentTieringConfiguration PutBucketIntelligentTieringConfiguration + GetBucketIntelligentTieringConfiguration # Arguments - `bucket`: The name of the Amazon S3 bucket whose configuration you want to modify or @@ -3543,11 +4305,12 @@ end list_bucket_inventory_configurations(bucket) list_bucket_inventory_configurations(bucket, params::Dict{String,<:Any}) -Returns a list of inventory configurations for the bucket. You can have up to 1,000 -analytics configurations per bucket. This action supports list pagination and does not -return more than 100 configurations at a time. Always check the IsTruncated element in the -response. If there are no more configurations to list, IsTruncated is set to false. If -there are more configurations to list, IsTruncated is set to true, and there is a value in + This operation is not supported by directory buckets. Returns a list of inventory +configurations for the bucket. You can have up to 1,000 analytics configurations per +bucket. This action supports list pagination and does not return more than 100 +configurations at a time. Always check the IsTruncated element in the response. If there +are no more configurations to list, IsTruncated is set to false. If there are more +configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page. To use this operation, you must have permissions to perform the @@ -3569,8 +4332,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys response to continue the listing. The continuation token is an opaque value that Amazon S3 understands. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function list_bucket_inventory_configurations( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -3598,21 +4361,22 @@ end list_bucket_metrics_configurations(bucket) list_bucket_metrics_configurations(bucket, params::Dict{String,<:Any}) -Lists the metrics configurations for the bucket. 
The metrics configurations are only for -the request metrics of the bucket and do not provide information on daily storage metrics. -You can have up to 1,000 configurations per bucket. This action supports list pagination -and does not return more than 100 configurations at a time. Always check the IsTruncated -element in the response. If there are no more configurations to list, IsTruncated is set to -false. If there are more configurations to list, IsTruncated is set to true, and there is a -value in NextContinuationToken. You use the NextContinuationToken value to continue the -pagination of the list by passing the value in continuation-token in the request to GET the -next page. To use this operation, you must have permissions to perform the -s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources. For more information about metrics configurations and CloudWatch -request metrics, see Monitoring Metrics with Amazon CloudWatch. The following operations -are related to ListBucketMetricsConfigurations: PutBucketMetricsConfiguration + This operation is not supported by directory buckets. Lists the metrics configurations +for the bucket. The metrics configurations are only for the request metrics of the bucket +and do not provide information on daily storage metrics. You can have up to 1,000 +configurations per bucket. This action supports list pagination and does not return more +than 100 configurations at a time. Always check the IsTruncated element in the response. If +there are no more configurations to list, IsTruncated is set to false. If there are more +configurations to list, IsTruncated is set to true, and there is a value in +NextContinuationToken. You use the NextContinuationToken value to continue the pagination +of the list by passing the value in continuation-token in the request to GET the next page. +To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +action. The bucket owner has this permission by default. The bucket owner can grant this +permission to others. For more information about permissions, see Permissions Related to +Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. +For more information about metrics configurations and CloudWatch request metrics, see +Monitoring Metrics with Amazon CloudWatch. The following operations are related to +ListBucketMetricsConfigurations: PutBucketMetricsConfiguration GetBucketMetricsConfiguration DeleteBucketMetricsConfiguration # Arguments @@ -3625,8 +4389,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
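The IsTruncated / NextContinuationToken / continuation-token handshake described here (and in the analytics, Intelligent-Tiering, and inventory listings earlier) maps to a small loop in Julia. A hedged sketch, assuming the parsed XML arrives as a Dict whose IsTruncated value stringifies to "true" or "false" and using a placeholder bucket name:

```julia
using AWS
@service S3

# Collect every page of metrics configurations, following NextContinuationToken
# exactly as the docstring describes. The same loop shape works for the other
# configuration-listing operations above.
function metrics_configuration_pages(bucket::AbstractString)
    pages = Any[]
    params = Dict{String,Any}()
    while true
        page = S3.list_bucket_metrics_configurations(bucket, params)
        push!(pages, page)
        # Field names come from the documented response; treating the parsed
        # values as strings is an assumption about the XML decoding.
        string(get(page, "IsTruncated", "false")) == "true" || break
        params["continuation-token"] = page["NextContinuationToken"]
    end
    return pages
end

metrics_configuration_pages("my-example-bucket")
```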
""" function list_bucket_metrics_configurations( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -3651,9 +4415,10 @@ end list_buckets() list_buckets(params::Dict{String,<:Any}) -Returns a list of all buckets owned by the authenticated sender of the request. To use this -operation, you must have the s3:ListAllMyBuckets permission. For information about Amazon -S3 buckets, see Creating, configuring, and working with Amazon S3 buckets. + This operation is not supported by directory buckets. Returns a list of all buckets owned +by the authenticated sender of the request. To use this operation, you must have the +s3:ListAllMyBuckets permission. For information about Amazon S3 buckets, see Creating, +configuring, and working with Amazon S3 buckets. """ function list_buckets(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -3665,40 +4430,118 @@ function list_buckets( return s3("GET", "/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) end +""" + list_directory_buckets() + list_directory_buckets(params::Dict{String,<:Any}) + +Returns a list of all Amazon S3 directory buckets owned by the authenticated sender of the +request. For more information about directory buckets, see Directory buckets in the Amazon +S3 User Guide. Directory buckets - For directory buckets, you must make requests for +this API operation to the Regional endpoint. These endpoints support path-style requests in +the format https://s3express-control.region_code.amazonaws.com/bucket-name . +Virtual-hosted-style requests aren't supported. For more information, see Regional and +Zonal endpoints in the Amazon S3 User Guide. Permissions You must have the +s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy instead of a +bucket policy. Cross-account access to this API operation isn't supported. This operation +can only be performed by the Amazon Web Services account that owns the resource. For more +information about directory bucket policies and permissions, see Amazon Web Services +Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. +HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +s3express-control.region.amazonaws.com. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"continuation-token"`: ContinuationToken indicates to Amazon S3 that the list is being + continued on this bucket with a token. ContinuationToken is obfuscated and is not a real + key. You can use this ContinuationToken for pagination of the list results. +- `"max-directory-buckets"`: Maximum number of buckets to be returned in response. When the + number is more than the count of buckets that are owned by an Amazon Web Services account, + return all the buckets in response. +""" +function list_directory_buckets(; aws_config::AbstractAWSConfig=global_aws_config()) + return s3("GET", "/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_directory_buckets( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3("GET", "/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end + """ list_multipart_uploads(bucket) list_multipart_uploads(bucket, params::Dict{String,<:Any}) -This action lists in-progress multipart uploads. An in-progress multipart upload is a -multipart upload that has been initiated using the Initiate Multipart Upload request, but -has not yet been completed or aborted. 
This action returns at most 1,000 multipart uploads -in the response. 1,000 multipart uploads is the maximum number of uploads a response can -include, which is also the default value. You can further limit the number of uploads in a -response by specifying the max-uploads parameter in the response. If additional multipart -uploads satisfy the list criteria, the response will contain an IsTruncated element with -the value true. To list the additional multipart uploads, use the key-marker and -upload-id-marker request parameters. In the response, the uploads are sorted by key. If -your application has initiated more than one multipart upload using the same object key, -then uploads in the response are first sorted by key. Additionally, uploads are sorted in -ascending order within each key by the upload initiation time. For more information on -multipart uploads, see Uploading Objects Using Multipart Upload. For information on -permissions required to use the multipart upload API, see Multipart Upload and Permissions. -The following operations are related to ListMultipartUploads: CreateMultipartUpload -UploadPart CompleteMultipartUpload ListParts AbortMultipartUpload +This operation lists in-progress multipart uploads in a bucket. An in-progress multipart +upload is a multipart upload that has been initiated by the CreateMultipartUpload request, +but has not yet been completed or aborted. Directory buckets - If multipart uploads in a +directory bucket are in progress, you can't delete the bucket until all the in-progress +multipart uploads are aborted or completed. The ListMultipartUploads operation returns a +maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is +also the default value. You can further limit the number of uploads in a response by +specifying the max-uploads request parameter. If there are more than 1,000 multipart +uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated +element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. +To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads +requests. In these requests, include two query parameters: key-marker and upload-id-marker. +Set the value of key-marker to the NextKeyMarker value from the previous response. +Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the +previous response. Directory buckets - The upload-id-marker element and the +NextUploadIdMarker element aren't supported by directory buckets. To list the additional +multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value +from the previous response. For more information about multipart uploads, see Uploading +Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For +directory buckets, you must make requests for this API operation to the Zonal endpoint. +These endpoints support virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Permissions General purpose bucket permissions - For information about +permissions required to use the multipart upload API, see Multipart Upload and Permissions +in the Amazon S3 User Guide. 
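A minimal sketch of the paging flow just described, for a general purpose bucket, assuming the parsed response exposes `IsTruncated`, `NextKeyMarker`, and `NextUploadIdMarker` (the bucket name is a placeholder):

```julia
using AWS
@service S3

bucket = "amzn-s3-demo-bucket"   # placeholder
params = Dict{String,Any}("max-uploads" => 1000)
while true
    resp = S3.list_multipart_uploads(bucket, params)
    # ... inspect the in-progress uploads reported in `resp` here ...
    get(resp, "IsTruncated", "false") == "true" || break
    # General purpose buckets: carry both markers forward; directory buckets
    # would use only key-marker, as noted above.
    params["key-marker"] = resp["NextKeyMarker"]
    params["upload-id-marker"] = resp["NextUploadIdMarker"]
end
```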
Directory bucket permissions - To grant access to this API +operation on a directory bucket, we recommend that you use the CreateSession API +operation for session-based authorization. Specifically, you grant the +s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM +identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a +session token. With the session token in your request header, you can make API requests to +this operation. After the session token expires, you make another CreateSession API call to +generate a new session token for use. Amazon Web Services CLI or SDKs create session and +refresh the session token automatically to avoid service interruptions when a session +expires. For more information about authorization, see CreateSession . Sorting of +multipart uploads in response General purpose bucket - In the ListMultipartUploads +response, the multipart uploads are sorted based on two criteria: Key-based sorting - +Multipart uploads are initially sorted in ascending order based on their object keys. +Time-based sorting - For uploads that share the same object key, they are further sorted in +ascending order based on the upload initiation time. Among uploads with the same key, the +one that was initiated first will appear before the ones that were initiated later. +Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't +sorted lexicographically based on the object keys. HTTP Host header syntax Directory +buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to +ListMultipartUploads: CreateMultipartUpload UploadPart CompleteMultipartUpload + ListParts AbortMultipartUpload # Arguments -- `bucket`: The name of the bucket to which the multipart upload was initiated. When using - this action with an access point, you must direct requests to the access point hostname. - The access point hostname takes the form +- `bucket`: The name of the bucket to which the multipart upload was initiated. Directory + buckets - When you use this operation with a directory bucket, you must use + virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3707,27 +4550,38 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys prefix are grouped under a single result element, CommonPrefixes. If you don't specify the prefix parameter, then the substring starts at the beginning of the key. The keys that are grouped under CommonPrefixes result element are not returned elsewhere in the response. + Directory buckets - For directory buckets, / is the only supported delimiter. - `"encoding-type"`: -- `"key-marker"`: Together with upload-id-marker, this parameter specifies the multipart - upload after which listing should begin. If upload-id-marker is not specified, only the - keys lexicographically greater than the specified key-marker will be included in the list. - If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker - might also be included, provided those multipart uploads have upload IDs lexicographically - greater than the specified upload-id-marker. +- `"key-marker"`: Specifies the multipart upload after which listing should begin. + General purpose buckets - For general purpose buckets, key-marker is an object key. + Together with upload-id-marker, this parameter specifies the multipart upload after which + listing should begin. If upload-id-marker is not specified, only the keys lexicographically + greater than the specified key-marker will be included in the list. If upload-id-marker is + specified, any multipart uploads for a key equal to the key-marker might also be included, + provided those multipart uploads have upload IDs lexicographically greater than the + specified upload-id-marker. Directory buckets - For directory buckets, key-marker is + obfuscated and isn't a real object key. The upload-id-marker parameter isn't supported by + directory buckets. To list the additional multipart uploads, you only need to set the value + of key-marker to the NextKeyMarker value from the previous response. In the + ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based + on the object keys. - `"max-uploads"`: Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response. - `"prefix"`: Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different grouping of keys. (You can - think of using prefix to make groups in the same way you'd use a folder in a file system.) 
+ think of using prefix to make groups in the same way that you'd use a folder in a file + system.) Directory buckets - For directory buckets, only prefixes that end in a delimiter + (/) are supported. - `"upload-id-marker"`: Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key-marker might be included in the list only if they have an upload ID lexicographically greater than the - specified upload-id-marker. + specified upload-id-marker. This functionality is not supported for directory buckets. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-request-payer"`: """ function list_multipart_uploads(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3750,15 +4604,15 @@ end list_object_versions(bucket) list_object_versions(bucket, params::Dict{String,<:Any}) -Returns metadata about all versions of the objects in a bucket. You can also use request -parameters as selection criteria to return metadata about a subset of all the object -versions. To use this operation, you must have permissions to perform the -s3:ListBucketVersions action. Be aware of the name difference. A 200 OK response can -contain valid or invalid XML. Make sure to design your application to parse the contents of -the response and handle it appropriately. To use this operation, you must have READ access -to the bucket. This action is not supported by Amazon S3 on Outposts. The following -operations are related to ListObjectVersions: ListObjectsV2 GetObject PutObject - DeleteObject + This operation is not supported by directory buckets. Returns metadata about all versions +of the objects in a bucket. You can also use request parameters as selection criteria to +return metadata about a subset of all the object versions. To use this operation, you +must have permission to perform the s3:ListBucketVersions action. Be aware of the name +difference. A 200 OK response can contain valid or invalid XML. Make sure to design +your application to parse the contents of the response and handle it appropriately. To use +this operation, you must have READ access to the bucket. The following operations are +related to ListObjectVersions: ListObjectsV2 GetObject PutObject +DeleteObject # Arguments - `bucket`: The bucket name that contains the objects. @@ -3772,20 +4626,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys response. - `"encoding-type"`: - `"key-marker"`: Specifies the key to start with when listing objects in a bucket. -- `"max-keys"`: Sets the maximum number of keys returned in the response. By default the +- `"max-keys"`: Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker. 
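A minimal sketch of walking every object version under a prefix with `list_object_versions`, assuming the parsed response exposes `IsTruncated`, `NextKeyMarker`, and `NextVersionIdMarker` (bucket and prefix are placeholders):

```julia
using AWS
@service S3

bucket = "amzn-s3-demo-bucket"   # placeholder
params = Dict{String,Any}("prefix" => "photos/", "max-keys" => 1000)
while true
    resp = S3.list_object_versions(bucket, params)
    # ... handle the version and delete-marker entries in `resp` here ...
    get(resp, "IsTruncated", "false") == "true" || break
    params["key-marker"] = resp["NextKeyMarker"]
    params["version-id-marker"] = resp["NextVersionIdMarker"]
end
```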
- `"prefix"`: Use this parameter to select only those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You - can think of using prefix to make groups in the same way you'd use a folder in a file + can think of using prefix to make groups in the same way that you'd use a folder in a file system.) You can use prefix with delimiter to roll up numerous objects into a single result under CommonPrefixes. - `"version-id-marker"`: Specifies the object version you want to start listing from. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-optional-object-attributes"`: Specifies the optional fields that you want + returned in the response. Fields that you do not specify are not returned. +- `"x-amz-request-payer"`: """ function list_object_versions(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3808,42 +4665,53 @@ end list_objects(bucket) list_objects(bucket, params::Dict{String,<:Any}) -Returns some or all (up to 1,000) of the objects in a bucket. You can use the request -parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK -response can contain valid or invalid XML. Be sure to design your application to parse the -contents of the response and handle it appropriately. This action has been revised. We -recommend that you use the newer version, ListObjectsV2, when developing applications. For -backward compatibility, Amazon S3 continues to support ListObjects. The following -operations are related to ListObjects: ListObjectsV2 GetObject PutObject -CreateBucket ListBuckets + This operation is not supported by directory buckets. Returns some or all (up to 1,000) +of the objects in a bucket. You can use the request parameters as selection criteria to +return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid +XML. Be sure to design your application to parse the contents of the response and handle it +appropriately. This action has been revised. We recommend that you use the newer version, +ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 +continues to support ListObjects. The following operations are related to ListObjects: +ListObjectsV2 GetObject PutObject CreateBucket ListBuckets # Arguments -- `bucket`: The name of the bucket containing the objects. When using this action with an - access point, you must direct requests to the access point hostname. The access point - hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When - using this action with an access point through the Amazon Web Services SDKs, you provide - the access point ARN in place of the bucket name. For more information about access point - ARNs, see Using access points in the Amazon S3 User Guide. When you use this action with - Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - Outposts hostname takes the form +- `bucket`: The name of the bucket containing the objects. 
Directory buckets - When you + use this operation with a directory bucket, you must use virtual-hosted-style requests in + the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not + supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket + names must follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form + AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with + an access point through the Amazon Web Services SDKs, you provide the access point ARN in + place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"delimiter"`: A delimiter is a character you use to group keys. +- `"delimiter"`: A delimiter is a character that you use to group keys. - `"encoding-type"`: - `"marker"`: Marker is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. Marker can be any key in the bucket. -- `"max-keys"`: Sets the maximum number of keys returned in the response. By default the +- `"max-keys"`: Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. - `"prefix"`: Limits the response to keys that begin with the specified prefix. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-optional-object-attributes"`: Specifies the optional fields that you want + returned in the response. Fields that you do not specify are not returned. - `"x-amz-request-payer"`: Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests. @@ -3866,53 +4734,96 @@ end Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. 
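A one-call sketch of the older `list_objects` operation, grouping deeper keys under CommonPrefixes with a prefix/delimiter pair (names are placeholders; as the text above notes, ListObjectsV2 is preferred for new development):

```julia
using AWS
@service S3

# List one page of keys under a "folder"; keys below the next "/" are rolled
# up into CommonPrefixes rather than listed individually.
resp = S3.list_objects(
    "amzn-s3-demo-bucket",                                   # placeholder
    Dict("prefix" => "photos/2024/", "delimiter" => "/", "max-keys" => 100),
)
# ... inspect the object entries and common prefixes in `resp` here ...
```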
A 200 OK response can contain valid or invalid XML. Make sure to design your application to -parse the contents of the response and handle it appropriately. Objects are returned sorted -in an ascending order of the respective key names in the list. For more information about -listing objects, see Listing object keys programmatically To use this operation, you must -have READ access to the bucket. To use this action in an Identity and Access Management -(IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket -owner has this permission by default and can grant this permission to others. For more -information about permissions, see Permissions Related to Bucket Subresource Operations and -Managing Access Permissions to Your Amazon S3 Resources. This section describes the latest -revision of this action. We recommend that you use this revised API for application -development. For backward compatibility, Amazon S3 continues to support the prior version -of this API, ListObjects. To get a list of your buckets, see ListBuckets. The following -operations are related to ListObjectsV2: GetObject PutObject CreateBucket +parse the contents of the response and handle it appropriately. For more information about +listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To +get a list of your buckets, see ListBuckets. Directory buckets - For directory buckets, +you must make requests for this API operation to the Zonal endpoint. These endpoints +support virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Permissions General purpose bucket permissions - To use this operation, you +must have READ access to the bucket. You must have permission to perform the s3:ListBucket +action. The bucket owner has this permission by default and can grant this permission to +others. For more information about permissions, see Permissions Related to Bucket +Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the +Amazon S3 User Guide. Directory bucket permissions - To grant access to this API +operation on a directory bucket, we recommend that you use the CreateSession API +operation for session-based authorization. Specifically, you grant the +s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM +identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a +session token. With the session token in your request header, you can make API requests to +this operation. After the session token expires, you make another CreateSession API call to +generate a new session token for use. Amazon Web Services CLI or SDKs create session and +refresh the session token automatically to avoid service interruptions when a session +expires. For more information about authorization, see CreateSession . Sorting order of +returned objects General purpose bucket - For general purpose buckets, ListObjectsV2 +returns objects in lexicographical order based on their key names. Directory bucket - +For directory buckets, ListObjectsV2 does not return objects in lexicographical order. +HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. This section describes the latest +revision of this action. 
We recommend that you use this revised API operation for +application development. For backward compatibility, Amazon S3 continues to support the +prior version of this API operation, ListObjects. The following operations are related to +ListObjectsV2: GetObject PutObject CreateBucket # Arguments -- `bucket`: Bucket name to list. When using this action with an access point, you must - direct requests to the access point hostname. The access point hostname takes the form +- `bucket`: Directory buckets - When you use this operation with a directory bucket, you + must use virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"continuation-token"`: ContinuationToken indicates Amazon S3 that the list is being +- `"continuation-token"`: ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real - key. -- `"delimiter"`: A delimiter is a character you use to group keys. + key. You can use this ContinuationToken for pagination of the list results. +- `"delimiter"`: A delimiter is a character that you use to group keys. Directory + buckets - For directory buckets, / is the only supported delimiter. 
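Putting the continuation-token mechanics together for `list_objects_v2`, a hedged sketch of collecting all keys under a prefix; it assumes the parsed response exposes `Contents`, `IsTruncated`, and `NextContinuationToken`, and that a page holding a single object parses to one entry rather than a vector:

```julia
using AWS
@service S3

bucket = "amzn-s3-demo-bucket"   # placeholder
params = Dict{String,Any}("prefix" => "logs/", "max-keys" => 1000)
object_keys = String[]
while true
    resp = S3.list_objects_v2(bucket, params)
    contents = get(resp, "Contents", [])
    # Normalize: a single-object page parses as one entry, not a vector.
    for obj in (contents isa AbstractVector ? contents : [contents])
        push!(object_keys, obj["Key"])
    end
    get(resp, "IsTruncated", "false") == "true" || break
    params["continuation-token"] = resp["NextContinuationToken"]
end
```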
Directory buckets - + When you query ListObjectsV2 with a delimiter during in-progress multipart uploads, the + CommonPrefixes response parameter contains the prefixes that are associated with the + in-progress multipart uploads. For more information about multipart uploads, see Multipart + Upload Overview in the Amazon S3 User Guide. - `"encoding-type"`: Encoding type used by Amazon S3 to encode object keys in the response. -- `"fetch-owner"`: The owner field is not present in listV2 by default, if you want to - return owner field with each key in the result then set the fetch owner field to true. -- `"max-keys"`: Sets the maximum number of keys returned in the response. By default the + If using url, non-ASCII characters used in an object's key name will be URL encoded. For + example, the object test_file(3).png will appear as test_file%283%29.png. +- `"fetch-owner"`: The owner field is not present in ListObjectsV2 by default. If you want + to return the owner field with each key in the result, then set the FetchOwner field to + true. Directory buckets - For directory buckets, the bucket owner is returned as the + object owner for all objects. +- `"max-keys"`: Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. -- `"prefix"`: Limits the response to keys that begin with the specified prefix. +- `"prefix"`: Limits the response to keys that begin with the specified prefix. Directory + buckets - For directory buckets, only prefixes that end in a delimiter (/) are supported. - `"start-after"`: StartAfter is where you want Amazon S3 to start listing from. Amazon S3 - starts listing after this specified key. StartAfter can be any key in the bucket. + starts listing after this specified key. StartAfter can be any key in the bucket. This + functionality is not supported for directory buckets. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-optional-object-attributes"`: Specifies the optional fields that you want + returned in the response. Fields that you do not specify are not returned. This + functionality is not supported for directory buckets. - `"x-amz-request-payer"`: Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in - their requests. + their requests. This functionality is not supported for directory buckets. """ function list_objects_v2(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3938,35 +4849,64 @@ end list_parts(bucket, key, upload_id) list_parts(bucket, key, upload_id, params::Dict{String,<:Any}) -Lists the parts that have been uploaded for a specific multipart upload. This operation -must include the upload ID, which you obtain by sending the initiate multipart upload -request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded -parts. The default number of parts returned is 1,000 parts. You can restrict the number of -parts returned by specifying the max-parts request parameter. 
If your multipart upload -consists of more than 1,000 parts, the response returns an IsTruncated field with the value -of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can -include the part-number-marker query string parameter and set its value to the -NextPartNumberMarker field value from the previous response. If the upload was created -using a checksum algorithm, you will need to have permission to the kms:Decrypt action for -the request to succeed. For more information on multipart uploads, see Uploading Objects -Using Multipart Upload. For information on permissions required to use the multipart upload -API, see Multipart Upload and Permissions. The following operations are related to -ListParts: CreateMultipartUpload UploadPart CompleteMultipartUpload -AbortMultipartUpload GetObjectAttributes ListMultipartUploads +Lists the parts that have been uploaded for a specific multipart upload. To use this +operation, you must provide the upload ID in the request. You obtain this uploadID by +sending the initiate multipart upload request through CreateMultipartUpload. The ListParts +request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the +default value. You can restrict the number of parts in a response by specifying the +max-parts request parameter. If your multipart upload consists of more than 1,000 parts, +the response returns an IsTruncated field with the value of true, and a +NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts +requests, include the part-number-marker query string parameter and set its value to the +NextPartNumberMarker field value from the previous response. For more information on +multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User +Guide. Directory buckets - For directory buckets, you must make requests for this API +operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in +the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +requests are not supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. Permissions General purpose bucket permissions - For +information about permissions required to use the multipart upload API, see Multipart +Upload and Permissions in the Amazon S3 User Guide. If the upload was created using +server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have +permission to the kms:Decrypt action for the ListParts request to succeed. Directory +bucket permissions - To grant access to this API operation on a directory bucket, we +recommend that you use the CreateSession API operation for session-based authorization. +Specifically, you grant the s3express:CreateSession permission to the directory bucket in a +bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on +the bucket to obtain a session token. With the session token in your request header, you +can make API requests to this operation. After the session token expires, you make another +CreateSession API call to generate a new session token for use. Amazon Web Services CLI or +SDKs create session and refresh the session token automatically to avoid service +interruptions when a session expires. For more information about authorization, see +CreateSession . 
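A minimal sketch of the part-number-marker paging described here, with placeholder bucket, key, and upload ID, and with the response key names assumed from the parsed ListParts XML:

```julia
using AWS
@service S3

bucket = "amzn-s3-demo-bucket"       # placeholder
key = "large-object.bin"             # placeholder
upload_id = "EXAMPLEUPLOADID"        # placeholder ID from CreateMultipartUpload

params = Dict{String,Any}("max-parts" => 1000)
while true
    resp = S3.list_parts(bucket, key, upload_id, params)
    # ... inspect the uploaded part entries in `resp` here ...
    get(resp, "IsTruncated", "false") == "true" || break
    params["part-number-marker"] = resp["NextPartNumberMarker"]
end
```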
HTTP Host header syntax Directory buckets - The HTTP Host header +syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are +related to ListParts: CreateMultipartUpload UploadPart CompleteMultipartUpload + AbortMultipartUpload GetObjectAttributes ListMultipartUploads # Arguments -- `bucket`: The name of the bucket to which the parts are being uploaded. When using this - action with an access point, you must direct requests to the access point hostname. The - access point hostname takes the form +- `bucket`: The name of the bucket to which the parts are being uploaded. Directory + buckets - When you use this operation with a directory bucket, you must use + virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `upload_id`: Upload ID identifying the multipart upload whose parts are being listed. @@ -3976,21 +4916,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"part-number-marker"`: Specifies the part after which listing should begin. Only parts with higher part numbers will be listed. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). 
+ account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-server-side-encryption-customer-algorithm"`: The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C - keys in the Amazon S3 User Guide. + keys in the Amazon S3 User Guide. This functionality is not supported for directory + buckets. - `"x-amz-server-side-encryption-customer-key"`: The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User - Guide. + Guide. This functionality is not supported for directory buckets. - `"x-amz-server-side-encryption-customer-key-MD5"`: The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the - Amazon S3 User Guide. + Amazon S3 User Guide. This functionality is not supported for directory buckets. """ function list_parts( Bucket, Key, uploadId; aws_config::AbstractAWSConfig=global_aws_config() @@ -4025,22 +4966,22 @@ end put_bucket_accelerate_configuration(accelerate_configuration, bucket) put_bucket_accelerate_configuration(accelerate_configuration, bucket, params::Dict{String,<:Any}) -Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is -a bucket-level feature that enables you to perform faster data transfers to Amazon S3. To -use this operation, you must have permission to perform the s3:PutAccelerateConfiguration -action. The bucket owner has this permission by default. The bucket owner can grant this -permission to others. For more information about permissions, see Permissions Related to -Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. -The Transfer Acceleration state of a bucket can be set to one of the following two values: - Enabled – Enables accelerated data transfers to the bucket. Suspended – Disables -accelerated data transfers to the bucket. The GetBucketAccelerateConfiguration action -returns the transfer acceleration state of a bucket. After setting the Transfer -Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the -data transfer rates to the bucket increase. The name of the bucket used for Transfer -Acceleration must be DNS-compliant and must not contain periods (\".\"). For more -information about transfer acceleration, see Transfer Acceleration. The following -operations are related to PutBucketAccelerateConfiguration: -GetBucketAccelerateConfiguration CreateBucket + This operation is not supported by directory buckets. Sets the accelerate configuration +of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that +enables you to perform faster data transfers to Amazon S3. To use this operation, you must +have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has +this permission by default. The bucket owner can grant this permission to others. For more +information about permissions, see Permissions Related to Bucket Subresource Operations and +Managing Access Permissions to Your Amazon S3 Resources. 
The Transfer Acceleration state +of a bucket can be set to one of the following two values: Enabled – Enables +accelerated data transfers to the bucket. Suspended – Disables accelerated data +transfers to the bucket. The GetBucketAccelerateConfiguration action returns the transfer +acceleration state of a bucket. After setting the Transfer Acceleration state of a bucket +to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket +increase. The name of the bucket used for Transfer Acceleration must be DNS-compliant and +must not contain periods (\".\"). For more information about transfer acceleration, see +Transfer Acceleration. The following operations are related to +PutBucketAccelerateConfiguration: GetBucketAccelerateConfiguration CreateBucket # Arguments - `accelerate_configuration`: Container for setting the transfer acceleration state. @@ -4049,15 +4990,15 @@ GetBucketAccelerateConfiguration CreateBucket # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_accelerate_configuration( AccelerateConfiguration, Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -4095,30 +5036,30 @@ end put_bucket_acl(bucket) put_bucket_acl(bucket, params::Dict{String,<:Any}) -Sets the permissions on an existing bucket using access control lists (ACL). For more -information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP -permission. You can use one of the following two ways to set a bucket's permissions: -Specify the ACL in the request body Specify permissions using request headers You -cannot specify access permission using both the body and the request headers. Depending on -your application needs, you may choose to set the ACL on a bucket using either the request -body or the headers. For example, if you have an existing application that updates a bucket -ACL using the request body, then you can continue to use that approach. If your bucket -uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no -longer affect permissions. 
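One low-level way to set the Transfer Acceleration state described above is to PUT the documented XML payload directly against the accelerate subresource. This sketch uses the underlying `s3` service callable with an explicit `Body`, since how the high-level wrapper serializes a Dict payload to XML is not shown here; the bucket name is a placeholder.

```julia
using AWS
using AWS.AWSServices: s3

# Enable Transfer Acceleration on a placeholder bucket. Status may be
# "Enabled" or "Suspended".
body = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <Status>Enabled</Status>
</AccelerateConfiguration>
"""
s3("PUT", "/amzn-s3-demo-bucket?accelerate", Dict{String,Any}("Body" => body))
```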
You must use policies to grant access to your bucket and the -objects in it. Requests to set ACLs or update ACLs fail and return the -AccessControlListNotSupported error code. Requests to read ACLs are still supported. For -more information, see Controlling object ownership in the Amazon S3 User Guide. -Permissions You can set access permissions using one of the following methods: Specify a -canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, -known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. -Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot -use other access control-specific headers in your request. For more information, see Canned -ACL. Specify access permissions explicitly with the x-amz-grant-read, -x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When -using these headers, you specify explicit access permissions and grantees (Amazon Web -Services accounts or Amazon S3 groups) who will receive the permission. If you use these -ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These -parameters map to the set of permissions that Amazon S3 supports in an ACL. For more + This operation is not supported by directory buckets. Sets the permissions on an existing +bucket using access control lists (ACL). For more information, see Using ACLs. To set the +ACL of a bucket, you must have the WRITE_ACP permission. You can use one of the following +two ways to set a bucket's permissions: Specify the ACL in the request body Specify +permissions using request headers You cannot specify access permission using both the +body and the request headers. Depending on your application needs, you may choose to set +the ACL on a bucket using either the request body or the headers. For example, if you have +an existing application that updates a bucket ACL using the request body, then you can +continue to use that approach. If your bucket uses the bucket owner enforced setting for +S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use +policies to grant access to your bucket and the objects in it. Requests to set ACLs or +update ACLs fail and return the AccessControlListNotSupported error code. Requests to read +ACLs are still supported. For more information, see Controlling object ownership in the +Amazon S3 User Guide. Permissions You can set access permissions by using one of the +following methods: Specify a canned ACL with the x-amz-acl request header. Amazon S3 +supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined +set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If +you use this header, you cannot use other access control-specific headers in your request. +For more information, see Canned ACL. Specify access permissions explicitly with the +x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +headers. When using these headers, you specify explicit access permissions and grantees +(Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you +use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. +These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. 
You specify each grantee as a type=value pair, where the type is one of the following: id – if the value specified is the canonical user ID of an Amazon Web Services account uri – if you are granting @@ -4169,8 +5110,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys automatically. - `"x-amz-acl"`: The canned ACL to apply to the bucket. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-grant-full-control"`: Allows grantee the read, write, read ACP, and write ACP permissions on the bucket. - `"x-amz-grant-read"`: Allows grantee to list the objects in the bucket. @@ -4180,12 +5121,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys objects. - `"x-amz-grant-write-acp"`: Allows grantee to write the ACL for the applicable bucket. - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_acl(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -4208,31 +5149,32 @@ end put_bucket_analytics_configuration(analytics_configuration, bucket, id) put_bucket_analytics_configuration(analytics_configuration, bucket, id, params::Dict{String,<:Any}) -Sets an analytics configuration for the bucket (specified by the analytics configuration -ID). You can have up to 1,000 analytics configurations per bucket. You can choose to have -storage class analysis export analysis reports sent to a comma-separated values (CSV) flat -file. See the DataExport request element. Reports are updated daily and are based on the -object filters that you configure. When selecting data export, you specify a destination -bucket and an optional destination prefix where the file is written. You can export the -data to a destination bucket in a different account. However, the destination bucket must -be in the same Region as the bucket that you are making the PUT analytics configuration to. -For more information, see Amazon S3 Analytics – Storage Class Analysis. You must create -a bucket policy on the destination bucket where the exported file is written to grant -permissions to Amazon S3 to write objects to the bucket. For an example policy, see -Granting Permissions for Amazon S3 Inventory and Storage Class Analysis. 
To use this -operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. -The bucket owner has this permission by default. The bucket owner can grant this permission -to others. For more information about permissions, see Permissions Related to Bucket -Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. -PutBucketAnalyticsConfiguration has the following special errors: HTTP Error: HTTP 400 -Bad Request Code: InvalidArgument Cause: Invalid argument. HTTP Error: HTTP -400 Bad Request Code: TooManyConfigurations Cause: You are attempting to create a -new configuration but have already reached the 1,000-configuration limit. HTTP -Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not the owner of the -specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to -set the configuration on the bucket. The following operations are related to -PutBucketAnalyticsConfiguration: GetBucketAnalyticsConfiguration -DeleteBucketAnalyticsConfiguration ListBucketAnalyticsConfigurations + This operation is not supported by directory buckets. Sets an analytics configuration for +the bucket (specified by the analytics configuration ID). You can have up to 1,000 +analytics configurations per bucket. You can choose to have storage class analysis export +analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport +request element. Reports are updated daily and are based on the object filters that you +configure. When selecting data export, you specify a destination bucket and an optional +destination prefix where the file is written. You can export the data to a destination +bucket in a different account. However, the destination bucket must be in the same Region +as the bucket that you are making the PUT analytics configuration to. For more information, +see Amazon S3 Analytics – Storage Class Analysis. You must create a bucket policy on +the destination bucket where the exported file is written to grant permissions to Amazon S3 +to write objects to the bucket. For an example policy, see Granting Permissions for Amazon +S3 Inventory and Storage Class Analysis. To use this operation, you must have permissions +to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +default. The bucket owner can grant this permission to others. For more information about +permissions, see Permissions Related to Bucket Subresource Operations and Managing Access +Permissions to Your Amazon S3 Resources. PutBucketAnalyticsConfiguration has the following +special errors: HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: +Invalid argument. HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations + Cause: You are attempting to create a new configuration but have already reached the +1,000-configuration limit. HTTP Error: HTTP 403 Forbidden Code: AccessDenied + Cause: You are not the owner of the specified bucket, or you do not have the +s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket. +The following operations are related to PutBucketAnalyticsConfiguration: +GetBucketAnalyticsConfiguration DeleteBucketAnalyticsConfiguration +ListBucketAnalyticsConfigurations # Arguments - `analytics_configuration`: The configuration and any analyses for the analytics filter. 
@@ -4242,8 +5184,8 @@ DeleteBucketAnalyticsConfiguration ListBucketAnalyticsConfigurations # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function put_bucket_analytics_configuration( AnalyticsConfiguration, Bucket, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -4284,26 +5226,27 @@ end put_bucket_cors(bucket, corsconfiguration) put_bucket_cors(bucket, corsconfiguration, params::Dict{String,<:Any}) -Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 -replaces it. To use this operation, you must be allowed to perform the s3:PutBucketCORS -action. By default, the bucket owner has this permission and can grant it to others. You -set this configuration on a bucket so that the bucket can service cross-origin requests. -For example, you might want to enable a request whose origin is http://www.example.com to -access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest -capability. To enable cross-origin resource sharing (CORS) on a bucket, you add the cors -subresource to the bucket. The cors subresource is an XML document in which you configure -rules that identify origins and the HTTP methods that can be executed on your bucket. The -document is limited to 64 KB in size. When Amazon S3 receives a cross-origin request (or a -pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the -bucket and uses the first CORSRule rule that matches the incoming browser request to enable -a cross-origin request. For a rule to match, the following conditions must be met: The -request's Origin header must match AllowedOrigin elements. The request method (for -example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in case of -a pre-flight OPTIONS request must be one of the AllowedMethod elements. Every header -specified in the Access-Control-Request-Headers request header of a pre-flight request must -match an AllowedHeader element. For more information about CORS, go to Enabling -Cross-Origin Resource Sharing in the Amazon S3 User Guide. The following operations are -related to PutBucketCors: GetBucketCors DeleteBucketCors RESTOPTIONSobject + This operation is not supported by directory buckets. Sets the cors configuration for +your bucket. If the configuration exists, Amazon S3 replaces it. To use this operation, you +must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has +this permission and can grant it to others. You set this configuration on a bucket so that +the bucket can service cross-origin requests. For example, you might want to enable a +request whose origin is http://www.example.com to access your Amazon S3 bucket at +my.example.bucket.com by using the browser's XMLHttpRequest capability. To enable +cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the +bucket. The cors subresource is an XML document in which you configure rules that identify +origins and the HTTP methods that can be executed on your bucket. The document is limited +to 64 KB in size. 
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS +request) against a bucket, it evaluates the cors configuration on the bucket and uses the +first CORSRule rule that matches the incoming browser request to enable a cross-origin +request. For a rule to match, the following conditions must be met: The request's Origin +header must match AllowedOrigin elements. The request method (for example, GET, PUT, +HEAD, and so on) or the Access-Control-Request-Method header in case of a pre-flight +OPTIONS request must be one of the AllowedMethod elements. Every header specified in the +Access-Control-Request-Headers request header of a pre-flight request must match an +AllowedHeader element. For more information about CORS, go to Enabling Cross-Origin +Resource Sharing in the Amazon S3 User Guide. The following operations are related to +PutBucketCors: GetBucketCors DeleteBucketCors RESTOPTIONSobject # Arguments - `bucket`: Specifies the bucket impacted by the corsconfiguration. @@ -4319,15 +5262,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_cors( Bucket, CORSConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -4363,22 +5306,24 @@ end put_bucket_encryption(bucket, server_side_encryption_configuration) put_bucket_encryption(bucket, server_side_encryption_configuration, params::Dict{String,<:Any}) -This action uses the encryption subresource to configure default encryption and Amazon S3 -Bucket Keys for an existing bucket. By default, all buckets have a default encryption -configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You -can optionally configure default encryption for a bucket by using server-side encryption -with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). If you -specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. 
-For information about bucket default encryption, see Amazon S3 bucket default encryption in -the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket -Keys in the Amazon S3 User Guide. This action requires Amazon Web Services Signature -Version 4. For more information, see Authenticating Requests (Amazon Web Services -Signature Version 4). To use this operation, you must have permissions to perform the -s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related -to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption + This operation is not supported by directory buckets. This action uses the encryption +subresource to configure default encryption and Amazon S3 Bucket Keys for an existing +bucket. By default, all buckets have a default encryption configuration that uses +server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure +default encryption for a bucket by using server-side encryption with Key Management Service +(KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys +(DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure +Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption +to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate +the KMS key ID provided in PutBucketEncryption requests. This action requires Amazon Web +Services Signature Version 4. For more information, see Authenticating Requests (Amazon +Web Services Signature Version 4). To use this operation, you must have permission to +perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by +default. The bucket owner can grant this permission to others. For more information about +permissions, see Permissions Related to Bucket Subresource Operations and Managing Access +Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The following +operations are related to PutBucketEncryption: GetBucketEncryption +DeleteBucketEncryption # Arguments - `bucket`: Specifies default encryption for a bucket using server-side encryption with @@ -4396,15 +5341,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. 
For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_encryption( Bucket, @@ -4448,20 +5393,21 @@ end put_bucket_intelligent_tiering_configuration(bucket, intelligent_tiering_configuration, id) put_bucket_intelligent_tiering_configuration(bucket, intelligent_tiering_configuration, id, params::Dict{String,<:Any}) -Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to -1,000 S3 Intelligent-Tiering configurations per bucket. The S3 Intelligent-Tiering storage -class is designed to optimize storage costs by automatically moving data to the most -cost-effective storage access tier, without performance impact or operational overhead. S3 -Intelligent-Tiering delivers automatic cost savings in three low latency and high -throughput access tiers. To get the lowest storage cost on data that can be accessed in -minutes to hours, you can choose to activate additional archiving capabilities. The S3 -Intelligent-Tiering storage class is the ideal storage class for data with unknown, -changing, or unpredictable access patterns, independent of object size or retention period. -If the size of an object is less than 128 KB, it is not monitored and not eligible for -auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent -Access tier rates in the S3 Intelligent-Tiering storage class. For more information, see -Storage class for automatically optimizing frequently and infrequently accessed objects. -Operations related to PutBucketIntelligentTieringConfiguration include: + This operation is not supported by directory buckets. Puts a S3 Intelligent-Tiering +configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering +configurations per bucket. The S3 Intelligent-Tiering storage class is designed to optimize +storage costs by automatically moving data to the most cost-effective storage access tier, +without performance impact or operational overhead. S3 Intelligent-Tiering delivers +automatic cost savings in three low latency and high throughput access tiers. To get the +lowest storage cost on data that can be accessed in minutes to hours, you can choose to +activate additional archiving capabilities. The S3 Intelligent-Tiering storage class is the +ideal storage class for data with unknown, changing, or unpredictable access patterns, +independent of object size or retention period. If the size of an object is less than 128 +KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, +but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +storage class. For more information, see Storage class for automatically optimizing +frequently and infrequently accessed objects. 
Operations related to +PutBucketIntelligentTieringConfiguration include: DeleteBucketIntelligentTieringConfiguration GetBucketIntelligentTieringConfiguration ListBucketIntelligentTieringConfigurations You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 @@ -4526,39 +5472,40 @@ end put_bucket_inventory_configuration(bucket, inventory_configuration, id) put_bucket_inventory_configuration(bucket, inventory_configuration, id, params::Dict{String,<:Any}) -This implementation of the PUT action adds an inventory configuration (identified by the -inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket. -Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly -basis, and the results are published to a flat file. The bucket that is inventoried is -called the source bucket, and the bucket where the inventory flat file is stored is called -the destination bucket. The destination bucket must be in the same Amazon Web Services -Region as the source bucket. When you configure an inventory for a source bucket, you -specify the destination bucket where you want the inventory to be stored, and whether to -generate the inventory daily or weekly. You can also configure what object metadata to -include and whether to inventory all object versions or only current versions. For more -information, see Amazon S3 Inventory in the Amazon S3 User Guide. You must create a bucket -policy on the destination bucket to grant permissions to Amazon S3 to write objects to the -bucket in the defined location. For an example policy, see Granting Permissions for Amazon -S3 Inventory and Storage Class Analysis. Permissions To use this operation, you must -have permission to perform the s3:PutInventoryConfiguration action. The bucket owner has -this permission by default and can grant this permission to others. The -s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory report that -includes all object metadata fields available and to specify the destination bucket to -store the inventory. A user with read access to objects in the destination bucket can also -access all object metadata fields that are available in the inventory report. To restrict -access to an inventory report, see Restricting access to an Amazon S3 Inventory report in -the Amazon S3 User Guide. For more information about the metadata fields available in S3 -Inventory, see Amazon S3 Inventory lists in the Amazon S3 User Guide. For more information -about permissions, see Permissions related to bucket subresource operations and Identity -and access management in Amazon S3 in the Amazon S3 User Guide. -PutBucketInventoryConfiguration has the following special errors: HTTP 400 Bad Request -Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad Request Error Code: -TooManyConfigurations Cause: You are attempting to create a new configuration but have -already reached the 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are -not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration -bucket permission to set the configuration on the bucket. The following operations are -related to PutBucketInventoryConfiguration: GetBucketInventoryConfiguration -DeleteBucketInventoryConfiguration ListBucketInventoryConfigurations + This operation is not supported by directory buckets. 
This implementation of the PUT +action adds an inventory configuration (identified by the inventory ID) to the bucket. You +can have up to 1,000 inventory configurations per bucket. Amazon S3 inventory generates +inventories of the objects in the bucket on a daily or weekly basis, and the results are +published to a flat file. The bucket that is inventoried is called the source bucket, and +the bucket where the inventory flat file is stored is called the destination bucket. The +destination bucket must be in the same Amazon Web Services Region as the source bucket. +When you configure an inventory for a source bucket, you specify the destination bucket +where you want the inventory to be stored, and whether to generate the inventory daily or +weekly. You can also configure what object metadata to include and whether to inventory all +object versions or only current versions. For more information, see Amazon S3 Inventory in +the Amazon S3 User Guide. You must create a bucket policy on the destination bucket to +grant permissions to Amazon S3 to write objects to the bucket in the defined location. For +an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class +Analysis. Permissions To use this operation, you must have permission to perform the +s3:PutInventoryConfiguration action. The bucket owner has this permission by default and +can grant this permission to others. The s3:PutInventoryConfiguration permission allows a +user to create an S3 Inventory report that includes all object metadata fields available +and to specify the destination bucket to store the inventory. A user with read access to +objects in the destination bucket can also access all object metadata fields that are +available in the inventory report. To restrict access to an inventory report, see +Restricting access to an Amazon S3 Inventory report in the Amazon S3 User Guide. For more +information about the metadata fields available in S3 Inventory, see Amazon S3 Inventory +lists in the Amazon S3 User Guide. For more information about permissions, see Permissions +related to bucket subresource operations and Identity and access management in Amazon S3 in +the Amazon S3 User Guide. PutBucketInventoryConfiguration has the following special +errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP +400 Bad Request Error Code: TooManyConfigurations Cause: You are attempting to create a +new configuration but have already reached the 1,000-configuration limit. HTTP 403 +Forbidden Error Cause: You are not the owner of the specified bucket, or you do not have +the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket. + The following operations are related to PutBucketInventoryConfiguration: +GetBucketInventoryConfiguration DeleteBucketInventoryConfiguration +ListBucketInventoryConfigurations # Arguments - `bucket`: The name of the bucket where the inventory configuration will be stored. @@ -4568,8 +5515,8 @@ DeleteBucketInventoryConfiguration ListBucketInventoryConfigurations # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). 
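As a hedged illustration of the optional-parameter pattern documented above, the sketch below passes the x-amz-expected-bucket-owner header through the trailing `params::Dict{String,<:Any}` argument. The bucket names, inventory ID, account ID, and the Dict that stands in for the InventoryConfiguration XML are placeholders, and `@service S3` assumes AWS.jl's high-level loader.

```julia
using AWS
@service S3   # assumed AWS.jl high-level loader

# Illustrative weekly CSV inventory; the nesting only approximates the
# InventoryConfiguration XML schema and is not exhaustive.
inventory_config = Dict(
    "Id" => "weekly-inventory",
    "IsEnabled" => "true",
    "IncludedObjectVersions" => "Current",
    "Schedule" => Dict("Frequency" => "Weekly"),
    "Destination" => Dict(
        "S3BucketDestination" => Dict(
            "Bucket" => "arn:aws:s3:::docs-example-destination-bucket",
            "Format" => "CSV",
        ),
    ),
)

# Positional order follows the generated signature, with the optional
# params Dict appended: (Bucket, InventoryConfiguration, id, params)
S3.put_bucket_inventory_configuration(
    "docs-example-source-bucket",
    inventory_config,
    "weekly-inventory",
    Dict("x-amz-expected-bucket-owner" => "111122223333"),  # placeholder account ID
)
```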
""" function put_bucket_inventory_configuration( Bucket, InventoryConfiguration, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -4610,30 +5557,30 @@ end put_bucket_lifecycle(bucket) put_bucket_lifecycle(bucket, params::Dict{String,<:Any}) - For an updated version of this API, see PutBucketLifecycleConfiguration. This version has -been deprecated. Existing lifecycle configurations will work. For new lifecycle -configurations, use the updated API. Creates a new lifecycle configuration for the bucket -or replaces an existing lifecycle configuration. For information about lifecycle -configuration, see Object Lifecycle Management in the Amazon S3 User Guide. By default, -all Amazon S3 resources, including buckets, objects, and related subresources (for example, -lifecycle configuration and website configuration) are private. Only the resource owner, -the Amazon Web Services account that created the resource, can access it. The resource -owner can optionally grant access permissions to others by writing an access policy. For -this operation, users must get the s3:PutLifecycleConfiguration permission. You can also -explicitly deny permissions. Explicit denial also supersedes any other permissions. If you -want to prevent users or accounts from removing or deleting objects from your bucket, you -must deny them permissions for the following actions: s3:DeleteObject -s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about -permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 -User Guide. For more examples of transitioning objects to storage classes such as -STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration. The following -operations are related to PutBucketLifecycle: GetBucketLifecycle(Deprecated) -GetBucketLifecycleConfiguration RestoreObject By default, a resource owner—in this -case, a bucket owner, which is the Amazon Web Services account that created the -bucket—can perform any of the operations. A resource owner can also grant others -permission to perform the operation. For more information, see the following topics in the -Amazon S3 User Guide: Specifying Permissions in a Policy Managing Access -Permissions to your Amazon S3 Resources + This operation is not supported by directory buckets. For an updated version of this +API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing +lifecycle configurations will work. For new lifecycle configurations, use the updated API. + Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle +configuration. For information about lifecycle configuration, see Object Lifecycle +Management in the Amazon S3 User Guide. By default, all Amazon S3 resources, including +buckets, objects, and related subresources (for example, lifecycle configuration and +website configuration) are private. Only the resource owner, the Amazon Web Services +account that created the resource, can access it. The resource owner can optionally grant +access permissions to others by writing an access policy. For this operation, users must +get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. +Explicit denial also supersedes any other permissions. 
If you want to prevent users or +accounts from removing or deleting objects from your bucket, you must deny them permissions +for the following actions: s3:DeleteObject s3:DeleteObjectVersion +s3:PutLifecycleConfiguration For more information about permissions, see Managing Access +Permissions to your Amazon S3 Resources in the Amazon S3 User Guide. For more examples of +transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of +Lifecycle Configuration. The following operations are related to PutBucketLifecycle: +GetBucketLifecycle(Deprecated) GetBucketLifecycleConfiguration RestoreObject By +default, a resource owner—in this case, a bucket owner, which is the Amazon Web Services +account that created the bucket—can perform any of the operations. A resource owner can +also grant others permission to perform the operation. For more information, see the +following topics in the Amazon S3 User Guide: Specifying Permissions in a Policy +Managing Access Permissions to your Amazon S3 Resources # Arguments - `bucket`: @@ -4644,15 +5591,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"LifecycleConfiguration"`: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_lifecycle(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -4678,35 +5625,36 @@ end put_bucket_lifecycle_configuration(bucket) put_bucket_lifecycle_configuration(bucket, params::Dict{String,<:Any}) -Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle -configuration. Keep in mind that this will overwrite an existing lifecycle configuration, -so if you want to retain any configuration details, they must be included in the new -lifecycle configuration. For information about lifecycle configuration, see Managing your -storage lifecycle. Bucket lifecycle configuration now supports specifying a lifecycle rule -using an object key name prefix, one or more object tags, or a combination of both. -Accordingly, this section describes the latest API. 
The previous version of the API -supported filtering based only on an object key name prefix, which is supported for -backward compatibility. For the related API description, see PutBucketLifecycle. Rules -You specify the lifecycle configuration in your request body. The lifecycle configuration -is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration -can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the -following: Filter identifying a subset of objects to which the rule applies. The filter -can be based on a key name prefix, object tags, or a combination of both. Status whether -the rule is in effect. One or more lifecycle transition and expiration actions that you -want Amazon S3 to perform on the objects identified by the filter. If the state of your -bucket is versioning-enabled or versioning-suspended, you can have many versions of the -same object (one current version and zero or more noncurrent versions). Amazon S3 provides -predefined actions that you can specify for current and noncurrent object versions. For -more information, see Object Lifecycle Management and Lifecycle Configuration Elements. -Permissions By default, all Amazon S3 resources are private, including buckets, objects, -and related subresources (for example, lifecycle configuration and website configuration). -Only the resource owner (that is, the Amazon Web Services account that created it) can -access the resource. The resource owner can optionally grant access permissions to others -by writing an access policy. For this operation, a user must get the -s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. Explicit -deny also supersedes any other permissions. If you want to block users or accounts from -removing or deleting objects from your bucket, you must deny them permissions for the -following actions: s3:DeleteObject s3:DeleteObjectVersion + This operation is not supported by directory buckets. Creates a new lifecycle +configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind +that this will overwrite an existing lifecycle configuration, so if you want to retain any +configuration details, they must be included in the new lifecycle configuration. For +information about lifecycle configuration, see Managing your storage lifecycle. Bucket +lifecycle configuration now supports specifying a lifecycle rule using an object key name +prefix, one or more object tags, object size, or any combination of these. Accordingly, +this section describes the latest API. The previous version of the API supported filtering +based only on an object key name prefix, which is supported for backward compatibility. For +the related API description, see PutBucketLifecycle. Rules You specify the lifecycle +configuration in your request body. The lifecycle configuration is specified as XML +consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 +rules. This limit is not adjustable. Each rule consists of the following: A filter +identifying a subset of objects to which the rule applies. The filter can be based on a key +name prefix, object tags, object size, or any combination of these. A status indicating +whether the rule is in effect. One or more lifecycle transition and expiration actions +that you want Amazon S3 to perform on the objects identified by the filter. 
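To make the rule elements just listed concrete, here is a minimal sketch of a lifecycle configuration sent through these bindings. The rule ID, prefix, transition window, and storage class are illustrative placeholders, the Dict only approximates the LifecycleConfiguration XML schema, and `@service S3` assumes AWS.jl's high-level loader; the configuration itself travels through the optional `"LifecycleConfiguration"` key documented further below.

```julia
using AWS
@service S3   # assumed AWS.jl high-level loader

# One hypothetical rule: filter on the "logs/" prefix, keep the rule Enabled,
# and transition matching objects to STANDARD_IA after 30 days.
lifecycle_rules = Dict(
    "Rule" => [Dict(
        "ID" => "archive-logs",
        "Filter" => Dict("Prefix" => "logs/"),
        "Status" => "Enabled",
        "Transition" => Dict("Days" => 30, "StorageClass" => "STANDARD_IA"),
    )],
)

# The generated signature takes the bucket positionally and the lifecycle
# rules via the optional params Dict.
S3.put_bucket_lifecycle_configuration(
    "docs-example-bucket",
    Dict("LifecycleConfiguration" => lifecycle_rules),
)
```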
If the state of +your bucket is versioning-enabled or versioning-suspended, you can have many versions of +the same object (one current version and zero or more noncurrent versions). Amazon S3 +provides predefined actions that you can specify for current and noncurrent object +versions. For more information, see Object Lifecycle Management and Lifecycle +Configuration Elements. Permissions By default, all Amazon S3 resources are private, +including buckets, objects, and related subresources (for example, lifecycle configuration +and website configuration). Only the resource owner (that is, the Amazon Web Services +account that created it) can access the resource. The resource owner can optionally grant +access permissions to others by writing an access policy. For this operation, a user must +get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. +An explicit deny also supersedes any other permissions. If you want to block users or +accounts from removing or deleting objects from your bucket, you must deny them permissions +for the following actions: s3:DeleteObject s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to PutBucketLifecycleConfiguration: Examples of Lifecycle Configuration @@ -4720,15 +5668,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"LifecycleConfiguration"`: Container for lifecycle rules. You can add as many as 1,000 rules. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_lifecycle_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -4756,25 +5704,26 @@ end put_bucket_logging(bucket, bucket_logging_status) put_bucket_logging(bucket, bucket_logging_status, params::Dict{String,<:Any}) -Set the logging parameters for a bucket and to specify permissions for who can view and -modify the logging parameters. All logs are saved to buckets in the same Amazon Web -Services Region as the source bucket. 
To set the logging status of a bucket, you must be -the bucket owner. The bucket owner is automatically granted FULL_CONTROL to all logs. You -use the Grantee request element to grant access to other people. The Permissions request -element specifies the kind of access the grantee has to the logs. If the target bucket for -log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use -the Grantee request element to grant access to others. Permissions can only be granted -using policies. For more information, see Permissions for server access log delivery in the -Amazon S3 User Guide. Grantee Values You can specify the person (grantee) to whom you're -assigning access rights (using request elements) in the following ways: By the person's -ID: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" + This operation is not supported by directory buckets. Set the logging parameters for a +bucket and to specify permissions for who can view and modify the logging parameters. All +logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To +set the logging status of a bucket, you must be the bucket owner. The bucket owner is +automatically granted FULL_CONTROL to all logs. You use the Grantee request element to +grant access to other people. The Permissions request element specifies the kind of access +the grantee has to the logs. If the target bucket for log delivery uses the bucket owner +enforced setting for S3 Object Ownership, you can't use the Grantee request element to +grant access to others. Permissions can only be granted using policies. For more +information, see Permissions for server access log delivery in the Amazon S3 User Guide. +Grantee Values You can specify the person (grantee) to whom you're assigning access rights +(by using request elements) in the following ways: By the person's ID: <Grantee +xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName>< -;>GranteesEmail<></DisplayName> </Grantee> DisplayName is optional +;>GranteesEmail<></DisplayName> </Grantee> DisplayName is optional and ignored in the request. By Email address: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<> ;</EmailAddress></Grantee> The grantee is resolved to the CanonicalUser and, -in a response to a GET Object acl request, appears as the CanonicalUser. By URI: +in a response to a GETObjectAcl request, appears as the CanonicalUser. By URI: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/Authenticate dUsers<></URI></Grantee> To enable logging, you use LoggingEnabled @@ -4796,15 +5745,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. 
This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_logging( Bucket, BucketLoggingStatus; aws_config::AbstractAWSConfig=global_aws_config() @@ -4842,21 +5791,22 @@ end put_bucket_metrics_configuration(bucket, metrics_configuration, id) put_bucket_metrics_configuration(bucket, metrics_configuration, id, params::Dict{String,<:Any}) -Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. -You can have up to 1,000 metrics configurations per bucket. If you're updating an existing -metrics configuration, note that this is a full replacement of the existing metrics -configuration. If you don't include the elements you want to keep, they are erased. To use -this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. -The bucket owner has this permission by default. The bucket owner can grant this permission -to others. For more information about permissions, see Permissions Related to Bucket -Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. For -information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with -Amazon CloudWatch. The following operations are related to PutBucketMetricsConfiguration: - DeleteBucketMetricsConfiguration GetBucketMetricsConfiguration -ListBucketMetricsConfigurations GetBucketLifecycle has the following special error: -Error code: TooManyConfigurations Description: You are attempting to create a new -configuration but have already reached the 1,000-configuration limit. HTTP Status Code: -HTTP 400 Bad Request + This operation is not supported by directory buckets. Sets a metrics configuration +(specified by the metrics configuration ID) for the bucket. You can have up to 1,000 +metrics configurations per bucket. If you're updating an existing metrics configuration, +note that this is a full replacement of the existing metrics configuration. If you don't +include the elements you want to keep, they are erased. To use this operation, you must +have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has +this permission by default. The bucket owner can grant this permission to others. For more +information about permissions, see Permissions Related to Bucket Subresource Operations and +Managing Access Permissions to Your Amazon S3 Resources. For information about CloudWatch +request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch. 
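A short, hedged sketch of the metrics operation described here may help; the bucket name, configuration ID, and prefix filter are placeholders, the Dict only approximates the MetricsConfiguration XML schema, and `@service S3` assumes AWS.jl's high-level loader.

```julia
using AWS
@service S3   # assumed AWS.jl high-level loader

# Publish CloudWatch request metrics only for objects under "docs/";
# all values are illustrative.
metrics_config = Dict(
    "Id" => "docs-requests",
    "Filter" => Dict("Prefix" => "docs/"),
)

# Positional order follows the generated signature:
# put_bucket_metrics_configuration(Bucket, MetricsConfiguration, id)
S3.put_bucket_metrics_configuration(
    "docs-example-bucket", metrics_config, "docs-requests"
)
```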
The following +operations are related to PutBucketMetricsConfiguration: +DeleteBucketMetricsConfiguration GetBucketMetricsConfiguration +ListBucketMetricsConfigurations PutBucketMetricsConfiguration has the following special +error: Error code: TooManyConfigurations Description: You are attempting to create a +new configuration but have already reached the 1,000-configuration limit. HTTP Status +Code: HTTP 400 Bad Request # Arguments - `bucket`: The name of the bucket for which the metrics configuration is set. @@ -4867,8 +5817,8 @@ HTTP 400 Bad Request # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function put_bucket_metrics_configuration( Bucket, MetricsConfiguration, id; aws_config::AbstractAWSConfig=global_aws_config() @@ -4909,7 +5859,8 @@ end put_bucket_notification(bucket, notification_configuration) put_bucket_notification(bucket, notification_configuration, params::Dict{String,<:Any}) - No longer used, see the PutBucketNotificationConfiguration operation. + This operation is not supported by directory buckets. No longer used, see the +PutBucketNotificationConfiguration operation. # Arguments - `bucket`: The name of the bucket. @@ -4921,15 +5872,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. 
""" function put_bucket_notification( Bucket, NotificationConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -4967,26 +5918,27 @@ end put_bucket_notification_configuration(bucket, notification_configuration) put_bucket_notification_configuration(bucket, notification_configuration, params::Dict{String,<:Any}) -Enables notifications of specified events for a bucket. For more information about event -notifications, see Configuring Event Notifications. Using this API, you can replace an -existing notification configuration. The configuration is an XML file that defines the -event types that you want Amazon S3 to publish and the destination where you want Amazon S3 -to publish an event notification when it detects an event of the specified type. By -default, your bucket has no event notifications configured. That is, the notification -configuration will be an empty NotificationConfiguration. -<NotificationConfiguration> </NotificationConfiguration> This action -replaces the existing notification configuration with the configuration you include in the -request body. After Amazon S3 receives this request, it first verifies that any Amazon -Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) -destination exists, and that the bucket owner has permission to publish to it by sending a -test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda -function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 -bucket. For more information, see Configuring Notifications for Amazon S3 Events. You can -disable notifications by adding the empty NotificationConfiguration element. For more -information about the number of event notification configurations that you can create per -bucket, see Amazon S3 service quotas in Amazon Web Services General Reference. By default, -only the bucket owner can configure notifications on a bucket. However, bucket owners can -use a bucket policy to grant permission to other users to set this configuration with + This operation is not supported by directory buckets. Enables notifications of specified +events for a bucket. For more information about event notifications, see Configuring Event +Notifications. Using this API, you can replace an existing notification configuration. The +configuration is an XML file that defines the event types that you want Amazon S3 to +publish and the destination where you want Amazon S3 to publish an event notification when +it detects an event of the specified type. By default, your bucket has no event +notifications configured. That is, the notification configuration will be an empty +NotificationConfiguration. <NotificationConfiguration> +</NotificationConfiguration> This action replaces the existing notification +configuration with the configuration you include in the request body. After Amazon S3 +receives this request, it first verifies that any Amazon Simple Notification Service +(Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the +bucket owner has permission to publish to it by sending a test notification. In the case of +Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon +S3 permission to invoke the function from the Amazon S3 bucket. For more information, see +Configuring Notifications for Amazon S3 Events. You can disable notifications by adding the +empty NotificationConfiguration element. 
For more information about the number of event +notification configurations that you can create per bucket, see Amazon S3 service quotas in +Amazon Web Services General Reference. By default, only the bucket owner can configure +notifications on a bucket. However, bucket owners can use a bucket policy to grant +permission to other users to set this configuration with the required s3:PutBucketNotification permission. The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 @@ -5005,8 +5957,8 @@ GetBucketNotificationConfiguration # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-skip-destination-validation"`: Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or false value. """ @@ -5046,10 +5998,11 @@ end put_bucket_ownership_controls(bucket, ownership_controls) put_bucket_ownership_controls(bucket, ownership_controls, params::Dict{String,<:Any}) -Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you -must have the s3:PutBucketOwnershipControls permission. For more information about Amazon -S3 permissions, see Specifying permissions in a policy. For information about Amazon S3 -Object Ownership, see Using object ownership. The following operations are related to + This operation is not supported by directory buckets. Creates or modifies +OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the +s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, +see Specifying permissions in a policy. For information about Amazon S3 Object Ownership, +see Using object ownership. The following operations are related to PutBucketOwnershipControls: GetBucketOwnershipControls DeleteBucketOwnershipControls @@ -5064,8 +6017,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). """ function put_bucket_ownership_controls( Bucket, OwnershipControls; aws_config::AbstractAWSConfig=global_aws_config() @@ -5101,42 +6054,76 @@ end put_bucket_policy(bucket, policy) put_bucket_policy(bucket, policy, params::Dict{String,<:Any}) -Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity -other than the root user of the Amazon Web Services account that owns the bucket, the -calling identity must have the PutBucketPolicy permissions on the specified bucket and -belong to the bucket owner's account in order to use this operation. 
If you don't have -PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the -correct permissions, but you're not using an identity that belongs to the bucket owner's -account, Amazon S3 returns a 405 Method Not Allowed error. To ensure that bucket owners -don't inadvertently lock themselves out of their own buckets, the root principal in a -bucket owner's Amazon Web Services account can perform the GetBucketPolicy, -PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly -denies the root principal's access. Bucket owner root principals can only be blocked from -performing these API actions by VPC endpoint policies and Amazon Web Services Organizations -policies. For more information, see Bucket policy examples. The following operations are -related to PutBucketPolicy: CreateBucket DeleteBucket +Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - For +directory buckets, you must make requests for this API operation to the Regional endpoint. +These endpoints support path-style requests in the format +https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style +requests aren't supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. Permissions If you are using an identity other than the root user +of the Amazon Web Services account that owns the bucket, the calling identity must both +have the PutBucketPolicy permissions on the specified bucket and belong to the bucket +owner's account in order to use this operation. If you don't have PutBucketPolicy +permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct +permissions, but you're not using an identity that belongs to the bucket owner's account, +Amazon S3 returns a 405 Method Not Allowed error. To ensure that bucket owners don't +inadvertently lock themselves out of their own buckets, the root principal in a bucket +owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and +DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root +principal's access. Bucket owner root principals can only be blocked from performing these +API actions by VPC endpoint policies and Amazon Web Services Organizations policies. +General purpose bucket permissions - The s3:PutBucketPolicy permission is required in a +policy. For more information about general purpose buckets bucket policies, see Using +Bucket Policies and User Policies in the Amazon S3 User Guide. Directory bucket +permissions - To grant access to this API operation, you must have the +s3express:PutBucketPolicy permission in an IAM identity-based policy instead of a bucket +policy. Cross-account access to this API operation isn't supported. This operation can only +be performed by the Amazon Web Services account that owns the resource. For more +information about directory bucket policies and permissions, see Amazon Web Services +Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. + Example bucket policies General purpose buckets example bucket policies - See Bucket +policy examples in the Amazon S3 User Guide. Directory bucket example bucket policies - +See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host +header syntax Directory buckets - The HTTP Host header syntax is +s3express-control.region.amazonaws.com. 
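Since the regenerated text above now distinguishes general purpose and directory buckets, a small general-purpose sketch may be useful; the bucket name, account ID, and policy statement are placeholders, and `@service S3` assumes AWS.jl's high-level loader.

```julia
using AWS
@service S3   # assumed AWS.jl high-level loader

# Hypothetical policy allowing one account to read objects from the bucket.
policy = """
{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "AllowGetFromAccount",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::docs-example-bucket/*"
  }]
}
"""

# Positional order follows the generated signature: put_bucket_policy(Bucket, Policy)
S3.put_bucket_policy("docs-example-bucket", policy)
```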
The following operations are related to +PutBucketPolicy: CreateBucket DeleteBucket # Arguments -- `bucket`: The name of the bucket. -- `policy`: The bucket policy as a JSON document. +- `bucket`: The name of the bucket. Directory buckets - When you use this operation with + a directory bucket, you must use path-style requests in the format + https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style + requests aren't supported. Directory bucket names must be unique in the chosen Availability + Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide +- `policy`: The bucket policy as a JSON document. For directory buckets, the only IAM + action supported in the bucket policy is s3express:CreateSession. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Content-MD5"`: The MD5 hash of the request body. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated - automatically. + automatically. This functionality is not supported for directory buckets. - `"x-amz-confirm-remove-self-bucket-access"`: Set this parameter to true to confirm that - you want to remove your permissions to change this bucket policy in the future. + you want to remove your permissions to change this bucket policy in the future. This + functionality is not supported for directory buckets. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this + header is not supported in this API operation. If you specify this header, the request + fails with the HTTP status code 501 Not Implemented. - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm + header, replace algorithm with the supported algorithm from the following list: CRC32 + CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon + S3 User Guide. 
If the individual checksum value you provide through + x-amz-checksum-algorithm doesn't match the checksum algorithm you set through + x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter + and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm + . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default + checksum algorithm that's used for performance. """ function put_bucket_policy( Bucket, Policy; aws_config::AbstractAWSConfig=global_aws_config() @@ -5168,18 +6155,20 @@ end put_bucket_replication(bucket, replication_configuration) put_bucket_replication(bucket, replication_configuration, params::Dict{String,<:Any}) - Creates a replication configuration or replaces an existing one. For more information, see -Replication in the Amazon S3 User Guide. Specify the replication configuration in the -request body. In the replication configuration, you provide the name of the destination -bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon -S3 can assume to replicate objects on your behalf, and other relevant information. A -replication configuration must include at least one rule, and can contain a maximum of -1,000. Each rule identifies a subset of objects to replicate by filtering the objects in -the source bucket. To choose additional subsets of objects to replicate, add a rule for -each subset. To specify a subset of the objects in the source bucket to apply a replication -rule to, add the Filter element as a child of the Rule element. You can filter objects -based on an object key prefix, one or more object tags, or both. When you add the Filter -element in the configuration, you must also add the following elements: + This operation is not supported by directory buckets. Creates a replication +configuration or replaces an existing one. For more information, see Replication in the +Amazon S3 User Guide. Specify the replication configuration in the request body. In the +replication configuration, you provide the name of the destination bucket or buckets where +you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to +replicate objects on your behalf, and other relevant information. You can invoke this +request for a specific Amazon Web Services Region by using the aws:RequestedRegion +condition key. A replication configuration must include at least one rule, and can contain +a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the +objects in the source bucket. To choose additional subsets of objects to replicate, add a +rule for each subset. To specify a subset of the objects in the source bucket to apply a +replication rule to, add the Filter element as a child of the Rule element. You can filter +objects based on an object key prefix, one or more object tags, or both. When you add the +Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority. If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility. For information about enabling versioning on @@ -5213,15 +6202,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-bucket-object-lock-token"`: A token to allow Object Lock to be enabled for an existing bucket. 
- `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_replication( Bucket, ReplicationConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -5259,11 +6248,12 @@ end put_bucket_request_payment(bucket, request_payment_configuration) put_bucket_request_payment(bucket, request_payment_configuration, params::Dict{String,<:Any}) -Sets the request payment configuration for a bucket. By default, the bucket owner pays for -downloads from the bucket. This configuration parameter enables the bucket owner (only) to -specify that the person requesting the download will be charged for the download. For more -information, see Requester Pays Buckets. The following operations are related to -PutBucketRequestPayment: CreateBucket GetBucketRequestPayment + This operation is not supported by directory buckets. Sets the request payment +configuration for a bucket. By default, the bucket owner pays for downloads from the +bucket. This configuration parameter enables the bucket owner (only) to specify that the +person requesting the download will be charged for the download. For more information, see +Requester Pays Buckets. The following operations are related to PutBucketRequestPayment: +CreateBucket GetBucketRequestPayment # Arguments - `bucket`: The bucket name. @@ -5277,15 +6267,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. 
When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_request_payment( Bucket, RequestPaymentConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -5325,29 +6315,27 @@ end put_bucket_tagging(bucket, tagging) put_bucket_tagging(bucket, tagging, params::Dict{String,<:Any}) -Sets the tags for a bucket. Use tags to organize your Amazon Web Services bill to reflect -your own cost structure. To do this, sign up to get your Amazon Web Services account bill -with tag key values included. Then, to see the cost of combined resources, organize your -billing information according to resources with the same tag key values. For example, you -can tag several resources with a specific application name, and then organize your billing -information to see the total cost of that application across several services. For more -information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket -Tags. When this operation sets the tags for a bucket, it will overwrite any current tags -the bucket already has. You cannot use this operation to add tags to an existing list of -tags. To use this operation, you must have permissions to perform the s3:PutBucketTagging -action. The bucket owner has this permission by default and can grant this permission to -others. For more information about permissions, see Permissions Related to Bucket -Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. -PutBucketTagging has the following special errors: Error code: InvalidTagError -Description: The tag provided was not a valid tag. This error can occur if the tag did not -pass input validation. For information about tag restrictions, see User-Defined Tag -Restrictions and Amazon Web Services-Generated Cost Allocation Tag Restrictions. Error -code: MalformedXMLError Description: The XML provided does not match the schema. -Error code: OperationAbortedError Description: A conflicting conditional action is -currently in progress against this resource. Please try again. Error code: -InternalError Description: The service was unable to apply the provided tag to the -bucket. The following operations are related to PutBucketTagging: GetBucketTagging - DeleteBucketTagging + This operation is not supported by directory buckets. Sets the tags for a bucket. Use +tags to organize your Amazon Web Services bill to reflect your own cost structure. To do +this, sign up to get your Amazon Web Services account bill with tag key values included. +Then, to see the cost of combined resources, organize your billing information according to +resources with the same tag key values. 
For example, you can tag several resources with a +specific application name, and then organize your billing information to see the total cost +of that application across several services. For more information, see Cost Allocation and +Tagging and Using Cost Allocation in Amazon S3 Bucket Tags. When this operation sets the +tags for a bucket, it will overwrite any current tags the bucket already has. You cannot +use this operation to add tags to an existing list of tags. To use this operation, you +must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this +permission by default and can grant this permission to others. For more information about +permissions, see Permissions Related to Bucket Subresource Operations and Managing Access +Permissions to Your Amazon S3 Resources. PutBucketTagging has the following special +errors. For more Amazon S3 errors see, Error Responses. InvalidTag - The tag provided +was not a valid tag. This error can occur if the tag did not pass input validation. For +more information, see Using Cost Allocation in Amazon S3 Bucket Tags. MalformedXML - The +XML provided does not match the schema. OperationAborted - A conflicting conditional +action is currently in progress against this resource. Please try again. InternalError - +The service was unable to apply the provided tag to the bucket. The following operations +are related to PutBucketTagging: GetBucketTagging DeleteBucketTagging # Arguments - `bucket`: The bucket name. @@ -5361,15 +6349,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_tagging( Bucket, Tagging; aws_config::AbstractAWSConfig=global_aws_config() @@ -5401,18 +6389,19 @@ end put_bucket_versioning(bucket, versioning_configuration) put_bucket_versioning(bucket, versioning_configuration, params::Dict{String,<:Any}) -Sets the versioning state of an existing bucket. 
You can set the versioning state with one -of the following values: Enabled—Enables versioning for the objects in the bucket. All -objects added to the bucket receive a unique version ID. Suspended—Disables versioning -for the objects in the bucket. All objects added to the bucket receive the version ID null. -If the versioning state has never been set on a bucket, it has no versioning state; a -GetBucketVersioning request does not return a versioning state value. In order to enable -MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable -MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request -header and the Status and the MfaDelete request elements in a request to set the versioning -state of the bucket. If you have an object expiration lifecycle configuration in your -non-versioned bucket and you want to maintain the same permanent delete behavior when you -enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration + This operation is not supported by directory buckets. Sets the versioning state of an +existing bucket. You can set the versioning state with one of the following values: +Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket +receive a unique version ID. Suspended—Disables versioning for the objects in the +bucket. All objects added to the bucket receive the version ID null. If the versioning +state has never been set on a bucket, it has no versioning state; a GetBucketVersioning +request does not return a versioning state value. In order to enable MFA Delete, you must +be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the +bucket versioning configuration, you must include the x-amz-mfa request header and the +Status and the MfaDelete request elements in a request to set the versioning state of the +bucket. If you have an object expiration lifecycle configuration in your non-versioned +bucket and you want to maintain the same permanent delete behavior when you enable +versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning. The @@ -5431,17 +6420,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-mfa"`: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. 
For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_versioning( Bucket, VersioningConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -5479,27 +6468,28 @@ end put_bucket_website(bucket, website_configuration) put_bucket_website(bucket, website_configuration, params::Dict{String,<:Any}) -Sets the configuration of the website that is specified in the website subresource. To -configure a bucket as a website, you can add this subresource on the bucket with website -configuration information such as the file name of the index document and any redirect -rules. For more information, see Hosting Websites on Amazon S3. This PUT action requires -the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the -website attached to a bucket; however, bucket owners can allow other users to set the -website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite -permission. To redirect all website requests sent to the bucket's website endpoint, you add -a website configuration with the following elements. Because all requests are sent to -another website, you don't need to provide index document name for the bucket. -WebsiteConfiguration RedirectAllRequestsTo HostName Protocol If you want -granular control over redirects, you can use the following elements to add routing rules -that describe conditions for redirecting requests and information about the redirect -destination. In this case, the website configuration must provide an index document for the -bucket, because some requests might not be redirected. WebsiteConfiguration -IndexDocument Suffix ErrorDocument Key RoutingRules RoutingRule -Condition HttpErrorCodeReturnedEquals KeyPrefixEquals Redirect Protocol -HostName ReplaceKeyPrefixWith ReplaceKeyWith HttpRedirectCode Amazon S3 has -a limitation of 50 routing rules per website configuration. If you require more than 50 -routing rules, you can use object redirect. For more information, see Configuring an Object -Redirect in the Amazon S3 User Guide. + This operation is not supported by directory buckets. Sets the configuration of the +website that is specified in the website subresource. To configure a bucket as a website, +you can add this subresource on the bucket with website configuration information such as +the file name of the index document and any redirect rules. For more information, see +Hosting Websites on Amazon S3. This PUT action requires the S3:PutBucketWebsite permission. +By default, only the bucket owner can configure the website attached to a bucket; however, +bucket owners can allow other users to set the website configuration by writing a bucket +policy that grants them the S3:PutBucketWebsite permission. To redirect all website +requests sent to the bucket's website endpoint, you add a website configuration with the +following elements. 
Because all requests are sent to another website, you don't need to +provide index document name for the bucket. WebsiteConfiguration +RedirectAllRequestsTo HostName Protocol If you want granular control over +redirects, you can use the following elements to add routing rules that describe conditions +for redirecting requests and information about the redirect destination. In this case, the +website configuration must provide an index document for the bucket, because some requests +might not be redirected. WebsiteConfiguration IndexDocument Suffix +ErrorDocument Key RoutingRules RoutingRule Condition +HttpErrorCodeReturnedEquals KeyPrefixEquals Redirect Protocol HostName +ReplaceKeyPrefixWith ReplaceKeyWith HttpRedirectCode Amazon S3 has a limitation +of 50 routing rules per website configuration. If you require more than 50 routing rules, +you can use object redirect. For more information, see Configuring an Object Redirect in +the Amazon S3 User Guide. The maximum request length is limited to 128 KB. # Arguments - `bucket`: The bucket name. @@ -5513,15 +6503,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_bucket_website( Bucket, WebsiteConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -5559,79 +6549,84 @@ end put_object(bucket, key) put_object(bucket, key, params::Dict{String,<:Any}) -Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to -it. Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 -added the entire object to the bucket. You cannot use PutObject to only update a single -piece of metadata for an existing object. You must put the entire object with updated -metadata if you want to update some values. Amazon S3 is a distributed system. If it -receives multiple write requests for the same object simultaneously, it overwrites all but -the last object written. 
To prevent objects from being deleted or overwritten, you can use -Amazon S3 Object Lock. To ensure that data is not corrupted traversing the network, use the -Content-MD5 header. When you use this header, Amazon S3 checks the object against the -provided MD5 value and, if they do not match, returns an error. Additionally, you can -calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the -calculated MD5 value. To successfully complete the PutObject request, you must have the -s3:PutObject in your IAM permissions. To successfully change the objects acl of your -PutObject request, you must have the s3:PutObjectAcl in your IAM permissions. To -successfully set the tag-set with your PutObject request, you must have the -s3:PutObjectTagging in your IAM permissions. The Content-MD5 header is required for any -request to upload an object with a retention period configured using Amazon S3 Object Lock. -For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the -Amazon S3 User Guide. You have three mutually exclusive options to protect data using -server-side encryption in Amazon S3, depending on how you choose to manage the encryption -keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon -Web Services KMS keys (SSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts -data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You -can optionally tell Amazon S3 to encrypt data at by rest using server-side encryption with -other key options. For more information, see Using Server-Side Encryption. When adding a -new object, you can use headers to grant ACL-based permissions to individual Amazon Web -Services accounts or to predefined groups defined by Amazon S3. These permissions are then -added to the ACL on the object. By default, all objects are private. Only the owner has -full access control. For more information, see Access Control List (ACL) Overview and -Managing ACLs Using the REST API. If the bucket that you're uploading objects to uses the -bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer -affect permissions. Buckets that use this setting only accept PUT requests that don't -specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the -bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML -format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon -Web Services accounts) fail and return a 400 error with the error code -AccessControlListNotSupported. For more information, see Controlling ownership of objects -and disabling ACLs in the Amazon S3 User Guide. If your bucket uses the bucket owner -enforced setting for Object Ownership, all objects written to the bucket by any account -will be owned by the bucket owner. By default, Amazon S3 uses the STANDARD Storage Class -to store newly created objects. The STANDARD storage class provides high durability and -high availability. Depending on performance needs, you can specify a different Storage -Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, -see Storage Classes in the Amazon S3 User Guide. If you enable versioning for a bucket, -Amazon S3 automatically generates a unique version ID for the object being stored. Amazon -S3 returns this ID in the response. 
When you enable versioning for a bucket, if Amazon S3 -receives multiple write requests for the same object simultaneously, it stores all of the -objects. For more information about versioning, see Adding Objects to Versioning-Enabled -Buckets. For information about returning the versioning state of a bucket, see -GetBucketVersioning. For more information about related Amazon S3 APIs, see the following: - CopyObject DeleteObject +Adds an object to a bucket. Amazon S3 never adds partial objects; if you receive a +success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject +to only update a single piece of metadata for an existing object. You must put the entire +object with updated metadata if you want to update some values. If your bucket uses the +bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect +permissions. All objects written to the bucket by any account will be owned by the bucket +owner. Directory buckets - For directory buckets, you must make requests for this API +operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in +the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +requests are not supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. Amazon S3 is a distributed system. If it receives multiple write +requests for the same object simultaneously, it overwrites all but the last object written. +However, Amazon S3 provides features that can modify this behavior: S3 Object Lock - To +prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the +Amazon S3 User Guide. This functionality is not supported for directory buckets. S3 +Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write +requests for the same object simultaneously, it stores all versions of the objects. For +each write request that is made to the same object, Amazon S3 automatically generates a +unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or +delete any version of the object. For more information about versioning, see Adding Objects +to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning +the versioning state of a bucket, see GetBucketVersioning. This functionality is not +supported for directory buckets. Permissions General purpose bucket permissions - +The following permissions are required in your policies when your PutObject request +includes specific headers. s3:PutObject - To successfully complete the PutObject +request, you must always have the s3:PutObject permission on a bucket to add an object to +it. s3:PutObjectAcl - To successfully change the objects ACL of your PutObject +request, you must have the s3:PutObjectAcl. s3:PutObjectTagging - To successfully set +the tag-set with your PutObject request, you must have the s3:PutObjectTagging. +Directory bucket permissions - To grant access to this API operation on a directory bucket, +we recommend that you use the CreateSession API operation for session-based +authorization. Specifically, you grant the s3express:CreateSession permission to the +directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the +CreateSession API call on the bucket to obtain a session token. With the session token in +your request header, you can make API requests to this operation. 
After the session token +expires, you make another CreateSession API call to generate a new session token for use. +Amazon Web Services CLI or SDKs create session and refresh the session token automatically +to avoid service interruptions when a session expires. For more information about +authorization, see CreateSession . Data integrity with Content-MD5 General purpose +bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 +header. When you use this header, Amazon S3 checks the object against the provided MD5 +value and, if they do not match, Amazon S3 returns an error. Alternatively, when the +object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to +Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory bucket - +This functionality is not supported for directory buckets. HTTP Host header syntax +Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. For more information about related +Amazon S3 APIs, see the following: CopyObject DeleteObject # Arguments -- `bucket`: The bucket name to which the PUT action was initiated. When using this action - with an access point, you must direct requests to the access point hostname. The access - point hostname takes the form - AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with - an access point through the Amazon Web Services SDKs, you provide the access point ARN in - place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. +- `bucket`: The bucket name to which the PUT action was initiated. Directory buckets - + When you use this operation with a directory bucket, you must use virtual-hosted-style + requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style + requests are not supported. Directory bucket names must be unique in the chosen + Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming + restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points + - When you use this action with an access point, you must provide the alias of the access + point in place of the bucket name or specify the access point ARN. When using the access + point ARN, you must direct requests to the access point hostname. The access point hostname + takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using + this action with an access point through the Amazon Web Services SDKs, you provide the + access point ARN in place of the bucket name. For more information about access point ARNs, + see Using access points in the Amazon S3 User Guide. Access points and Object Lambda + access points are not supported by directory buckets. 
S3 on Outposts - When you use this + action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + The S3 on Outposts hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the PUT action was initiated. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Body"`: Object data. -- `"Cache-Control"`: Can be used to specify caching behavior along the request/reply - chain. For more information, see - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9. +- `"Cache-Control"`: Can be used to specify caching behavior along the request/reply chain. + For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9. - `"Content-Disposition"`: Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4. - `"Content-Encoding"`: Specifies what content encodings have been applied to the object @@ -5646,13 +6641,31 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more - information about REST request authentication, see REST Authentication. + information about REST request authentication, see REST Authentication. The Content-MD5 + header is required for any request to upload an object with a retention period configured + using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon + S3 Object Lock Overview in the Amazon S3 User Guide. This functionality is not supported + for directory buckets. - `"Content-Type"`: A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type. - `"Expires"`: The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3. - `"x-amz-acl"`: The canned ACL to apply to the object. For more information, see Canned - ACL. This action is not supported by Amazon S3 on Outposts. + ACL in the Amazon S3 User Guide. When adding a new object, you can use headers to grant + ACL-based permissions to individual Amazon Web Services accounts or to predefined groups + defined by Amazon S3. These permissions are then added to the ACL on the object. By + default, all objects are private. Only the owner has full access control. For more + information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API in + the Amazon S3 User Guide. If the bucket that you're uploading objects to uses the bucket + owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect + permissions. Buckets that use this setting only accept PUT requests that don't specify an + ACL or PUT requests that specify bucket owner full control ACLs, such as the + bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML + format. 
PUT requests that contain other ACLs (for example, custom grants to certain Amazon + Web Services accounts) fail and return a 400 error with the error code + AccessControlListNotSupported. For more information, see Controlling ownership of objects + and disabling ACLs in the Amazon S3 User Guide. This functionality is not supported for + directory buckets. This functionality is not supported for Amazon S3 on Outposts. - `"x-amz-checksum-crc32"`: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking @@ -5670,75 +6683,109 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-grant-full-control"`: Gives the grantee READ, READ_ACP, and WRITE_ACP permissions - on the object. This action is not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read"`: Allows grantee to read the object data and its metadata. This - action is not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read-acp"`: Allows grantee to read the object ACL. This action is not - supported by Amazon S3 on Outposts. + on the object. This functionality is not supported for directory buckets. This + functionality is not supported for Amazon S3 on Outposts. +- `"x-amz-grant-read"`: Allows grantee to read the object data and its metadata. This + functionality is not supported for directory buckets. This functionality is not supported + for Amazon S3 on Outposts. +- `"x-amz-grant-read-acp"`: Allows grantee to read the object ACL. This functionality is + not supported for directory buckets. This functionality is not supported for Amazon S3 on + Outposts. - `"x-amz-grant-write-acp"`: Allows grantee to write the ACL for the applicable object. - This action is not supported by Amazon S3 on Outposts. + This functionality is not supported for directory buckets. This functionality is not + supported for Amazon S3 on Outposts. - `"x-amz-meta-"`: A map of metadata to store with the object in S3. - `"x-amz-object-lock-legal-hold"`: Specifies whether a legal hold will be applied to this - object. For more information about S3 Object Lock, see Object Lock. + object. For more information about S3 Object Lock, see Object Lock in the Amazon S3 User + Guide. This functionality is not supported for directory buckets. - `"x-amz-object-lock-mode"`: The Object Lock mode that you want to apply to this object. + This functionality is not supported for directory buckets. - `"x-amz-object-lock-retain-until-date"`: The date and time when you want this object's - Object Lock to expire. Must be formatted as a timestamp parameter. + Object Lock to expire. Must be formatted as a timestamp parameter. This functionality is + not supported for directory buckets. - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. 
This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. -- `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing - this object in Amazon S3 (for example, AES256, aws:kms). + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm + header, replace algorithm with the supported algorithm from the following list: CRC32 + CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon + S3 User Guide. If the individual checksum value you provide through + x-amz-checksum-algorithm doesn't match the checksum algorithm you set through + x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter + and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm + . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default + checksum algorithm that's used for performance. +- `"x-amz-server-side-encryption"`: The server-side encryption algorithm that was used when + you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). General + purpose buckets - You have four mutually exclusive options to protect data using + server-side encryption in Amazon S3, depending on how you choose to manage the encryption + keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon + Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 + encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by + default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side + encryption with other key options. For more information, see Using Server-Side Encryption + in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the + server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. - `"x-amz-server-side-encryption-aws-kms-key-id"`: If x-amz-server-side-encryption has a - valid value of aws:kms, this header specifies the ID of the Amazon Web Services Key - Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that - was used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not - provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services - managed key to protect the data. If the KMS key does not exist in the same account issuing - the command, you must use the full ARN and not just the ID. + valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or + Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key + that was used for the object. 
If you specify x-amz-server-side-encryption:aws:kms or + x-amz-server-side-encryption:aws:kms:dsse, but do not provide + x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed + key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's + issuing the command, you must use the full ARN and not just the ID. This functionality is + not supported for directory buckets. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 + Bucket Key for object encryption with SSE-KMS. Specifying this header with a PUT action + doesn’t affect bucket-level settings for S3 Bucket Key. This functionality is not + supported for directory buckets. - `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services - KMS for future GetObject or CopyObject operations on this object. -- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use to - when encrypting the object (for example, AES256). + KMS for future GetObject or CopyObject operations on this object. This value must be + explicitly added during CopyObject operations. This functionality is not supported for + directory buckets. +- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when + encrypting the object (for example, AES256). This functionality is not supported for + directory buckets. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the - x-amz-server-side-encryption-customer-algorithm header. + x-amz-server-side-encryption-customer-algorithm header. This functionality is not + supported for directory buckets. - `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + integrity check to ensure that the encryption key was transmitted without error. This + functionality is not supported for directory buckets. - `"x-amz-storage-class"`: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see - Storage Classes in the Amazon S3 User Guide. + For more information, see Storage Classes in the Amazon S3 User Guide. 
For directory + buckets, only the S3 Express One Zone storage class is supported to store newly created + objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. - `"x-amz-tagging"`: The tag-set for the object. The tag-set must be encoded as URL Query - parameters. (For example, \"Key1=Value1\") + parameters. (For example, \"Key1=Value1\") This functionality is not supported for + directory buckets. - `"x-amz-website-redirect-location"`: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object - metadata, see Object Key and Metadata. In the following example, the request header sets - the redirect to an object (anotherPage.html) in the same bucket: - x-amz-website-redirect-location: /anotherPage.html In the following example, the request - header sets the object redirect to another website: x-amz-website-redirect-location: - http://www.example.com/ For more information about website hosting in Amazon S3, see - Hosting Websites on Amazon S3 and How to Configure Website Page Redirects. + metadata, see Object Key and Metadata in the Amazon S3 User Guide. In the following + example, the request header sets the redirect to an object (anotherPage.html) in the same + bucket: x-amz-website-redirect-location: /anotherPage.html In the following example, the + request header sets the object redirect to another website: + x-amz-website-redirect-location: http://www.example.com/ For more information about + website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure + Website Page Redirects in the Amazon S3 User Guide. This functionality is not supported + for directory buckets. """ function put_object(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -5764,45 +6811,46 @@ end put_object_acl(bucket, key) put_object_acl(bucket, key, params::Dict{String,<:Any}) -Uses the acl subresource to set the access control list (ACL) permissions for a new or -existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an -object. For more information, see What permissions can I grant? in the Amazon S3 User -Guide. This action is not supported by Amazon S3 on Outposts. Depending on your application -needs, you can choose to set the ACL on an object using either the request body or the -headers. For example, if you have an existing application that updates a bucket ACL using -the request body, you can continue to use that approach. For more information, see Access -Control List (ACL) Overview in the Amazon S3 User Guide. If your bucket uses the bucket -owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect -permissions. You must use policies to grant access to your bucket and the objects in it. -Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error -code. Requests to read ACLs are still supported. For more information, see Controlling -object ownership in the Amazon S3 User Guide. Permissions You can set access permissions -using one of the following methods: Specify a canned ACL with the x-amz-acl request -header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL -has a predefined set of grantees and permissions. Specify the canned ACL name as the value -of x-amz-acl. 
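The `put_object` wrapper documented above follows the same two-method pattern, with everything beyond the bucket and key passed through the params `Dict`. A minimal, untested sketch with placeholder bucket and key names, using the Julia standard libraries SHA and Base64 only to produce the base64-encoded SHA-256 digest described by the checksum headers above:

```julia
using AWS, SHA, Base64
@service S3

bucket = "my-example-bucket"   # placeholder
key    = "notes/hello.txt"     # placeholder
data   = "Hello from AWS.jl"

S3.put_object(bucket, key, Dict(
    "Body"                  => data,                        # object data
    "Content-Type"          => "text/plain",                # standard MIME type of the contents
    "x-amz-checksum-sha256" => base64encode(sha256(data)),  # base64-encoded SHA-256 digest of the object
))
```

The same `Dict` would carry any of the other optional headers listed above, such as `"x-amz-server-side-encryption" => "AES256"`.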
If you use this header, you cannot use other access control-specific headers -in your request. For more information, see Canned ACL. Specify access permissions -explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and -x-amz-grant-full-control headers. When using these headers, you specify explicit access -permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will -receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl -header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 -supports in an ACL. For more information, see Access Control List (ACL) Overview. You -specify each grantee as a type=value pair, where the type is one of the following: id -– if the value specified is the canonical user ID of an Amazon Web Services account -uri – if you are granting permissions to a predefined group emailAddress – if the -value specified is the email address of an Amazon Web Services account Using email -addresses to specify a grantee is only supported in the following Amazon Web Services -Regions: US East (N. Virginia) US West (N. California) US West (Oregon) Asia -Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -South America (São Paulo) For a list of all the Amazon S3 supported Regions and -endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For -example, the following x-amz-grant-read header grants list objects permission to the two -Amazon Web Services accounts identified by their email addresses. x-amz-grant-read: -emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\" You can use either a -canned ACL or specify access permissions explicitly. You cannot do both. Grantee Values -You can specify the person (grantee) to whom you're assigning access rights (using request -elements) in the following ways: By the person's ID: <Grantee + This operation is not supported by directory buckets. Uses the acl subresource to set the +access control list (ACL) permissions for a new or existing object in an S3 bucket. You +must have the WRITE_ACP permission to set the ACL of an object. For more information, see +What permissions can I grant? in the Amazon S3 User Guide. This functionality is not +supported for Amazon S3 on Outposts. Depending on your application needs, you can choose to +set the ACL on an object using either the request body or the headers. For example, if you +have an existing application that updates a bucket ACL using the request body, you can +continue to use that approach. For more information, see Access Control List (ACL) Overview +in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for S3 +Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies +to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs +fail and return the AccessControlListNotSupported error code. Requests to read ACLs are +still supported. For more information, see Controlling object ownership in the Amazon S3 +User Guide. Permissions You can set access permissions using one of the following +methods: Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set +of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +and permissions. Specify the canned ACL name as the value of x-amz-acl. 
If you use this +header, you cannot use other access control-specific headers in your request. For more +information, see Canned ACL. Specify access permissions explicitly with the +x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +headers. When using these headers, you specify explicit access permissions and grantees +(Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you +use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These +parameters map to the set of permissions that Amazon S3 supports in an ACL. For more +information, see Access Control List (ACL) Overview. You specify each grantee as a +type=value pair, where the type is one of the following: id – if the value specified +is the canonical user ID of an Amazon Web Services account uri – if you are granting +permissions to a predefined group emailAddress – if the value specified is the email +address of an Amazon Web Services account Using email addresses to specify a grantee is +only supported in the following Amazon Web Services Regions: US East (N. Virginia) US +West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia Pacific +(Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São Paulo) For a +list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the +Amazon Web Services General Reference. For example, the following x-amz-grant-read +header grants list objects permission to the two Amazon Web Services accounts identified by +their email addresses. x-amz-grant-read: emailAddress=\"xyz@amazon.com\", +emailAddress=\"abc@amazon.com\" You can use either a canned ACL or specify access +permissions explicitly. You cannot do both. Grantee Values You can specify the person +(grantee) to whom you're assigning access rights (using request elements) in the following +ways: By the person's ID: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName>< ;>GranteesEmail<></DisplayName> </Grantee> DisplayName is optional @@ -5826,24 +6874,21 @@ CopyObject GetObject # Arguments - `bucket`: The bucket name that contains the object to which you want to attach the ACL. - When using this action with an access point, you must direct requests to the access point - hostname. The access point hostname takes the form + Access points - When you use this action with an access point, you must provide the alias + of the access point in place of the bucket name or specify the access point ARN. When using + the access point ARN, you must direct requests to the access point hostname. The access + point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. -- `key`: Key for which the PUT action was initiated. When using this action with an access - point, you must direct requests to the access point hostname. The access point hostname - takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using - this action with an access point through the Amazon Web Services SDKs, you provide the - access point ARN in place of the bucket name. For more information about access point ARNs, - see Using access points in the Amazon S3 User Guide. 
When you use this action with Amazon + points in the Amazon S3 User Guide. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. +- `key`: Key for which the PUT action was initiated. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5854,31 +6899,32 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys transit. For more information, go to RFC 1864.> For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. -- `"versionId"`: VersionId used to reference a specific version of the object. +- `"versionId"`: Version ID used to reference a specific version of the object. This + functionality is not supported for directory buckets. - `"x-amz-acl"`: The canned ACL to apply to the object. For more information, see Canned ACL. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-grant-full-control"`: Allows grantee the read, write, read ACP, and write ACP - permissions on the bucket. This action is not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read"`: Allows grantee to list the objects in the bucket. This action is - not supported by Amazon S3 on Outposts. -- `"x-amz-grant-read-acp"`: Allows grantee to read the bucket ACL. This action is not - supported by Amazon S3 on Outposts. + permissions on the bucket. This functionality is not supported for Amazon S3 on Outposts. +- `"x-amz-grant-read"`: Allows grantee to list the objects in the bucket. This + functionality is not supported for Amazon S3 on Outposts. +- `"x-amz-grant-read-acp"`: Allows grantee to read the bucket ACL. This functionality is + not supported for Amazon S3 on Outposts. - `"x-amz-grant-write"`: Allows grantee to create new objects in the bucket. For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects. - `"x-amz-grant-write-acp"`: Allows grantee to write the ACL for the applicable bucket. - This action is not supported by Amazon S3 on Outposts. + This functionality is not supported for Amazon S3 on Outposts. - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. 
If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_object_acl(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -5907,13 +6953,16 @@ end put_object_legal_hold(bucket, key) put_object_legal_hold(bucket, key, params::Dict{String,<:Any}) -Applies a legal hold configuration to the specified object. For more information, see -Locking Objects. This action is not supported by Amazon S3 on Outposts. + This operation is not supported by directory buckets. Applies a legal hold configuration +to the specified object. For more information, see Locking Objects. This functionality is +not supported for Amazon S3 on Outposts. # Arguments - `bucket`: The bucket name containing the object that you want to place a legal hold on. - When using this action with an access point, you must direct requests to the access point - hostname. The access point hostname takes the form + Access points - When you use this action with an access point, you must provide the alias + of the access point in place of the bucket name or specify the access point ARN. When using + the access point ARN, you must direct requests to the access point hostname. The access + point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access @@ -5929,16 +6978,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the specified object. - `"versionId"`: The version ID of the object that you want to place a legal hold on. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. 
For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_object_legal_hold( Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config() @@ -5969,13 +7018,13 @@ end put_object_lock_configuration(bucket) put_object_lock_configuration(bucket, params::Dict{String,<:Any}) -Places an Object Lock configuration on the specified bucket. The rule specified in the -Object Lock configuration will be applied by default to every new object placed in the -specified bucket. For more information, see Locking Objects. The DefaultRetention -settings require both a mode and a period. The DefaultRetention period can be either Days -or Years but you must select one. You cannot specify Days and Years at the same time. You -can only enable Object Lock for new buckets. If you want to turn on Object Lock for an -existing bucket, contact Amazon Web Services Support. + This operation is not supported by directory buckets. Places an Object Lock configuration +on the specified bucket. The rule specified in the Object Lock configuration will be +applied by default to every new object placed in the specified bucket. For more +information, see Locking Objects. The DefaultRetention settings require both a mode and +a period. The DefaultRetention period can be either Days or Years but you must select +one. You cannot specify Days and Years at the same time. You can enable Object Lock for +new or existing buckets. For more information, see Configuring Object Lock. # Arguments - `bucket`: The bucket whose Object Lock configuration you want to create or replace. @@ -5990,16 +7039,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-bucket-object-lock-token"`: A token to allow Object Lock to be enabled for an existing bucket. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. 
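To make the call shape concrete, a minimal AWS.jl sketch follows. The bucket name and retention values are placeholders, and the "ObjectLockConfiguration" params key with its nested Dict layout is assumed from the S3 request schema rather than taken from this patch; depending on your AWS.jl version you may need to supply the raw XML payload instead.

```julia
using AWS
@service S3

# Hypothetical bucket; the nested Dict mirrors the ObjectLockConfiguration XML body
# (ObjectLockEnabled plus Rule/DefaultRetention with a Mode and a Days or Years value).
lock_config = Dict(
    "ObjectLockConfiguration" => Dict(
        "ObjectLockEnabled" => "Enabled",
        "Rule" => Dict("DefaultRetention" => Dict("Mode" => "GOVERNANCE", "Days" => 30)),
    ),
)
S3.put_object_lock_configuration("my-object-lock-bucket", lock_config)
```

The Mode and Days values above are placeholders: choose GOVERNANCE or COMPLIANCE and either Days or Years (not both) to match your retention policy.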
""" function put_object_lock_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -6027,16 +7076,19 @@ end put_object_retention(bucket, key) put_object_retention(bucket, key, params::Dict{String,<:Any}) -Places an Object Retention configuration on an object. For more information, see Locking -Objects. Users or accounts require the s3:PutObjectRetention permission in order to place -an Object Retention configuration on objects. Bypassing a Governance Retention -configuration requires the s3:BypassGovernanceRetention permission. This action is not -supported by Amazon S3 on Outposts. + This operation is not supported by directory buckets. Places an Object Retention +configuration on an object. For more information, see Locking Objects. Users or accounts +require the s3:PutObjectRetention permission in order to place an Object Retention +configuration on objects. Bypassing a Governance Retention configuration requires the +s3:BypassGovernanceRetention permission. This functionality is not supported for Amazon S3 +on Outposts. # Arguments - `bucket`: The bucket name that contains the object you want to apply this Object - Retention configuration to. When using this action with an access point, you must direct - requests to the access point hostname. The access point hostname takes the form + Retention configuration to. Access points - When you use this action with an access + point, you must provide the alias of the access point in place of the bucket name or + specify the access point ARN. When using the access point ARN, you must direct requests to + the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access @@ -6055,16 +7107,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-bypass-governance-retention"`: Indicates whether this action should bypass Governance-mode restrictions. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_object_retention( Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config() @@ -6095,38 +7147,40 @@ end put_object_tagging(bucket, key, tagging) put_object_tagging(bucket, key, tagging, params::Dict{String,<:Any}) -Sets the supplied tag-set to an object that already exists in a bucket. A tag is a -key-value pair. You can associate tags with an object by sending a PUT request against the -tagging subresource that is associated with the object. You can retrieve tags by sending a -GET request. For more information, see GetObjectTagging. For tagging-related restrictions -related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the -maximum number of tags to 10 tags per object. To use this operation, you must have -permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this -permission and can grant this permission to others. To put tags of any other version, use -the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging -action. For information about the Amazon S3 object tagging feature, see Object Tagging. -PutObjectTagging has the following special errors: Code: InvalidTagError Cause: -The tag provided was not a valid tag. This error can occur if the tag did not pass input -validation. For more information, see Object Tagging. Code: MalformedXMLError -Cause: The XML provided does not match the schema. Code: OperationAbortedError -Cause: A conflicting conditional action is currently in progress against this resource. -Please try again. Code: InternalError Cause: The service was unable to apply -the provided tag to the object. The following operations are related to -PutObjectTagging: GetObjectTagging DeleteObjectTagging + This operation is not supported by directory buckets. Sets the supplied tag-set to an +object that already exists in a bucket. A tag is a key-value pair. For more information, +see Object Tagging. You can associate tags with an object by sending a PUT request against +the tagging subresource that is associated with the object. You can retrieve tags by +sending a GET request. For more information, see GetObjectTagging. For tagging-related +restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 +limits the maximum number of tags to 10 tags per object. To use this operation, you must +have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has +this permission and can grant this permission to others. To put tags of any other version, +use the versionId query parameter. You also need permission for the +s3:PutObjectVersionTagging action. PutObjectTagging has the following special errors. For +more Amazon S3 errors see, Error Responses. InvalidTag - The tag provided was not a +valid tag. This error can occur if the tag did not pass input validation. For more +information, see Object Tagging. MalformedXML - The XML provided does not match the +schema. OperationAborted - A conflicting conditional action is currently in progress +against this resource. Please try again. InternalError - The service was unable to apply +the provided tag to the object. The following operations are related to PutObjectTagging: + GetObjectTagging DeleteObjectTagging # Arguments -- `bucket`: The bucket name containing the object. 
When using this action with an access - point, you must direct requests to the access point hostname. The access point hostname - takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using - this action with an access point through the Amazon Web Services SDKs, you provide the - access point ARN in place of the bucket name. For more information about access point ARNs, - see Using access points in the Amazon S3 User Guide. When you use this action with Amazon +- `bucket`: The bucket name containing the object. Access points - When you use this + action with an access point, you must provide the alias of the access point in place of the + bucket name or specify the access point ARN. When using the access point ARN, you must + direct requests to the access point hostname. The access point hostname takes the form + AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with + an access point through the Amazon Web Services SDKs, you provide the access point ARN in + place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Name of the object key. - `tagging`: Container for the TagSet and Tag elements @@ -6137,16 +7191,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys calculated automatically. - `"versionId"`: The versionId of the object that the tag-set will be added to. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. 
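As a quick orientation, a hedged AWS.jl sketch of the call is shown below. The bucket, key, and tag values are placeholders, and the TagSet Dict layout is assumed from the S3 Tagging schema rather than taken from this patch; pass "versionId" in a trailing params Dict to tag a specific object version.

```julia
using AWS
@service S3

# Hypothetical names; the Dict mirrors the Tagging XML body (a TagSet containing
# Tag Key/Value pairs). Amazon S3 allows at most 10 tags per object.
tags = Dict("TagSet" => Dict("Tag" => [Dict("Key" => "project", "Value" => "alpha")]))
S3.put_object_tagging("my-example-bucket", "reports/summary.csv", tags)
```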
""" function put_object_tagging( Bucket, Key, Tagging; aws_config::AbstractAWSConfig=global_aws_config() @@ -6179,17 +7233,18 @@ end put_public_access_block(bucket, public_access_block_configuration) put_public_access_block(bucket, public_access_block_configuration, params::Dict{String,<:Any}) -Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use -this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more -information about Amazon S3 permissions, see Specifying Permissions in a Policy. When -Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it -checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains -the object) and the bucket owner's account. If the PublicAccessBlock configurations are -different between the bucket and the account, Amazon S3 uses the most restrictive -combination of the bucket-level and account-level settings. For more information about -when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\". The -following operations are related to PutPublicAccessBlock: GetPublicAccessBlock -DeletePublicAccessBlock GetBucketPolicyStatus Using Amazon S3 Block Public Access + This operation is not supported by directory buckets. Creates or modifies the +PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must +have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 +permissions, see Specifying Permissions in a Policy. When Amazon S3 evaluates the +PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock +configuration for both the bucket (or the bucket that contains the object) and the bucket +owner's account. If the PublicAccessBlock configurations are different between the bucket +and the account, Amazon S3 uses the most restrictive combination of the bucket-level and +account-level settings. For more information about when Amazon S3 considers a bucket or an +object public, see The Meaning of \"Public\". The following operations are related to +PutPublicAccessBlock: GetPublicAccessBlock DeletePublicAccessBlock +GetBucketPolicyStatus Using Amazon S3 Block Public Access # Arguments - `bucket`: The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want @@ -6205,15 +7260,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. 
+ the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function put_public_access_block( Bucket, @@ -6257,114 +7312,97 @@ end restore_object(bucket, key) restore_object(bucket, key, params::Dict{String,<:Any}) -Restores an archived copy of an object back into Amazon S3 This action is not supported by -Amazon S3 on Outposts. This action performs the following types of requests: select - -Perform a select query on an archived object restore an archive - Restore an archived -object For more information about the S3 structure in the request body, see the + This operation is not supported by directory buckets. Restores an archived copy of an +object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. +This action performs the following types of requests: restore an archive - Restore an +archived object For more information about the S3 structure in the request body, see the following: PutObject Managing Access with ACLs in the Amazon S3 User Guide -Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Define the SQL -expression for the SELECT type of restoration for your query in the request body's -SelectParameters structure. You can use expressions like the following examples. The -following expression returns all records from the specified object. SELECT * FROM Object - Assuming that you are not using any headers for data stored in the object, you can specify -columns with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > 100 If -you have headers and you set the fileHeaderInfo in the CSV structure in the request body to -USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, -the first row is skipped for the query.) You cannot mix ordinal positions with header -column names. SELECT s.Id, s.FirstName, s.SSN FROM S3Object s When making a select -request, you can also do the following: To expedite your queries, specify the Expedited -tier. For more information about tiers, see \"Restoring Archives,\" later in this topic. -Specify details about the data serialization format of both the input object that is being -queried and the serialization of the CSV-encoded query results. The following are -additional important facts about the select feature: The output results are new Amazon S3 -objects. Unlike archive retrievals, they are stored until explicitly deleted-manually or -through a lifecycle configuration. You can issue more than one select request on the same -Amazon S3 object. Amazon S3 doesn't duplicate requests, so avoid issuing duplicate -requests. Amazon S3 accepts a select request even if the object has already been -restored. A select request doesn’t return error response 409. Permissions To use this -operation, you must have permissions to perform the s3:RestoreObject action. The bucket -owner has this permission by default and can grant this permission to others. For more -information about permissions, see Permissions Related to Bucket Subresource Operations and -Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. 
-Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval or S3 -Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 -Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the -S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first -initiate a restore request, and then wait until a temporary copy of the object is -available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 -Standard storage class in your S3 bucket. To access an archived object, you must restore -the object for the duration (number of days) that you specify. For objects in the Archive -Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a -restore request, and then wait until the object is moved into the Frequent Access tier. To -restore a specific object version, you can provide a version ID. If you don't provide a -version ID, Amazon S3 restores the current version. When restoring an archived object, you -can specify one of the following data access tier options in the Tier element of the -request body: Expedited - Expedited retrievals allow you to quickly access your data -stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive -tier when occasional urgent requests for restoring archives are required. For all but the -largest archived objects (250 MB+), data accessed using Expedited retrievals is typically -made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity -for Expedited retrievals is available when you need it. Expedited retrievals and +Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions +To use this operation, you must have permissions to perform the s3:RestoreObject action. +The bucket owner has this permission by default and can grant this permission to others. +For more information about permissions, see Permissions Related to Bucket Subresource +Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 +User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible +Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 +Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not +accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible +Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore +request, and then wait until a temporary copy of the object is available. If you want a +permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class +in your S3 bucket. To access an archived object, you must restore the object for the +duration (number of days) that you specify. For objects in the Archive Access or Deep +Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, +and then wait until the object is moved into the Frequent Access tier. To restore a +specific object version, you can provide a version ID. If you don't provide a version ID, +Amazon S3 restores the current version. 
When restoring an archived object, you can specify +one of the following data access tier options in the Tier element of the request body: +Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 +Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering +Archive tier when occasional urgent requests for restoring archives are required. For all +but the largest archived objects (250 MB+), data accessed using Expedited retrievals is +typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval +capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 -Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They -typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage -class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects -stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the -S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to -retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically -finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage -class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost -retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish -within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 -Intelligent-Tiering Deep Archive tier. For more information about archive retrieval -options and provisioned capacity for Expedited data access, see Restoring Archived Objects -in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the -restore speed to a faster speed while it is in progress. For more information, see -Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the -status of object restoration, you can send a HEAD request. Operations return the -x-amz-restore header, which provides information about the restoration status, in the -response. You can use Amazon S3 event notifications to notify you when a restore is -initiated or completed. For more information, see Configuring Amazon S3 Event Notifications -in the Amazon S3 User Guide. After restoring an archived object, you can update the -restoration period by reissuing the request with a new period. Amazon S3 updates the -restoration period relative to the current time and charges only for the request-there are -no data transfer charges. You cannot update the restoration period when Amazon S3 is -actively processing your current restore request for the object. If your bucket has a -lifecycle configuration with a rule that includes an expiration action, the object -expiration overrides the life span that you specify in a restore request. For example, if -you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, -Amazon S3 deletes the object in 3 days. 
For more information about lifecycle configuration, -see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User -Guide. Responses A successful action returns either the 200 OK or 202 Accepted status -code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in -the response. If the object is previously restored, Amazon S3 returns 200 OK in the -response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore -is already in progress. (This error does not apply to SELECT type requests.) HTTP -Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: -GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not -available. Try again later. (Returned if there is insufficient capacity to process the -Expedited request. This error applies only to Expedited retrievals and not to S3 Standard -or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The -following operations are related to RestoreObject: PutBucketLifecycleConfiguration -GetBucketNotificationConfiguration +Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering +Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier +Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals +are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for +objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage +classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk +retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier +Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. +Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 +Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 +Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. For more +information about archive retrieval options and provisioned capacity for Expedited data +access, see Restoring Archived Objects in the Amazon S3 User Guide. You can use Amazon S3 +restore speed upgrade to change the restore speed to a faster speed while it is in +progress. For more information, see Upgrading the speed of an in-progress restore in the +Amazon S3 User Guide. To get the status of object restoration, you can send a HEAD +request. Operations return the x-amz-restore header, which provides information about the +restoration status, in the response. You can use Amazon S3 event notifications to notify +you when a restore is initiated or completed. For more information, see Configuring Amazon +S3 Event Notifications in the Amazon S3 User Guide. After restoring an archived object, you +can update the restoration period by reissuing the request with a new period. Amazon S3 +updates the restoration period relative to the current time and charges only for the +request-there are no data transfer charges. You cannot update the restoration period when +Amazon S3 is actively processing your current restore request for the object. If your +bucket has a lifecycle configuration with a rule that includes an expiration action, the +object expiration overrides the life span that you specify in a restore request. For +example, if you restore an object copy for 10 days, but the object is scheduled to expire +in 3 days, Amazon S3 deletes the object in 3 days. 
For more information about lifecycle +configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in +Amazon S3 User Guide. Responses A successful action returns either the 200 OK or 202 +Accepted status code. If the object is not previously restored, then Amazon S3 returns +202 Accepted in the response. If the object is previously restored, Amazon S3 returns +200 OK in the response. Special errors: Code: RestoreAlreadyInProgress Cause: +Object restore is already in progress. HTTP Status Code: 409 Conflict SOAP Fault +Code Prefix: Client Code: GlacierExpeditedRetrievalNotAvailable Cause: +expedited retrievals are currently not available. Try again later. (Returned if there is +insufficient capacity to process the Expedited request. This error applies only to +Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 + SOAP Fault Code Prefix: N/A The following operations are related to +RestoreObject: PutBucketLifecycleConfiguration GetBucketNotificationConfiguration # Arguments -- `bucket`: The bucket name containing the object to restore. When using this action with - an access point, you must direct requests to the access point hostname. The access point - hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When - using this action with an access point through the Amazon Web Services SDKs, you provide - the access point ARN in place of the bucket name. For more information about access point - ARNs, see Using access points in the Amazon S3 User Guide. When you use this action with - Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - Outposts hostname takes the form +- `bucket`: The bucket name containing the object to restore. Access points - When you + use this action with an access point, you must provide the alias of the access point in + place of the bucket name or specify the access point ARN. When using the access point ARN, + you must direct requests to the access point hostname. The access point hostname takes the + form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action + with an access point through the Amazon Web Services SDKs, you provide the access point ARN + in place of the bucket name. For more information about access point ARNs, see Using access + points in the Amazon S3 User Guide. S3 on Outposts - When you use this action with Amazon + S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the action was initiated. # Optional Parameters @@ -6372,16 +7410,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"RestoreRequest"`: - `"versionId"`: VersionId used to reference a specific version of the object. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). 
+ account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. """ function restore_object(Bucket, Key; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -6410,44 +7448,48 @@ end select_object_content(bucket, expression, expression_type, input_serialization, key, output_serialization) select_object_content(bucket, expression, expression_type, input_serialization, key, output_serialization, params::Dict{String,<:Any}) -This action filters the contents of an Amazon S3 object based on a simple structured query -language (SQL) statement. In the request, along with the SQL expression, you must also -specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 -uses this format to parse object data into records, and returns only records that match the -specified SQL expression. You must also specify the data serialization format for the -response. This action is not supported by Amazon S3 on Outposts. For more information about -Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 -User Guide. Permissions You must have s3:GetObject permission for this -operation. Amazon S3 Select does not support anonymous access. For more information about -permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide. Object -Data Formats You can use Amazon S3 Select to query objects that have the following format -properties: CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. -UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV -and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only -compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select -supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not -support whole-object compression for Parquet objects. Server-side encryption - Amazon S3 -Select supports querying objects that are protected with server-side encryption. For -objects that are encrypted with customer-provided encryption keys (SSE-C), you must use -HTTPS, and you must use the headers that are documented in the GetObject. For more -information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption -Keys) in the Amazon S3 User Guide. 
For objects that are encrypted with Amazon S3 managed -keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled -transparently, so you don't need to specify anything. For more information about -server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side -Encryption in the Amazon S3 User Guide. Working with the Response Body Given the -response size is unknown, Amazon S3 Select streams the response as a series of messages and -includes a Transfer-Encoding header with chunked as its value in the response. For more -information, see Appendix: SelectObjectContent Response. GetObject Support The -SelectObjectContent action does not support the following GetObject functionality. For more -information, see GetObject. Range: Although you can specify a scan range for an Amazon -S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), -you cannot specify the range of bytes of an object to return. GLACIER, DEEP_ARCHIVE and -REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or -REDUCED_REDUNDANCY storage classes. For more information, about storage classes see Storage -Classes in the Amazon S3 User Guide. Special Errors For a list of special errors for -this operation, see List of SELECT Object Content Error Codes The following operations -are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration + This operation is not supported by directory buckets. This action filters the contents of +an Amazon S3 object based on a simple structured query language (SQL) statement. In the +request, along with the SQL expression, you must also specify a data serialization format +(JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object +data into records, and returns only records that match the specified SQL expression. You +must also specify the data serialization format for the response. This functionality is not +supported for Amazon S3 on Outposts. For more information about Amazon S3 Select, see +Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide. +Permissions You must have the s3:GetObject permission for this operation. Amazon S3 +Select does not support anonymous access. For more information about permissions, see +Specifying Permissions in a Policy in the Amazon S3 User Guide. Object Data Formats You +can use Amazon S3 Select to query objects that have the following format properties: +CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. UTF-8 - UTF-8 +is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV and JSON files +can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that +Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar +compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support +whole-object compression for Parquet objects. Server-side encryption - Amazon S3 Select +supports querying objects that are protected with server-side encryption. For objects that +are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you +must use the headers that are documented in the GetObject. For more information about +SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon +S3 User Guide. 
For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and +Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so +you don't need to specify anything. For more information about server-side encryption, +including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the +Amazon S3 User Guide. Working with the Response Body Given the response size is +unknown, Amazon S3 Select streams the response as a series of messages and includes a +Transfer-Encoding header with chunked as its value in the response. For more information, +see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent +action does not support the following GetObject functionality. For more information, see +GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request +(see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify +the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE, and +REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access +tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, +DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or +DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more +information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 +User Guide. Special Errors For a list of special errors for this operation, see List of +SELECT Object Content Error Codes The following operations are related to +SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration # Arguments @@ -6474,8 +7516,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys <scanrange><end>50</end></scanrange> - process only the records within the last 50 bytes of the file. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-server-side-encryption-customer-algorithm"`: The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C @@ -6545,70 +7587,98 @@ end upload_part(bucket, key, part_number, upload_id) upload_part(bucket, key, part_number, upload_id, params::Dict{String,<:Any}) -Uploads a part in a multipart upload. In this operation, you provide part data in your -request. However, you have an option to specify your existing Amazon S3 object as a data -source for the part you are uploading. To upload a part from an existing object, you use -the UploadPartCopy operation. You must initiate a multipart upload (see +Uploads a part in a multipart upload. In this operation, you provide new data as a part of +an object in your request. However, you have an option to specify your existing Amazon S3 +object as a data source for the part you are uploading. To upload a part from an existing +object, you use the UploadPartCopy operation. You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. 
In response to your initiate -request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your +request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request. Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits -in the Amazon S3 User Guide. To ensure that data is not corrupted when traversing the -network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the -part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. - If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses -the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information -see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature -Version 4). Note: After you initiate multipart upload and upload one or more parts, you -must either complete or abort multipart upload in order to stop getting charged for storage -of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 -frees up the parts storage and stops charging you for the parts storage. For more -information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User -Guide . For information on the permissions required to use the multipart upload API, go to -Multipart Upload and Permissions in the Amazon S3 User Guide. Server-side encryption is for -data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data -centers and decrypts it when you access it. You have three mutually exclusive options to -protect data using server-side encryption in Amazon S3, depending on how you choose to -manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed -keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). -Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) -by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side -encryption with other key options. The option you use depends on whether you want to use -KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose to provide -your own encryption key, the request headers you provide in the request must match the -headers you used in the request to initiate the upload by using CreateMultipartUpload. For -more information, go to Using Server-Side Encryption in the Amazon S3 User Guide. -Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are -using a customer-provided encryption key (SSE-C), you don't need to specify the encryption -parameters in each UploadPart request. Instead, you only need to specify the server-side -encryption parameters in the initial Initiate Multipart request. For more information, see -CreateMultipartUpload. If you requested server-side encryption using a customer-provided -encryption key (SSE-C) in your initiate multipart upload request, you must provide -identical encryption information in each part upload using the following headers. 
+in the Amazon S3 User Guide. After you initiate multipart upload and upload one or more +parts, you must either complete or abort multipart upload in order to stop getting charged +for storage of the uploaded parts. Only after you either complete or abort multipart +upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. +For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 +User Guide . Directory buckets - For directory buckets, you must make requests for this +API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests +in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +Path-style requests are not supported. For more information, see Regional and Zonal +endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions +- For information on the permissions required to use the multipart upload API, see +Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket +permissions - To grant access to this API operation on a directory bucket, we recommend +that you use the CreateSession API operation for session-based authorization. +Specifically, you grant the s3express:CreateSession permission to the directory bucket in a +bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on +the bucket to obtain a session token. With the session token in your request header, you +can make API requests to this operation. After the session token expires, you make another +CreateSession API call to generate a new session token for use. Amazon Web Services CLI or +SDKs create session and refresh the session token automatically to avoid service +interruptions when a session expires. For more information about authorization, see +CreateSession . Data integrity General purpose bucket - To ensure that data is not +corrupted traversing the network, specify the Content-MD5 header in the upload part +request. Amazon S3 checks the part data against the provided MD5 value. If they do not +match, Amazon S3 returns an error. If the upload request is signed with Signature Version +4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead +of Content-MD5. For more information see Authenticating Requests: Using the Authorization +Header (Amazon Web Services Signature Version 4). Directory buckets - MD5 is not +supported by directory buckets. You can use checksum algorithms to check object integrity. + Encryption General purpose bucket - Server-side encryption is for data encryption at +rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and +decrypts it when you access it. You have mutually exclusive options to protect data using +server-side encryption in Amazon S3, depending on how you choose to manage the encryption +keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon +Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts +data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can +optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other +key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or +provide your own encryption key (SSE-C). Server-side encryption is supported by the S3 +Multipart Upload operations. 
Unless you are using a customer-provided encryption key +(SSE-C), you don't need to specify the encryption parameters in each UploadPart request. +Instead, you only need to specify the server-side encryption parameters in the initial +Initiate Multipart request. For more information, see CreateMultipartUpload. If you request +server-side encryption using a customer-provided encryption key (SSE-C) in your initiate +multipart upload request, you must provide identical encryption information in each part +upload using the following request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key - x-amz-server-side-encryption-customer-key-MD5 UploadPart has the following special -errors: Code: NoSuchUpload Cause: The specified multipart upload does not exist. -The upload ID might be invalid, or the multipart upload might have been aborted or -completed. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client -The following operations are related to UploadPart: CreateMultipartUpload -CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads + x-amz-server-side-encryption-customer-key-MD5 Directory bucket - For directory +buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is +supported. For more information, see Using Server-Side Encryption in the Amazon S3 User +Guide. Special errors Error Code: NoSuchUpload Description: The specified multipart +upload does not exist. The upload ID might be invalid, or the multipart upload might have +been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: +Client HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to +UploadPart: CreateMultipartUpload CompleteMultipartUpload AbortMultipartUpload + ListParts ListMultipartUploads # Arguments -- `bucket`: The name of the bucket to which the multipart upload was initiated. When using - this action with an access point, you must direct requests to the access point hostname. - The access point hostname takes the form +- `bucket`: The name of the bucket to which the multipart upload was initiated. Directory + buckets - When you use this operation with a directory bucket, you must use + virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `part_number`: Part number of part being uploaded. This is a positive integer between 1 and 10,000. @@ -6621,7 +7691,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the body cannot be determined automatically. - `"Content-MD5"`: The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object - lock parameters are specified. + lock parameters are specified. This functionality is not supported for directory buckets. - `"x-amz-checksum-crc32"`: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking @@ -6639,28 +7709,31 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the - bucket is owned by a different account, the request fails with the HTTP status code 403 - Forbidden (access denied). + account ID that you provide does not match the actual owner of the bucket, the request + fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: - `"x-amz-sdk-checksum-algorithm"`: Indicates the algorithm used to create the checksum for - the object when using the SDK. This header will not provide any additional functionality if - not using the SDK. When sending this header, there must be a corresponding x-amz-checksum - or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status - code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 - User Guide. If you provide an individual checksum, Amazon S3 ignores any provided - ChecksumAlgorithm parameter. This checksum algorithm must be the same for all parts and it - match the checksum value supplied in the CreateMultipartUpload request. -- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use to - when encrypting the object (for example, AES256). + the object when you use the SDK. This header will not provide any additional functionality + if you don't use the SDK. 
When you send this header, there must be a corresponding + x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + the HTTP status code 400 Bad Request. For more information, see Checking object integrity + in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any + provided ChecksumAlgorithm parameter. This checksum algorithm must be the same for all + parts and it match the checksum value supplied in the CreateMultipartUpload request. +- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when + encrypting the object (for example, AES256). This functionality is not supported for + directory buckets. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This must be the same encryption - key specified in the initiate multipart upload request. + key specified in the initiate multipart upload request. This functionality is not + supported for directory buckets. - `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + integrity check to ensure that the encryption key was transmitted without error. This + functionality is not supported for directory buckets. """ function upload_part( Bucket, Key, partNumber, uploadId; aws_config::AbstractAWSConfig=global_aws_config() @@ -6700,61 +7773,85 @@ end upload_part_copy(bucket, key, part_number, upload_id, x-amz-copy-source) upload_part_copy(bucket, key, part_number, upload_id, x-amz-copy-source, params::Dict{String,<:Any}) -Uploads a part by copying data from an existing object as data source. You specify the data -source by adding the request header x-amz-copy-source in your request and a byte range by -adding the request header x-amz-copy-source-range in your request. For information about -maximum and minimum part sizes and other multipart upload specifications, see Multipart -upload limits in the Amazon S3 User Guide. Instead of using an existing object as part -data, you might use the UploadPart action and provide data in your request. You must -initiate a multipart upload before you can upload any part. In response to your initiate -request. Amazon S3 returns a unique identifier, the upload ID, that you must include in -your upload part request. For more information about using the UploadPartCopy operation, -see the following: For conceptual information about multipart uploads, see Uploading -Objects Using Multipart Upload in the Amazon S3 User Guide. For information about -permissions required to use the multipart upload API, see Multipart Upload and Permissions -in the Amazon S3 User Guide. For information about copying objects using a single atomic -action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide. For +Uploads a part by copying data from an existing object as data source. To specify the data +source, you add the request header x-amz-copy-source in your request. To specify a byte +range, you add the request header x-amz-copy-source-range in your request. 
For information +about maximum and minimum part sizes and other multipart upload specifications, see +Multipart upload limits in the Amazon S3 User Guide. Instead of copying data from an +existing object as part data, you might use the UploadPart action to upload new data as a +part of an object in your request. You must initiate a multipart upload before you can +upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a +unique identifier that you must include in your upload part request. For conceptual +information about multipart uploads, see Uploading Objects Using Multipart Upload in the +Amazon S3 User Guide. For information about copying objects using a single atomic action +vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide. Directory +buckets - For directory buckets, you must make requests for this API operation to the Zonal +endpoint. These endpoints support virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. Authentication and authorization All UploadPartCopy requests must be +authenticated and signed by using IAM credentials (access key ID and secret access key for +the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must +be signed. For more information, see REST Authentication. Directory buckets - You must use +IAM credentials to authenticate and authorize your access to the UploadPartCopy API +operation, instead of using the temporary security credentials through the CreateSession +API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on +your behalf. Permissions You must have READ access to the source object and WRITE access +to the destination bucket. General purpose bucket permissions - You must have the +permissions in a policy based on the bucket types of your source bucket and destination +bucket in an UploadPartCopy operation. If the source object is in a general purpose +bucket, you must have the s3:GetObject permission to read the source object that is being +copied. If the destination bucket is a general purpose bucket, you must have the +s3:PutObject permission to write the object copy to the destination bucket. For +information about permissions required to use the multipart upload API, see Multipart +Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - You +must have permissions in a bucket policy or an IAM identity-based policy based on the +source and destination bucket types in an UploadPartCopy operation. If the source object +that you want to copy is in a directory bucket, you must have the s3express:CreateSession +permission in the Action element of a policy to read the object . By default, the session +is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the +s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy +destination is a directory bucket, you must have the s3express:CreateSession permission +in the Action element of a policy to write the object to the destination. The +s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination. 
+For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web +Services Identity and Access Management (IAM) identity-based policies for S3 Express One +Zone in the Amazon S3 User Guide. Encryption General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with -the UploadPartCopy operation, see CopyObject and UploadPart. Note the following -additional considerations about the request headers x-amz-copy-source-if-match, -x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and -x-amz-copy-source-if-modified-since: Consideration 1 - If both of the -x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in -the request as follows: x-amz-copy-source-if-match condition evaluates to true, and; -x-amz-copy-source-if-unmodified-since condition evaluates to false; Amazon S3 returns 200 -OK and copies the data. Consideration 2 - If both of the -x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present -in the request as follows: x-amz-copy-source-if-none-match condition evaluates to false, -and; x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3 returns -412 Precondition Failed response code. Versioning If your bucket has versioning -enabled, you could have multiple versions of the same object. By default, x-amz-copy-source -identifies the current version of the object to copy. If the current version is a delete -marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 -error, because the object does not exist. If you specify versionId in the x-amz-copy-source -and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are -not allowed to specify a delete marker as a version for the x-amz-copy-source. You can -optionally specify a specific version of the source object to copy by adding the versionId -subresource as shown in the following example: x-amz-copy-source: -/bucket/object?versionId=version id Special errors Code: NoSuchUpload Cause: -The specified multipart upload does not exist. The upload ID might be invalid, or the -multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found - Code: InvalidRequest Cause: The specified copy source is not supported as a -byte-range copy source. HTTP Status Code: 400 Bad Request The following -operations are related to UploadPartCopy: CreateMultipartUpload UploadPart -CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads +the UploadPartCopy operation, see CopyObject and UploadPart. Directory buckets - For +directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) +(AES256) is supported. Special errors Error Code: NoSuchUpload Description: The +specified multipart upload does not exist. The upload ID might be invalid, or the multipart +upload might have been aborted or completed. HTTP Status Code: 404 Not Found Error +Code: InvalidRequest Description: The specified copy source is not supported as a +byte-range copy source. HTTP Status Code: 400 Bad Request HTTP Host header syntax +Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to +UploadPartCopy: CreateMultipartUpload UploadPart CompleteMultipartUpload +AbortMultipartUpload ListParts ListMultipartUploads # Arguments -- `bucket`: The bucket name. 
When using this action with an access point, you must direct - requests to the access point hostname. The access point hostname takes the form +- `bucket`: The bucket name. Directory buckets - When you use this operation with a + directory bucket, you must use virtual-hosted-style requests in the format + Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. + Directory bucket names must be unique in the chosen Availability Zone. Bucket names must + follow the format bucket_base_name--az-id--x-s3 (for example, + DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see + Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use + this action with an access point, you must provide the alias of the access point in place + of the bucket name or specify the access point ARN. When using the access point ARN, you + must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access - points in the Amazon S3 User Guide. When you use this action with Amazon S3 on Outposts, - you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes - the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you - use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the - Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + points in the Amazon S3 User Guide. Access points and Object Lambda access points are not + supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 + on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + hostname takes the form + AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this + action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts + access point ARN in place of the bucket name. For more information about S3 on Outposts + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `part_number`: Part number of part being copied. This is a positive integer between 1 and 10,000. @@ -6771,60 +7868,91 @@ CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipart t;key>. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. - The value must be URL encoded. Amazon S3 supports copy operations using access points only - when the source and destination buckets are in the same Amazon Web Services Region. - Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the - object as accessed in the format + The value must be URL encoded. Amazon S3 supports copy operations using Access points + only when the source and destination buckets are in the same Amazon Web Services Region. + Access points are not supported by directory buckets. 
Alternatively, for objects + accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the + format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/< ;key>. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. - The value must be URL-encoded. To copy a specific version of an object, append - ?versionId=<version-id> to the value (for example, - awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you - don't specify a version ID, Amazon S3 copies the latest version of the source object. + The value must be URL-encoded. If your bucket has versioning enabled, you could have + multiple versions of the same object. By default, x-amz-copy-source identifies the current + version of the source object to copy. To copy a specific version of the source object to + copy, append ?versionId=<version-id> to the x-amz-copy-source request header (for + example, x-amz-copy-source: + /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If + the current version is a delete marker and you don't specify a versionId in the + x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error, because the + object does not exist. If you specify versionId in the x-amz-copy-source and the versionId + is a delete marker, Amazon S3 returns an HTTP 400 Bad Request error, because you are not + allowed to specify a delete marker as a version for the x-amz-copy-source. Directory + buckets - S3 Versioning isn't enabled and supported for directory buckets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"x-amz-copy-source-if-match"`: Copies the object if its entity tag (ETag) matches the - specified tag. + specified tag. If both of the x-amz-copy-source-if-match and + x-amz-copy-source-if-unmodified-since headers are present in the request as follows: + x-amz-copy-source-if-match condition evaluates to true, and; + x-amz-copy-source-if-unmodified-since condition evaluates to false; Amazon S3 returns 200 + OK and copies the data. - `"x-amz-copy-source-if-modified-since"`: Copies the object if it has been modified since - the specified time. + the specified time. If both of the x-amz-copy-source-if-none-match and + x-amz-copy-source-if-modified-since headers are present in the request as follows: + x-amz-copy-source-if-none-match condition evaluates to false, and; + x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3 returns 412 + Precondition Failed response code. - `"x-amz-copy-source-if-none-match"`: Copies the object if its entity tag (ETag) is - different than the specified ETag. + different than the specified ETag. If both of the x-amz-copy-source-if-none-match and + x-amz-copy-source-if-modified-since headers are present in the request as follows: + x-amz-copy-source-if-none-match condition evaluates to false, and; + x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3 returns 412 + Precondition Failed response code. - `"x-amz-copy-source-if-unmodified-since"`: Copies the object if it hasn't been modified - since the specified time. + since the specified time. 
If both of the x-amz-copy-source-if-match and + x-amz-copy-source-if-unmodified-since headers are present in the request as follows: + x-amz-copy-source-if-match condition evaluates to true, and; + x-amz-copy-source-if-unmodified-since condition evaluates to false; Amazon S3 returns 200 + OK and copies the data. - `"x-amz-copy-source-range"`: The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first 10 bytes of the source. You can copy a range only if the source object is greater than 5 MB. - `"x-amz-copy-source-server-side-encryption-customer-algorithm"`: Specifies the algorithm - to use when decrypting the source object (for example, AES256). + to use when decrypting the source object (for example, AES256). This functionality is not + supported when the source object is in a directory bucket. - `"x-amz-copy-source-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was - created. + created. This functionality is not supported when the source object is in a directory + bucket. - `"x-amz-copy-source-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. + This functionality is not supported when the source object is in a directory bucket. - `"x-amz-expected-bucket-owner"`: The account ID of the expected destination bucket owner. - If the destination bucket is owned by a different account, the request fails with the HTTP - status code 403 Forbidden (access denied). + If the account ID that you provide does not match the actual owner of the destination + bucket, the request fails with the HTTP status code 403 Forbidden (access denied). - `"x-amz-request-payer"`: -- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use to - when encrypting the object (for example, AES256). +- `"x-amz-server-side-encryption-customer-algorithm"`: Specifies the algorithm to use when + encrypting the object (for example, AES256). This functionality is not supported when the + destination bucket is a directory bucket. - `"x-amz-server-side-encryption-customer-key"`: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This must be the same encryption - key specified in the initiate multipart upload request. + key specified in the initiate multipart upload request. This functionality is not + supported when the destination bucket is a directory bucket. - `"x-amz-server-side-encryption-customer-key-MD5"`: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message - integrity check to ensure that the encryption key was transmitted without error. + integrity check to ensure that the encryption key was transmitted without error. This + functionality is not supported when the destination bucket is a directory bucket. 
- `"x-amz-source-expected-bucket-owner"`: The account ID of the expected source bucket - owner. If the source bucket is owned by a different account, the request fails with the - HTTP status code 403 Forbidden (access denied). + owner. If the account ID that you provide does not match the actual owner of the source + bucket, the request fails with the HTTP status code 403 Forbidden (access denied). """ function upload_part_copy( Bucket, @@ -6878,35 +8006,35 @@ end write_get_object_response(x-amz-request-route, x-amz-request-token) write_get_object_response(x-amz-request-route, x-amz-request-token, params::Dict{String,<:Any}) -Passes transformed objects to a GetObject operation when using Object Lambda access points. -For information about Object Lambda access points, see Transforming objects with Object -Lambda access points in the Amazon S3 User Guide. This operation supports metadata that can -be returned by GetObject, in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, -and ErrorMessage. The GetObject response metadata is supported so that the -WriteGetObjectResponse caller, typically an Lambda function, can provide the same metadata -when it internally invokes GetObject. When WriteGetObjectResponse is called by a -customer-owned Lambda function, the metadata returned to the end user GetObject call might -differ from what Amazon S3 would normally return. You can include any number of metadata -headers. When including a metadata header, it should be prefaced with x-amz-meta. For -example, x-amz-meta-my-custom-header: MyCustomValue. The primary use case for this is to -forward GetObject metadata. Amazon Web Services provides some prebuilt Lambda functions -that you can use with S3 Object Lambda to detect and redact personally identifiable -information (PII) and decompress S3 objects. These Lambda functions are available in the -Amazon Web Services Serverless Application Repository, and can be selected through the -Amazon Web Services Management Console when you create your Object Lambda access point. -Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural -language processing (NLP) service using machine learning to find insights and relationships -in text. It automatically detects personally identifiable information (PII) such as names, -addresses, dates, credit card numbers, and social security numbers from documents in your -Amazon S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, -a natural language processing (NLP) service using machine learning to find insights and -relationships in text. It automatically redacts personally identifiable information (PII) -such as names, addresses, dates, credit card numbers, and social security numbers from -documents in your Amazon S3 bucket. Example 3: Decompression - The Lambda function -S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six -compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For -information on how to view and use these functions, see Using Amazon Web Services built -Lambda functions in the Amazon S3 User Guide. + This operation is not supported by directory buckets. Passes transformed objects to a +GetObject operation when using Object Lambda access points. For information about Object +Lambda access points, see Transforming objects with Object Lambda access points in the +Amazon S3 User Guide. 
This operation supports metadata that can be returned by GetObject, +in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. The +GetObject response metadata is supported so that the WriteGetObjectResponse caller, +typically an Lambda function, can provide the same metadata when it internally invokes +GetObject. When WriteGetObjectResponse is called by a customer-owned Lambda function, the +metadata returned to the end user GetObject call might differ from what Amazon S3 would +normally return. You can include any number of metadata headers. When including a metadata +header, it should be prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header: +MyCustomValue. The primary use case for this is to forward GetObject metadata. Amazon Web +Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to +detect and redact personally identifiable information (PII) and decompress S3 objects. +These Lambda functions are available in the Amazon Web Services Serverless Application +Repository, and can be selected through the Amazon Web Services Management Console when you +create your Object Lambda access point. Example 1: PII Access Control - This Lambda +function uses Amazon Comprehend, a natural language processing (NLP) service using machine +learning to find insights and relationships in text. It automatically detects personally +identifiable information (PII) such as names, addresses, dates, credit card numbers, and +social security numbers from documents in your Amazon S3 bucket. Example 2: PII Redaction +- This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service +using machine learning to find insights and relationships in text. It automatically redacts +personally identifiable information (PII) such as names, addresses, dates, credit card +numbers, and social security numbers from documents in your Amazon S3 bucket. Example 3: +Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress +objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, +zlib, zstandard and ZIP. For information on how to view and use these functions, see Using +Amazon Web Services built Lambda functions in the Amazon S3 User Guide. # Arguments - `x-amz-request-route`: Route prefix to the HTTP URL generated. @@ -7001,8 +8129,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-fwd-header-x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing requested object in Amazon S3 (for example, AES256, aws:kms). - `"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id"`: If present, specifies - the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) - symmetric encryption customer managed key that was used for stored in Amazon S3 object. + the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web Services Key Management Service + (Amazon Web Services KMS) symmetric encryption customer managed key that was used for + stored in Amazon S3 object. - `"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled"`: Indicates whether the object stored in Amazon S3 uses an S3 bucket key for server-side encryption with Amazon Web Services KMS (SSE-KMS). 
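# Editorial example (not part of the generated definitions): a minimal sketch of the
# multipart upload flow documented above for general purpose buckets. The bucket, key, and
# file names are placeholders; passing the part body through the "body" params key and
# reading "UploadId" from the parsed response are assumptions based on common AWS.jl usage.
using AWS: @service
@service S3

bucket, key = "amzn-s3-demo-bucket", "big-object.bin"
init = S3.create_multipart_upload(bucket, key)
upload_id = init["UploadId"]                    # unique ID returned by the initiate request
S3.upload_part(bucket, key, 1, upload_id, Dict("body" => read("part-1.bin")))
# Upload any remaining parts (with upload_part or upload_part_copy), then either complete
# or abort the upload; otherwise Amazon S3 keeps charging for the stored parts. For example:
S3.abort_multipart_upload(bucket, key, upload_id)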
diff --git a/src/services/s3_control.jl b/src/services/s3_control.jl index 06497ce9b0..3085f990c4 100644 --- a/src/services/s3_control.jl +++ b/src/services/s3_control.jl @@ -4,21 +4,317 @@ using AWS.AWSServices: s3_control using AWS.Compat using AWS.UUIDs +""" + associate_access_grants_identity_center(identity_center_arn, x-amz-account-id) + associate_access_grants_identity_center(identity_center_arn, x-amz-account-id, params::Dict{String,<:Any}) + +Associate your S3 Access Grants instance with an Amazon Web Services IAM Identity Center +instance. Use this action if you want to create access grants for users or groups from your +corporate identity directory. First, you must add your corporate identity directory to +Amazon Web Services IAM Identity Center. Then, you can associate this IAM Identity Center +instance with your S3 Access Grants instance. Permissions You must have the +s3:AssociateAccessGrantsIdentityCenter permission to use this operation. Additional +Permissions You must also have the following permissions: sso:CreateApplication, +sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod. + +# Arguments +- `identity_center_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services IAM + Identity Center instance that you are associating with your S3 Access Grants instance. An + IAM Identity Center instance is your corporate identity directory that you added to the IAM + Identity Center. You can use the ListInstances API operation to retrieve a list of your + Identity Center instances and their ARNs. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +""" +function associate_access_grants_identity_center( + IdentityCenterArn, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance/identitycenter", + Dict{String,Any}( + "IdentityCenterArn" => IdentityCenterArn, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_access_grants_identity_center( + IdentityCenterArn, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance/identitycenter", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IdentityCenterArn" => IdentityCenterArn, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_access_grant(access_grants_location_id, grantee, permission, x-amz-account-id) + create_access_grant(access_grants_location_id, grantee, permission, x-amz-account-id, params::Dict{String,<:Any}) + +Creates an access grant that gives a grantee access to your S3 data. The grantee can be an +IAM user or role or a directory user, or group. Before you can create a grant, you must +have an S3 Access Grants instance in the same Region as the S3 data. You can create an S3 +Access Grants instance using the CreateAccessGrantsInstance. You must also have registered +at least one S3 data location in your S3 Access Grants instance using +CreateAccessGrantsLocation. Permissions You must have the s3:CreateAccessGrant +permission to use this operation. 
Additional Permissions For any directory identity - +sso:DescribeInstance and sso:DescribeApplication For directory users - +identitystore:DescribeUser For directory groups - identitystore:DescribeGroup + +# Arguments +- `access_grants_location_id`: The ID of the registered location to which you are granting + access. S3 Access Grants assigns this ID when you register the location. S3 Access Grants + assigns the ID default to the default location s3:// and assigns an auto-generated ID to + other locations that you register. If you are passing the default location, you cannot + create an access grant for the entire default location. You must also specify a bucket or a + bucket and prefix in the Subprefix field. +- `grantee`: The user, group, or role to which you are granting access. You can grant + access to an IAM user or role. If you have added your corporate directory to Amazon Web + Services IAM Identity Center and associated your Identity Center instance with your S3 + Access Grants instance, the grantee can also be a corporate directory user or group. +- `permission`: The type of access that you are granting to your S3 data, which can be set + to one of the following values: READ – Grant read-only access to the S3 data. WRITE + – Grant write-only access to the S3 data. READWRITE – Grant both read and write + access to the S3 data. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessGrantsLocationConfiguration"`: The configuration options of the grant location. + The grant location is the S3 path to the data to which you are granting access. It contains + the S3SubPrefix field. The grant scope is the result of appending the subprefix to the + location scope of the registered location. +- `"ApplicationArn"`: The Amazon Resource Name (ARN) of an Amazon Web Services IAM Identity + Center application associated with your Identity Center instance. If an application ARN is + included in the request to create an access grant, the grantee can only access the S3 data + through this application. +- `"S3PrefixType"`: The type of S3SubPrefix. The only possible value is Object. Pass this + value if the access grant scope is an object. Do not pass this value if the access grant + scope is a bucket or a bucket and a prefix. +- `"Tags"`: The Amazon Web Services resource tags that you are adding to the access grant. + Each tag is a label consisting of a user-defined key and value. Tags can help you manage, + identify, organize, search for, and filter resources. 
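+
+# Example
+
+A minimal, illustrative call (an editorial sketch, not generated from the service
+definition). The location ID, account ID, and role ARN are placeholders, the `S3_Control`
+module name assumes the usual `@service` macro mapping to the `s3_control` service, and the
+`GranteeType`/`GranteeIdentifier` shape of the `grantee` argument is an assumption:
+
+```julia
+using AWS: @service
+@service S3_Control
+
+S3_Control.create_access_grant(
+    "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",   # ID of a registered location (placeholder)
+    Dict(
+        "GranteeType" => "IAM",
+        "GranteeIdentifier" => "arn:aws:iam::111122223333:role/data-consumer",
+    ),
+    "READ",
+    "111122223333",                           # account that owns the Access Grants instance
+)
+```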
+""" +function create_access_grant( + AccessGrantsLocationId, + Grantee, + Permission, + x_amz_account_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance/grant", + Dict{String,Any}( + "AccessGrantsLocationId" => AccessGrantsLocationId, + "Grantee" => Grantee, + "Permission" => Permission, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_access_grant( + AccessGrantsLocationId, + Grantee, + Permission, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance/grant", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AccessGrantsLocationId" => AccessGrantsLocationId, + "Grantee" => Grantee, + "Permission" => Permission, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_access_grants_instance(x-amz-account-id) + create_access_grants_instance(x-amz-account-id, params::Dict{String,<:Any}) + +Creates an S3 Access Grants instance, which serves as a logical grouping for access grants. +You can create one S3 Access Grants instance per Region per account. Permissions You +must have the s3:CreateAccessGrantsInstance permission to use this operation. Additional +Permissions To associate an IAM Identity Center instance with your S3 Access Grants +instance, you must also have the sso:DescribeInstance, sso:CreateApplication, +sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod permissions. + +# Arguments +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IdentityCenterArn"`: If you would like to associate your S3 Access Grants instance with + an Amazon Web Services IAM Identity Center instance, use this field to pass the Amazon + Resource Name (ARN) of the Amazon Web Services IAM Identity Center instance that you are + associating with your S3 Access Grants instance. An IAM Identity Center instance is your + corporate identity directory that you added to the IAM Identity Center. You can use the + ListInstances API operation to retrieve a list of your Identity Center instances and their + ARNs. +- `"Tags"`: The Amazon Web Services resource tags that you are adding to the S3 Access + Grants instance. Each tag is a label consisting of a user-defined key and value. Tags can + help you manage, identify, organize, search for, and filter resources. 
+""" +function create_access_grants_instance( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_access_grants_instance( + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_access_grants_location(iamrole_arn, location_scope, x-amz-account-id) + create_access_grants_location(iamrole_arn, location_scope, x-amz-account-id, params::Dict{String,<:Any}) + +The S3 data location that you would like to register in your S3 Access Grants instance. +Your S3 data must be in the same Region as your S3 Access Grants instance. The location can +be one of the following: The default S3 location s3:// A bucket - +S3://<bucket-name> A bucket and prefix - S3://<bucket-name>/<prefix> + When you register a location, you must include the IAM role that has permission to manage +the S3 location that you are registering. Give S3 Access Grants permission to assume this +role using a policy. S3 Access Grants assumes this role to manage access to the location +and to vend temporary credentials to grantees or client applications. Permissions You +must have the s3:CreateAccessGrantsLocation permission to use this operation. Additional +Permissions You must also have the following permission for the specified IAM role: +iam:PassRole + +# Arguments +- `iamrole_arn`: The Amazon Resource Name (ARN) of the IAM role for the registered + location. S3 Access Grants assumes this role to manage access to the registered location. +- `location_scope`: The S3 path to the location that you are registering. The location + scope can be the default S3 location s3://, the S3 path to a bucket s3://<bucket>, or + the S3 path to a bucket and prefix s3://<bucket>/<prefix>. A prefix in S3 is a + string of characters at the beginning of an object key name used to organize the objects + that you store in your S3 buckets. For example, object key names that start with the + engineering/ prefix or object key names that start with the marketing/campaigns/ prefix. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The Amazon Web Services resource tags that you are adding to the S3 Access + Grants location. Each tag is a label consisting of a user-defined key and value. Tags can + help you manage, identify, organize, search for, and filter resources. 
+""" +function create_access_grants_location( + IAMRoleArn, + LocationScope, + x_amz_account_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance/location", + Dict{String,Any}( + "IAMRoleArn" => IAMRoleArn, + "LocationScope" => LocationScope, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_access_grants_location( + IAMRoleArn, + LocationScope, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/accessgrantsinstance/location", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IAMRoleArn" => IAMRoleArn, + "LocationScope" => LocationScope, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_access_point(bucket, name, x-amz-account-id) create_access_point(bucket, name, x-amz-account-id, params::Dict{String,<:Any}) -Creates an access point and associates it with the specified bucket. For more information, -see Managing Data Access with Amazon S3 Access Points in the Amazon S3 User Guide. S3 on -Outposts only supports VPC-style access points. For more information, see Accessing -Amazon S3 on Outposts using virtual private cloud (VPC) only access points in the Amazon S3 -User Guide. All Amazon S3 on Outposts REST API requests for this action require an -additional parameter of x-amz-outpost-id to be passed with the request. In addition, you -must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example -of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint -hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the -Examples section. The following actions are related to CreateAccessPoint: -GetAccessPoint DeleteAccessPoint ListAccessPoints + This operation is not supported by directory buckets. Creates an access point and +associates it with the specified bucket. For more information, see Managing Data Access +with Amazon S3 Access Points in the Amazon S3 User Guide. S3 on Outposts only supports +VPC-style access points. For more information, see Accessing Amazon S3 on Outposts using +virtual private cloud (VPC) only access points in the Amazon S3 User Guide. All Amazon S3 +on Outposts REST API requests for this action require an additional parameter of +x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts +endpoint hostname prefix instead of s3-control. For an example of the request syntax for +Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the +x-amz-outpost-id derived by using the access point ARN, see the Examples section. The +following actions are related to CreateAccessPoint: GetAccessPoint DeleteAccessPoint + ListAccessPoints # Arguments - `bucket`: The name of the bucket that you want to associate this access point with. For @@ -38,7 +334,10 @@ GetAccessPoint DeleteAccessPoint ListAccessPoints # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BucketAccountId"`: The Amazon Web Services account ID associated with the S3 bucket - associated with this access point. + associated with this access point. 
For same account access point when your bucket and + access point belong to the same account owner, the BucketAccountId is not required. For + cross-account access point when your bucket and access point are not in the same account, + the BucketAccountId is required. - `"PublicAccessBlockConfiguration"`: The PublicAccessBlock configuration that you want to apply to the access point. - `"VpcConfiguration"`: If you include this field, Amazon S3 restricts access to this @@ -88,9 +387,10 @@ end create_access_point_for_object_lambda(configuration, name, x-amz-account-id) create_access_point_for_object_lambda(configuration, name, x-amz-account-id, params::Dict{String,<:Any}) -Creates an Object Lambda Access Point. For more information, see Transforming objects with -Object Lambda Access Points in the Amazon S3 User Guide. The following actions are related -to CreateAccessPointForObjectLambda: DeleteAccessPointForObjectLambda + This operation is not supported by directory buckets. Creates an Object Lambda Access +Point. For more information, see Transforming objects with Object Lambda Access Points in +the Amazon S3 User Guide. The following actions are related to +CreateAccessPointForObjectLambda: DeleteAccessPointForObjectLambda GetAccessPointForObjectLambda ListAccessPointsForObjectLambda # Arguments @@ -205,11 +505,13 @@ end create_job(client_request_token, operation, priority, report, role_arn, x-amz-account-id) create_job(client_request_token, operation, priority, report, role_arn, x-amz-account-id, params::Dict{String,<:Any}) -You can use S3 Batch Operations to perform large-scale batch actions on Amazon S3 objects. -Batch Operations can run a single action on lists of Amazon S3 objects that you specify. -For more information, see S3 Batch Operations in the Amazon S3 User Guide. This action -creates a S3 Batch Operations job. Related actions include: DescribeJob ListJobs - UpdateJobPriority UpdateJobStatus JobOperation +This operation creates an S3 Batch Operations job. You can use S3 Batch Operations to +perform large-scale batch actions on Amazon S3 objects. Batch Operations can run a single +action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch +Operations in the Amazon S3 User Guide. Permissions For information about permissions +required to use the Batch Operations, see Granting permissions for S3 Batch Operations in +the Amazon S3 User Guide. Related actions include: DescribeJob ListJobs +UpdateJobPriority UpdateJobStatus JobOperation # Arguments - `client_request_token`: An idempotency token to ensure that you don't accidentally submit @@ -296,11 +598,12 @@ end create_multi_region_access_point(client_token, details, x-amz-account-id) create_multi_region_access_point(client_token, details, x-amz-account-id, params::Dict{String,<:Any}) -Creates a Multi-Region Access Point and associates it with the specified buckets. For more -information about creating Multi-Region Access Points, see Creating Multi-Region Access -Points in the Amazon S3 User Guide. This action will always be routed to the US West -(Oregon) Region. For more information about the restrictions around managing Multi-Region -Access Points, see Managing Multi-Region Access Points in the Amazon S3 User Guide. This + This operation is not supported by directory buckets. Creates a Multi-Region Access Point +and associates it with the specified buckets. For more information about creating +Multi-Region Access Points, see Creating Multi-Region Access Points in the Amazon S3 User +Guide. 
This action will always be routed to the US West (Oregon) Region. For more +information about the restrictions around working with Multi-Region Access Points, see +Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide. This request is asynchronous, meaning that you might receive a response before the command has completed. When this request provides a response, it provides a token that you can use to monitor the status of the request with DescribeMultiRegionAccessPointOperation. The @@ -362,58 +665,60 @@ function create_multi_region_access_point( end """ - delete_access_point(name, x-amz-account-id) - delete_access_point(name, x-amz-account-id, params::Dict{String,<:Any}) + create_storage_lens_group(storage_lens_group, x-amz-account-id) + create_storage_lens_group(storage_lens_group, x-amz-account-id, params::Dict{String,<:Any}) -Deletes the specified access point. All Amazon S3 on Outposts REST API requests for this -action require an additional parameter of x-amz-outpost-id to be passed with the request. -In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. -For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts -endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, -see the Examples section. The following actions are related to DeleteAccessPoint: -CreateAccessPoint GetAccessPoint ListAccessPoints + Creates a new S3 Storage Lens group and associates it with the specified Amazon Web +Services account ID. An S3 Storage Lens group is a custom grouping of objects based on +prefix, suffix, object tags, object size, object age, or a combination of these filters. +For each Storage Lens group that you’ve created, you can also optionally add Amazon Web +Services resource tags. For more information about S3 Storage Lens groups, see Working with +S3 Storage Lens groups. To use this operation, you must have the permission to perform the +s3:CreateStorageLensGroup action. If you’re trying to create a Storage Lens group with +Amazon Web Services resource tags, you must also have permission to perform the +s3:TagResource action. For more information about the required Storage Lens Groups +permissions, see Setting account permissions to use S3 Storage Lens groups. For information +about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes. # Arguments -- `name`: The name of the access point you want to delete. For using this parameter with - Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id - as well. For using this parameter with S3 on Outposts with the Amazon Web Services SDK and - CLI, you must specify the ARN of the access point accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint - /<my-accesspoint-name>. For example, to access the access point reports-ap through - Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding - of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. - The value must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID for the account that owns the - specified access point. +- `storage_lens_group`: The Storage Lens group configuration. +- `x-amz-account-id`: The Amazon Web Services account ID that the Storage Lens group is + created from and associated with. 
+# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The Amazon Web Services resource tags that you're adding to your Storage Lens + group. This parameter is optional. """ -function delete_access_point( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function create_storage_lens_group( + StorageLensGroup, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( - "DELETE", - "/v20180820/accesspoint/$(name)", + "POST", + "/v20180820/storagelensgroup", Dict{String,Any}( - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + "StorageLensGroup" => StorageLensGroup, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_access_point( - name, +function create_storage_lens_group( + StorageLensGroup, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( - "DELETE", - "/v20180820/accesspoint/$(name)", + "POST", + "/v20180820/storagelensgroup", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + "StorageLensGroup" => StorageLensGroup, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), ), params, ), @@ -424,25 +729,25 @@ function delete_access_point( end """ - delete_access_point_for_object_lambda(name, x-amz-account-id) - delete_access_point_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + delete_access_grant(id, x-amz-account-id) + delete_access_grant(id, x-amz-account-id, params::Dict{String,<:Any}) -Deletes the specified Object Lambda Access Point. The following actions are related to -DeleteAccessPointForObjectLambda: CreateAccessPointForObjectLambda -GetAccessPointForObjectLambda ListAccessPointsForObjectLambda +Deletes the access grant from the S3 Access Grants instance. You cannot undo an access +grant deletion and the grantee will no longer have access to the S3 data. Permissions You +must have the s3:DeleteAccessGrant permission to use this operation. # Arguments -- `name`: The name of the access point you want to delete. -- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda - Access Point. +- `id`: The ID of the access grant. S3 Access Grants auto-generates this ID when you create + the access grant. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. 
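+
+# Example
+
+An illustrative call (an editorial sketch, not generated from the service definition); the
+grant ID and account ID are placeholders:
+
+```julia
+using AWS: @service
+@service S3_Control
+
+S3_Control.delete_access_grant("a1b2c3d4-5678-90ab-cdef-EXAMPLE22222", "111122223333")
+```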
""" -function delete_access_point_for_object_lambda( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function delete_access_grant( + id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/accesspointforobjectlambda/$(name)", + "/v20180820/accessgrantsinstance/grant/$(id)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -450,15 +755,15 @@ function delete_access_point_for_object_lambda( feature_set=SERVICE_FEATURE_SET, ) end -function delete_access_point_for_object_lambda( - name, +function delete_access_grant( + id, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "DELETE", - "/v20180820/accesspointforobjectlambda/$(name)", + "/v20180820/accessgrantsinstance/grant/$(id)", Dict{String,Any}( mergewith( _merge, @@ -474,36 +779,27 @@ function delete_access_point_for_object_lambda( end """ - delete_access_point_policy(name, x-amz-account-id) - delete_access_point_policy(name, x-amz-account-id, params::Dict{String,<:Any}) + delete_access_grants_instance(x-amz-account-id) + delete_access_grants_instance(x-amz-account-id, params::Dict{String,<:Any}) -Deletes the access point policy for the specified access point. All Amazon S3 on Outposts -REST API requests for this action require an additional parameter of x-amz-outpost-id to be -passed with the request. In addition, you must use an S3 on Outposts endpoint hostname -prefix instead of s3-control. For an example of the request syntax for Amazon S3 on -Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id -derived by using the access point ARN, see the Examples section. The following actions are -related to DeleteAccessPointPolicy: PutAccessPointPolicy GetAccessPointPolicy +Deletes your S3 Access Grants instance. You must first delete the access grants and +locations before S3 Access Grants can delete the instance. See DeleteAccessGrant and +DeleteAccessGrantsLocation. If you have associated an IAM Identity Center instance with +your S3 Access Grants instance, you must first dissassociate the Identity Center instance +from the S3 Access Grants instance before you can delete the S3 Access Grants instance. See +AssociateAccessGrantsIdentityCenter and DissociateAccessGrantsIdentityCenter. Permissions +You must have the s3:DeleteAccessGrantsInstance permission to use this operation. # Arguments -- `name`: The name of the access point whose policy you want to delete. For using this - parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the - x-amz-outpost-id as well. For using this parameter with S3 on Outposts with the Amazon Web - Services SDK and CLI, you must specify the ARN of the access point accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint - /<my-accesspoint-name>. For example, to access the access point reports-ap through - Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding - of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. - The value must be URL encoded. -- `x-amz-account-id`: The account ID for the account that owns the specified access point. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. 
""" -function delete_access_point_policy( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function delete_access_grants_instance( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/accesspoint/$(name)/policy", + "/v20180820/accessgrantsinstance", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -511,15 +807,14 @@ function delete_access_point_policy( feature_set=SERVICE_FEATURE_SET, ) end -function delete_access_point_policy( - name, +function delete_access_grants_instance( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "DELETE", - "/v20180820/accesspoint/$(name)/policy", + "/v20180820/accessgrantsinstance", Dict{String,Any}( mergewith( _merge, @@ -535,25 +830,25 @@ function delete_access_point_policy( end """ - delete_access_point_policy_for_object_lambda(name, x-amz-account-id) - delete_access_point_policy_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + delete_access_grants_instance_resource_policy(x-amz-account-id) + delete_access_grants_instance_resource_policy(x-amz-account-id, params::Dict{String,<:Any}) -Removes the resource policy for an Object Lambda Access Point. The following actions are -related to DeleteAccessPointPolicyForObjectLambda: GetAccessPointPolicyForObjectLambda - PutAccessPointPolicyForObjectLambda +Deletes the resource policy of the S3 Access Grants instance. The resource policy is used +to manage cross-account access to your S3 Access Grants instance. By deleting the resource +policy, you delete any cross-account permissions to your S3 Access Grants instance. +Permissions You must have the s3:DeleteAccessGrantsInstanceResourcePolicy permission to +use this operation. # Arguments -- `name`: The name of the Object Lambda Access Point you want to delete the policy for. -- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda - Access Point. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. """ -function delete_access_point_policy_for_object_lambda( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function delete_access_grants_instance_resource_policy( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/accesspointforobjectlambda/$(name)/policy", + "/v20180820/accessgrantsinstance/resourcepolicy", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -561,15 +856,14 @@ function delete_access_point_policy_for_object_lambda( feature_set=SERVICE_FEATURE_SET, ) end -function delete_access_point_policy_for_object_lambda( - name, +function delete_access_grants_instance_resource_policy( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "DELETE", - "/v20180820/accesspointforobjectlambda/$(name)/policy", + "/v20180820/accessgrantsinstance/resourcepolicy", Dict{String,Any}( mergewith( _merge, @@ -585,40 +879,30 @@ function delete_access_point_policy_for_object_lambda( end """ - delete_bucket(name, x-amz-account-id) - delete_bucket(name, x-amz-account-id, params::Dict{String,<:Any}) + delete_access_grants_location(id, x-amz-account-id) + delete_access_grants_location(id, x-amz-account-id, params::Dict{String,<:Any}) - This action deletes an Amazon S3 on Outposts bucket. 
To delete an S3 bucket, see -DeleteBucket in the Amazon S3 API Reference. Deletes the Amazon S3 on Outposts bucket. -All objects (including all object versions and delete markers) in the bucket must be -deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 -on Outposts in Amazon S3 User Guide. All Amazon S3 on Outposts REST API requests for this -action require an additional parameter of x-amz-outpost-id to be passed with the request. -In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. -For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts -endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, -see the Examples section. Related Resources CreateBucket GetBucket -DeleteObject +Deregisters a location from your S3 Access Grants instance. You can only delete a location +registration from an S3 Access Grants instance if there are no grants associated with this +location. See Delete a grant for information on how to delete grants. You need to have at +least one registered location in your S3 Access Grants instance in order to create access +grants. Permissions You must have the s3:DeleteAccessGrantsLocation permission to use +this operation. # Arguments -- `name`: Specifies the bucket being deleted. For using this parameter with Amazon S3 on - Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well. For - using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must - specify the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The account ID that owns the Outposts bucket. +- `id`: The ID of the registered location that you are deregistering from your S3 Access + Grants instance. S3 Access Grants assigned this ID when you registered the location. S3 + Access Grants assigns the ID default to the default location s3:// and assigns an + auto-generated ID to other locations that you register. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. 
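+
+# Example
+
+A hedged sketch (not from the generated definitions); the account ID is a placeholder,
+and `@service S3_Control` is assumed. The location ID `default` is the ID that S3 Access
+Grants assigns to the default `s3://` location, per the argument description above.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Deregister the default location from the account's S3 Access Grants instance.
+S3_Control.delete_access_grants_location("default", "111122223333")
+```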
""" -function delete_bucket( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function delete_access_grants_location( + id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)", + "/v20180820/accessgrantsinstance/location/$(id)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -626,15 +910,15 @@ function delete_bucket( feature_set=SERVICE_FEATURE_SET, ) end -function delete_bucket( - name, +function delete_access_grants_location( + id, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)", + "/v20180820/accessgrantsinstance/location/$(id)", Dict{String,Any}( mergewith( _merge, @@ -650,46 +934,38 @@ function delete_bucket( end """ - delete_bucket_lifecycle_configuration(name, x-amz-account-id) - delete_bucket_lifecycle_configuration(name, x-amz-account-id, params::Dict{String,<:Any}) + delete_access_point(name, x-amz-account-id) + delete_access_point(name, x-amz-account-id, params::Dict{String,<:Any}) - This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete -an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon S3 API -Reference. Deletes the lifecycle configuration from the specified Outposts bucket. Amazon -S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource -associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer -automatically deletes any objects on the basis of rules contained in the deleted lifecycle -configuration. For more information, see Using Amazon S3 on Outposts in Amazon S3 User -Guide. To use this action, you must have permission to perform the -s3-outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this -permission and the Outposts bucket owner can grant this permission to others. All Amazon S3 -on Outposts REST API requests for this action require an additional parameter of -x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts -endpoint hostname prefix instead of s3-control. For an example of the request syntax for -Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the -x-amz-outpost-id derived by using the access point ARN, see the Examples section. For more -information about object expiration, see Elements to Describe Lifecycle Actions. Related -actions include: PutBucketLifecycleConfiguration GetBucketLifecycleConfiguration + This operation is not supported by directory buckets. Deletes the specified access point. +All Amazon S3 on Outposts REST API requests for this action require an additional parameter +of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on +Outposts endpoint hostname prefix instead of s3-control. For an example of the request +syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and +the x-amz-outpost-id derived by using the access point ARN, see the Examples section. The +following actions are related to DeleteAccessPoint: CreateAccessPoint GetAccessPoint + ListAccessPoints # Arguments -- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with - the REST API, you must specify the name and the x-amz-outpost-id as well. 
For using this - parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify - the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The account ID of the lifecycle configuration to delete. +- `name`: The name of the access point you want to delete. For using this parameter with + Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id + as well. For using this parameter with S3 on Outposts with the Amazon Web Services SDK and + CLI, you must specify the ARN of the access point accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint + /<my-accesspoint-name>. For example, to access the access point reports-ap through + Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding + of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + The value must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID for the account that owns the + specified access point. """ -function delete_bucket_lifecycle_configuration( +function delete_access_point( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/lifecycleconfiguration", + "/v20180820/accesspoint/$(name)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -697,7 +973,7 @@ function delete_bucket_lifecycle_configuration( feature_set=SERVICE_FEATURE_SET, ) end -function delete_bucket_lifecycle_configuration( +function delete_access_point( name, x_amz_account_id, params::AbstractDict{String}; @@ -705,7 +981,7 @@ function delete_bucket_lifecycle_configuration( ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/lifecycleconfiguration", + "/v20180820/accesspoint/$(name)", Dict{String,Any}( mergewith( _merge, @@ -721,50 +997,26 @@ function delete_bucket_lifecycle_configuration( end """ - delete_bucket_policy(name, x-amz-account-id) - delete_bucket_policy(name, x-amz-account-id, params::Dict{String,<:Any}) - - This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, -see DeleteBucketPolicy in the Amazon S3 API Reference. This implementation of the DELETE -action uses the policy subresource to delete the policy of a specified Amazon S3 on -Outposts bucket. If you are using an identity other than the root user of the Amazon Web -Services account that owns the bucket, the calling identity must have the -s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to -the bucket owner's account to use this action. For more information, see Using Amazon S3 on -Outposts in Amazon S3 User Guide. If you don't have DeleteBucketPolicy permissions, Amazon -S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not -using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -Method Not Allowed error. 
As a security precaution, the root user of the Amazon Web -Services account that owns a bucket can always use this action, even if the policy -explicitly denies the root user the ability to perform this action. For more information -about bucket policies, see Using Bucket Policies and User Policies. All Amazon S3 on -Outposts REST API requests for this action require an additional parameter of -x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts -endpoint hostname prefix instead of s3-control. For an example of the request syntax for -Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the -x-amz-outpost-id derived by using the access point ARN, see the Examples section. The -following actions are related to DeleteBucketPolicy: GetBucketPolicy PutBucketPolicy + delete_access_point_for_object_lambda(name, x-amz-account-id) + delete_access_point_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + This operation is not supported by directory buckets. Deletes the specified Object Lambda +Access Point. The following actions are related to DeleteAccessPointForObjectLambda: +CreateAccessPointForObjectLambda GetAccessPointForObjectLambda +ListAccessPointsForObjectLambda # Arguments -- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with - the REST API, you must specify the name and the x-amz-outpost-id as well. For using this - parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify - the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The account ID of the Outposts bucket. +- `name`: The name of the access point you want to delete. +- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda + Access Point. """ -function delete_bucket_policy( +function delete_access_point_for_object_lambda( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/policy", + "/v20180820/accesspointforobjectlambda/$(name)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -772,7 +1024,7 @@ function delete_bucket_policy( feature_set=SERVICE_FEATURE_SET, ) end -function delete_bucket_policy( +function delete_access_point_for_object_lambda( name, x_amz_account_id, params::AbstractDict{String}; @@ -780,7 +1032,7 @@ function delete_bucket_policy( ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/policy", + "/v20180820/accesspointforobjectlambda/$(name)", Dict{String,Any}( mergewith( _merge, @@ -796,51 +1048,37 @@ function delete_bucket_policy( end """ - delete_bucket_replication(name, x-amz-account-id) - delete_bucket_replication(name, x-amz-account-id, params::Dict{String,<:Any}) + delete_access_point_policy(name, x-amz-account-id) + delete_access_point_policy(name, x-amz-account-id, params::Dict{String,<:Any}) - This operation deletes an Amazon S3 on Outposts bucket's replication configuration. To -delete an S3 bucket's replication configuration, see DeleteBucketReplication in the Amazon -S3 API Reference. 
Deletes the replication configuration from the specified S3 on Outposts -bucket. To use this operation, you must have permissions to perform the -s3-outposts:PutReplicationConfiguration action. The Outposts bucket owner has this -permission by default and can grant it to others. For more information about permissions, -see Setting up IAM with S3 on Outposts and Managing access to S3 on Outposts buckets in the -Amazon S3 User Guide. It can take a while to propagate PUT or DELETE requests for a -replication configuration to all S3 on Outposts systems. Therefore, the replication -configuration that's returned by a GET request soon after a PUT or DELETE request might -return a more recent result than what's on the Outpost. If an Outpost is offline, the delay -in updating the replication configuration on that Outpost can be significant. All Amazon -S3 on Outposts REST API requests for this action require an additional parameter of -x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts -endpoint hostname prefix instead of s3-control. For an example of the request syntax for -Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the -x-amz-outpost-id derived by using the access point ARN, see the Examples section. For -information about S3 replication on Outposts configuration, see Replicating objects for S3 -on Outposts in the Amazon S3 User Guide. The following operations are related to -DeleteBucketReplication: PutBucketReplication GetBucketReplication + This operation is not supported by directory buckets. Deletes the access point policy for +the specified access point. All Amazon S3 on Outposts REST API requests for this action +require an additional parameter of x-amz-outpost-id to be passed with the request. In +addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. +For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts +endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, +see the Examples section. The following actions are related to DeleteAccessPointPolicy: +PutAccessPointPolicy GetAccessPointPolicy # Arguments -- `name`: Specifies the S3 on Outposts bucket to delete the replication configuration for. - For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the - name and the x-amz-outpost-id as well. For using this parameter with S3 on Outposts with - the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the - format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket to delete - the replication configuration for. +- `name`: The name of the access point whose policy you want to delete. For using this + parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the + x-amz-outpost-id as well. For using this parameter with S3 on Outposts with the Amazon Web + Services SDK and CLI, you must specify the ARN of the access point accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint + /<my-accesspoint-name>. 
For example, to access the access point reports-ap through + Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding + of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + The value must be URL encoded. +- `x-amz-account-id`: The account ID for the account that owns the specified access point. """ -function delete_bucket_replication( +function delete_access_point_policy( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/replication", + "/v20180820/accesspoint/$(name)/policy", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -848,7 +1086,7 @@ function delete_bucket_replication( feature_set=SERVICE_FEATURE_SET, ) end -function delete_bucket_replication( +function delete_access_point_policy( name, x_amz_account_id, params::AbstractDict{String}; @@ -856,7 +1094,7 @@ function delete_bucket_replication( ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/replication", + "/v20180820/accesspoint/$(name)/policy", Dict{String,Any}( mergewith( _merge, @@ -872,42 +1110,26 @@ function delete_bucket_replication( end """ - delete_bucket_tagging(name, x-amz-account-id) - delete_bucket_tagging(name, x-amz-account-id, params::Dict{String,<:Any}) + delete_access_point_policy_for_object_lambda(name, x-amz-account-id) + delete_access_point_policy_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) - This action deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, -see DeleteBucketTagging in the Amazon S3 API Reference. Deletes the tags from the -Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon S3 User -Guide. To use this action, you must have permission to perform the PutBucketTagging action. -By default, the bucket owner has this permission and can grant this permission to others. -All Amazon S3 on Outposts REST API requests for this action require an additional parameter -of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on -Outposts endpoint hostname prefix instead of s3-control. For an example of the request -syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and -the x-amz-outpost-id derived by using the access point ARN, see the Examples section. The -following actions are related to DeleteBucketTagging: GetBucketTagging -PutBucketTagging + This operation is not supported by directory buckets. Removes the resource policy for an +Object Lambda Access Point. The following actions are related to +DeleteAccessPointPolicyForObjectLambda: GetAccessPointPolicyForObjectLambda +PutAccessPointPolicyForObjectLambda # Arguments -- `name`: The bucket ARN that has the tag set to be removed. For using this parameter with - Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id - as well. For using this parameter with S3 on Outposts with the Amazon Web Services SDK and - CLI, you must specify the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. 
-- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket tag set to - be removed. +- `name`: The name of the Object Lambda Access Point you want to delete the policy for. +- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda + Access Point. """ -function delete_bucket_tagging( +function delete_access_point_policy_for_object_lambda( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/tagging", + "/v20180820/accesspointforobjectlambda/$(name)/policy", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -915,7 +1137,7 @@ function delete_bucket_tagging( feature_set=SERVICE_FEATURE_SET, ) end -function delete_bucket_tagging( +function delete_access_point_policy_for_object_lambda( name, x_amz_account_id, params::AbstractDict{String}; @@ -923,7 +1145,7 @@ function delete_bucket_tagging( ) return s3_control( "DELETE", - "/v20180820/bucket/$(name)/tagging", + "/v20180820/accesspointforobjectlambda/$(name)/policy", Dict{String,Any}( mergewith( _merge, @@ -939,27 +1161,40 @@ function delete_bucket_tagging( end """ - delete_job_tagging(id, x-amz-account-id) - delete_job_tagging(id, x-amz-account-id, params::Dict{String,<:Any}) + delete_bucket(name, x-amz-account-id) + delete_bucket(name, x-amz-account-id, params::Dict{String,<:Any}) -Removes the entire tag set from the specified S3 Batch Operations job. To use the -DeleteJobTagging operation, you must have permission to perform the s3:DeleteJobTagging -action. For more information, see Controlling access and labeling jobs using tags in the -Amazon S3 User Guide. Related actions include: CreateJob GetJobTagging -PutJobTagging + This action deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see +DeleteBucket in the Amazon S3 API Reference. Deletes the Amazon S3 on Outposts bucket. +All objects (including all object versions and delete markers) in the bucket must be +deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 +on Outposts in Amazon S3 User Guide. All Amazon S3 on Outposts REST API requests for this +action require an additional parameter of x-amz-outpost-id to be passed with the request. +In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. +For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts +endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, +see the Examples section. Related Resources CreateBucket GetBucket +DeleteObject # Arguments -- `id`: The ID for the S3 Batch Operations job whose tags you want to delete. -- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch - Operations job. +- `name`: Specifies the bucket being deleted. For using this parameter with Amazon S3 on + Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well. For + using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must + specify the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. 
+- `x-amz-account-id`: The account ID that owns the Outposts bucket. """ -function delete_job_tagging( - id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function delete_bucket( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "DELETE", - "/v20180820/jobs/$(id)/tagging", + "/v20180820/bucket/$(name)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -967,15 +1202,15 @@ function delete_job_tagging( feature_set=SERVICE_FEATURE_SET, ) end -function delete_job_tagging( - id, +function delete_bucket( + name, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "DELETE", - "/v20180820/jobs/$(id)/tagging", + "/v20180820/bucket/$(name)", Dict{String,Any}( mergewith( _merge, @@ -991,64 +1226,1128 @@ function delete_job_tagging( end """ - delete_multi_region_access_point(client_token, details, x-amz-account-id) - delete_multi_region_access_point(client_token, details, x-amz-account-id, params::Dict{String,<:Any}) + delete_bucket_lifecycle_configuration(name, x-amz-account-id) + delete_bucket_lifecycle_configuration(name, x-amz-account-id, params::Dict{String,<:Any}) + + This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete +an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon S3 API +Reference. Deletes the lifecycle configuration from the specified Outposts bucket. Amazon +S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource +associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer +automatically deletes any objects on the basis of rules contained in the deleted lifecycle +configuration. For more information, see Using Amazon S3 on Outposts in Amazon S3 User +Guide. To use this operation, you must have permission to perform the +s3-outposts:PutLifecycleConfiguration action. By default, the bucket owner has this +permission and the Outposts bucket owner can grant this permission to others. All Amazon S3 +on Outposts REST API requests for this action require an additional parameter of +x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts +endpoint hostname prefix instead of s3-control. For an example of the request syntax for +Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the +x-amz-outpost-id derived by using the access point ARN, see the Examples section. For more +information about object expiration, see Elements to Describe Lifecycle Actions. Related +actions include: PutBucketLifecycleConfiguration GetBucketLifecycleConfiguration + +# Arguments +- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with + the REST API, you must specify the name and the x-amz-outpost-id as well. For using this + parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify + the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The account ID of the lifecycle configuration to delete. 
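+
+# Example
+
+A rough sketch only; the Outposts bucket ARN echoes the placeholder from the argument
+description above, the account ID is made up, and `@service S3_Control` is an assumed
+way to load the module.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# NOTE: per the argument description above, this ARN value must be URL encoded;
+# it is shown unencoded here for readability.
+bucket_arn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports"
+S3_Control.delete_bucket_lifecycle_configuration(bucket_arn, "123456789012")
+```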
+ +""" +function delete_bucket_lifecycle_configuration( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/lifecycleconfiguration", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_bucket_lifecycle_configuration( + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/lifecycleconfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_bucket_policy(name, x-amz-account-id) + delete_bucket_policy(name, x-amz-account-id, params::Dict{String,<:Any}) + + This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, +see DeleteBucketPolicy in the Amazon S3 API Reference. This implementation of the DELETE +action uses the policy subresource to delete the policy of a specified Amazon S3 on +Outposts bucket. If you are using an identity other than the root user of the Amazon Web +Services account that owns the bucket, the calling identity must have the +s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to +the bucket owner's account to use this action. For more information, see Using Amazon S3 on +Outposts in Amazon S3 User Guide. If you don't have DeleteBucketPolicy permissions, Amazon +S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not +using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +Method Not Allowed error. As a security precaution, the root user of the Amazon Web +Services account that owns a bucket can always use this action, even if the policy +explicitly denies the root user the ability to perform this action. For more information +about bucket policies, see Using Bucket Policies and User Policies. All Amazon S3 on +Outposts REST API requests for this action require an additional parameter of +x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts +endpoint hostname prefix instead of s3-control. For an example of the request syntax for +Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the +x-amz-outpost-id derived by using the access point ARN, see the Examples section. The +following actions are related to DeleteBucketPolicy: GetBucketPolicy PutBucketPolicy + + +# Arguments +- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with + the REST API, you must specify the name and the x-amz-outpost-id as well. For using this + parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify + the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The account ID of the Outposts bucket. 
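+
+# Example
+
+An illustrative sketch (placeholder values, not generated content); `@service S3_Control`
+is assumed, and the ARN mirrors the example in the argument description above.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Delete the bucket policy of an Outposts bucket (ARN must be URL encoded per the
+# description above; shown unencoded for readability).
+bucket_arn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports"
+S3_Control.delete_bucket_policy(bucket_arn, "123456789012")
+```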
+ +""" +function delete_bucket_policy( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/policy", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_bucket_policy( + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/policy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_bucket_replication(name, x-amz-account-id) + delete_bucket_replication(name, x-amz-account-id, params::Dict{String,<:Any}) + + This operation deletes an Amazon S3 on Outposts bucket's replication configuration. To +delete an S3 bucket's replication configuration, see DeleteBucketReplication in the Amazon +S3 API Reference. Deletes the replication configuration from the specified S3 on Outposts +bucket. To use this operation, you must have permissions to perform the +s3-outposts:PutReplicationConfiguration action. The Outposts bucket owner has this +permission by default and can grant it to others. For more information about permissions, +see Setting up IAM with S3 on Outposts and Managing access to S3 on Outposts buckets in the +Amazon S3 User Guide. It can take a while to propagate PUT or DELETE requests for a +replication configuration to all S3 on Outposts systems. Therefore, the replication +configuration that's returned by a GET request soon after a PUT or DELETE request might +return a more recent result than what's on the Outpost. If an Outpost is offline, the delay +in updating the replication configuration on that Outpost can be significant. All Amazon +S3 on Outposts REST API requests for this action require an additional parameter of +x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts +endpoint hostname prefix instead of s3-control. For an example of the request syntax for +Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the +x-amz-outpost-id derived by using the access point ARN, see the Examples section. For +information about S3 replication on Outposts configuration, see Replicating objects for S3 +on Outposts in the Amazon S3 User Guide. The following operations are related to +DeleteBucketReplication: PutBucketReplication GetBucketReplication + +# Arguments +- `name`: Specifies the S3 on Outposts bucket to delete the replication configuration for. + For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the + name and the x-amz-outpost-id as well. For using this parameter with S3 on Outposts with + the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the + format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. 
+- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket to delete + the replication configuration for. + +""" +function delete_bucket_replication( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/replication", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_bucket_replication( + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/replication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_bucket_tagging(name, x-amz-account-id) + delete_bucket_tagging(name, x-amz-account-id, params::Dict{String,<:Any}) + + This action deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, +see DeleteBucketTagging in the Amazon S3 API Reference. Deletes the tags from the +Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon S3 User +Guide. To use this action, you must have permission to perform the PutBucketTagging action. +By default, the bucket owner has this permission and can grant this permission to others. +All Amazon S3 on Outposts REST API requests for this action require an additional parameter +of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on +Outposts endpoint hostname prefix instead of s3-control. For an example of the request +syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and +the x-amz-outpost-id derived by using the access point ARN, see the Examples section. The +following actions are related to DeleteBucketTagging: GetBucketTagging +PutBucketTagging + +# Arguments +- `name`: The bucket ARN that has the tag set to be removed. For using this parameter with + Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id + as well. For using this parameter with S3 on Outposts with the Amazon Web Services SDK and + CLI, you must specify the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket tag set to + be removed. 
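+
+# Example
+
+A minimal sketch with placeholder values; `@service S3_Control` is an assumption, and the
+bucket ARN mirrors the example given in the argument description above.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Remove the tag set from an Outposts bucket (ARN must be URL encoded per the
+# description above; shown unencoded for readability).
+bucket_arn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports"
+S3_Control.delete_bucket_tagging(bucket_arn, "123456789012")
+```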
+ +""" +function delete_bucket_tagging( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/tagging", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_bucket_tagging( + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/bucket/$(name)/tagging", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_job_tagging(id, x-amz-account-id) + delete_job_tagging(id, x-amz-account-id, params::Dict{String,<:Any}) + +Removes the entire tag set from the specified S3 Batch Operations job. Permissions To use +the DeleteJobTagging operation, you must have permission to perform the s3:DeleteJobTagging +action. For more information, see Controlling access and labeling jobs using tags in the +Amazon S3 User Guide. Related actions include: CreateJob GetJobTagging +PutJobTagging + +# Arguments +- `id`: The ID for the S3 Batch Operations job whose tags you want to delete. +- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch + Operations job. + +""" +function delete_job_tagging( + id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/jobs/$(id)/tagging", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_job_tagging( + id, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/jobs/$(id)/tagging", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_multi_region_access_point(client_token, details, x-amz-account-id) + delete_multi_region_access_point(client_token, details, x-amz-account-id, params::Dict{String,<:Any}) + + This operation is not supported by directory buckets. Deletes a Multi-Region Access +Point. This action does not delete the buckets associated with the Multi-Region Access +Point, only the Multi-Region Access Point itself. This action will always be routed to the +US West (Oregon) Region. For more information about the restrictions around working with +Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in +the Amazon S3 User Guide. This request is asynchronous, meaning that you might receive a +response before the command has completed. When this request provides a response, it +provides a token that you can use to monitor the status of the request with +DescribeMultiRegionAccessPointOperation. The following actions are related to +DeleteMultiRegionAccessPoint: CreateMultiRegionAccessPoint +DescribeMultiRegionAccessPointOperation GetMultiRegionAccessPoint +ListMultiRegionAccessPoints + +# Arguments +- `client_token`: An idempotency token used to identify the request and guarantee that + requests are unique. 
+- `details`: A container element containing details about the Multi-Region Access Point. +- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region + Access Point. + +""" +function delete_multi_region_access_point( + ClientToken, + Details, + x_amz_account_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/async-requests/mrap/delete", + Dict{String,Any}( + "ClientToken" => ClientToken, + "Details" => Details, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_multi_region_access_point( + ClientToken, + Details, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/async-requests/mrap/delete", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientToken" => ClientToken, + "Details" => Details, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_public_access_block(x-amz-account-id) + delete_public_access_block(x-amz-account-id, params::Dict{String,<:Any}) + + This operation is not supported by directory buckets. Removes the PublicAccessBlock +configuration for an Amazon Web Services account. For more information, see Using Amazon +S3 block public access. Related actions include: GetPublicAccessBlock +PutPublicAccessBlock + +# Arguments +- `x-amz-account-id`: The account ID for the Amazon Web Services account whose + PublicAccessBlock configuration you want to remove. + +""" +function delete_public_access_block( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/configuration/publicAccessBlock", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_public_access_block( + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/configuration/publicAccessBlock", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_storage_lens_configuration(storagelensid, x-amz-account-id) + delete_storage_lens_configuration(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) + + This operation is not supported by directory buckets. Deletes the Amazon S3 Storage Lens +configuration. For more information about S3 Storage Lens, see Assessing your storage +activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide. To use this +action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. +For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon +S3 User Guide. + +# Arguments +- `storagelensid`: The ID of the S3 Storage Lens configuration. +- `x-amz-account-id`: The account ID of the requester. 
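+
+# Example
+
+A hedged sketch; the configuration ID and account ID are placeholders, and
+`@service S3_Control` is the assumed way to load this module.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Delete an S3 Storage Lens configuration owned by the requester (placeholder IDs).
+S3_Control.delete_storage_lens_configuration("my-storage-lens-config", "111122223333")
+```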
+ +""" +function delete_storage_lens_configuration( + storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/storagelens/$(storagelensid)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_storage_lens_configuration( + storagelensid, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/storagelens/$(storagelensid)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_storage_lens_configuration_tagging(storagelensid, x-amz-account-id) + delete_storage_lens_configuration_tagging(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) + + This operation is not supported by directory buckets. Deletes the Amazon S3 Storage Lens +configuration tags. For more information about S3 Storage Lens, see Assessing your storage +activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide. To use this +action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging +action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the +Amazon S3 User Guide. + +# Arguments +- `storagelensid`: The ID of the S3 Storage Lens configuration. +- `x-amz-account-id`: The account ID of the requester. + +""" +function delete_storage_lens_configuration_tagging( + storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/storagelens/$(storagelensid)/tagging", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_storage_lens_configuration_tagging( + storagelensid, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/storagelens/$(storagelensid)/tagging", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_storage_lens_group(name, x-amz-account-id) + delete_storage_lens_group(name, x-amz-account-id, params::Dict{String,<:Any}) + + Deletes an existing S3 Storage Lens group. To use this operation, you must have the +permission to perform the s3:DeleteStorageLensGroup action. For more information about the +required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage +Lens groups. For information about Storage Lens groups errors, see List of Amazon S3 +Storage Lens error codes. + +# Arguments +- `name`: The name of the Storage Lens group that you're trying to delete. +- `x-amz-account-id`: The Amazon Web Services account ID used to create the Storage Lens + group that you're trying to delete. 
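+
+# Example
+
+An illustrative sketch only; the group name and account ID are placeholders, and
+`@service S3_Control` is assumed.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Delete a Storage Lens group by name (placeholder values).
+S3_Control.delete_storage_lens_group("my-storage-lens-group", "111122223333")
+```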
+ +""" +function delete_storage_lens_group( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/storagelensgroup/$(name)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_storage_lens_group( + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/storagelensgroup/$(name)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_job(id, x-amz-account-id) + describe_job(id, x-amz-account-id, params::Dict{String,<:Any}) + +Retrieves the configuration parameters and status for a Batch Operations job. For more +information, see S3 Batch Operations in the Amazon S3 User Guide. Permissions To use the +DescribeJob operation, you must have permission to perform the s3:DescribeJob action. +Related actions include: CreateJob ListJobs UpdateJobPriority +UpdateJobStatus + +# Arguments +- `id`: The ID for the job whose information you want to retrieve. +- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch + Operations job. + +""" +function describe_job( + id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/jobs/$(id)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_job( + id, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/jobs/$(id)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_multi_region_access_point_operation(request_token, x-amz-account-id) + describe_multi_region_access_point_operation(request_token, x-amz-account-id, params::Dict{String,<:Any}) + + This operation is not supported by directory buckets. Retrieves the status of an +asynchronous request to manage a Multi-Region Access Point. For more information about +managing Multi-Region Access Points and how asynchronous requests work, see Using +Multi-Region Access Points in the Amazon S3 User Guide. The following actions are related +to GetMultiRegionAccessPoint: CreateMultiRegionAccessPoint +DeleteMultiRegionAccessPoint GetMultiRegionAccessPoint ListMultiRegionAccessPoints + + +# Arguments +- `request_token`: The request token associated with the request you want to know about. + This request token is returned as part of the response when you make an asynchronous + request. You provide this token to query about the status of the asynchronous action. +- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region + Access Point. 
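+
+# Example
+
+A minimal sketch; the request token and account ID are placeholders, and
+`@service S3_Control` is assumed. The token is the value returned by an earlier
+asynchronous Multi-Region Access Point request, as described above.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Check the status of an asynchronous Multi-Region Access Point request.
+S3_Control.describe_multi_region_access_point_operation(
+    "example-async-request-token", "111122223333"
+)
+```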
+ +""" +function describe_multi_region_access_point_operation( + request_token, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/async-requests/mrap/$(request_token)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_multi_region_access_point_operation( + request_token, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/async-requests/mrap/$(request_token)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + dissociate_access_grants_identity_center(x-amz-account-id) + dissociate_access_grants_identity_center(x-amz-account-id, params::Dict{String,<:Any}) + +Dissociates the Amazon Web Services IAM Identity Center instance from the S3 Access Grants +instance. Permissions You must have the s3:DissociateAccessGrantsIdentityCenter +permission to use this operation. Additional Permissions You must have the +sso:DeleteApplication permission to use this operation. + +# Arguments +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +""" +function dissociate_access_grants_identity_center( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "DELETE", + "/v20180820/accessgrantsinstance/identitycenter", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function dissociate_access_grants_identity_center( + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/accessgrantsinstance/identitycenter", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_access_grant(id, x-amz-account-id) + get_access_grant(id, x-amz-account-id, params::Dict{String,<:Any}) + +Get the details of an access grant from your S3 Access Grants instance. Permissions You +must have the s3:GetAccessGrant permission to use this operation. + +# Arguments +- `id`: The ID of the access grant. S3 Access Grants auto-generates this ID when you create + the access grant. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. 
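+
+# Example
+
+A hedged sketch with placeholder IDs; `@service S3_Control` is an assumption about how
+the function is brought into scope.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Fetch the details of one access grant (placeholder grant and account IDs).
+S3_Control.get_access_grant("12345678-1234-1234-1234-123456789012", "111122223333")
+```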
+ +""" +function get_access_grant( + id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/grant/$(id)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_access_grant( + id, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/grant/$(id)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_access_grants_instance(x-amz-account-id) + get_access_grants_instance(x-amz-account-id, params::Dict{String,<:Any}) + +Retrieves the S3 Access Grants instance for a Region in your account. Permissions You +must have the s3:GetAccessGrantsInstance permission to use this operation. + +# Arguments +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +""" +function get_access_grants_instance( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_access_grants_instance( + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_access_grants_instance_for_prefix(s3prefix, x-amz-account-id) + get_access_grants_instance_for_prefix(s3prefix, x-amz-account-id, params::Dict{String,<:Any}) + +Retrieve the S3 Access Grants instance that contains a particular prefix. Permissions +You must have the s3:GetAccessGrantsInstanceForPrefix permission for the caller account to +use this operation. Additional Permissions The prefix owner account must grant you the +following permissions to their S3 Access Grants instance: +s3:GetAccessGrantsInstanceForPrefix. + +# Arguments +- `s3prefix`: The S3 prefix of the access grants that you would like to retrieve. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. 
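+
+# Example
+
+An illustrative sketch; the S3 prefix and account ID are placeholders, and
+`@service S3_Control` is assumed.
+
+```julia
+using AWS
+@service S3_Control  # assumed module name
+
+# Look up which S3 Access Grants instance covers a given prefix (placeholder values).
+S3_Control.get_access_grants_instance_for_prefix("s3://example-bucket/projects/", "111122223333")
+```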
+ +""" +function get_access_grants_instance_for_prefix( + s3prefix, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/prefix", + Dict{String,Any}( + "s3prefix" => s3prefix, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_access_grants_instance_for_prefix( + s3prefix, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/prefix", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "s3prefix" => s3prefix, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_access_grants_instance_resource_policy(x-amz-account-id) + get_access_grants_instance_resource_policy(x-amz-account-id, params::Dict{String,<:Any}) + +Returns the resource policy of the S3 Access Grants instance. Permissions You must have +the s3:GetAccessGrantsInstanceResourcePolicy permission to use this operation. + +# Arguments +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +""" +function get_access_grants_instance_resource_policy( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/resourcepolicy", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_access_grants_instance_resource_policy( + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/resourcepolicy", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_access_grants_location(id, x-amz-account-id) + get_access_grants_location(id, x-amz-account-id, params::Dict{String,<:Any}) + +Retrieves the details of a particular location registered in your S3 Access Grants +instance. Permissions You must have the s3:GetAccessGrantsLocation permission to use +this operation. + +# Arguments +- `id`: The ID of the registered location that you are retrieving. S3 Access Grants assigns + this ID when you register the location. S3 Access Grants assigns the ID default to the + default location s3:// and assigns an auto-generated ID to other locations that you + register. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. 
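+
+# Example
+
+A minimal usage sketch, assuming the usual AWS.jl `@service` loading convention; the account
+ID is a placeholder. The location ID `default` is the ID that S3 Access Grants assigns to
+the default `s3://` location, as described above:
+
+    using AWS
+    @service S3_Control
+
+    # Retrieve the default registered location (account ID is a placeholder).
+    S3_Control.get_access_grants_location("default", "111122223333")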
+ +""" +function get_access_grants_location( + id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/location/$(id)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_access_grants_location( + id, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/location/$(id)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_access_point(name, x-amz-account-id) + get_access_point(name, x-amz-account-id, params::Dict{String,<:Any}) + + This operation is not supported by directory buckets. Returns configuration information +about the specified access point. All Amazon S3 on Outposts REST API requests for this +action require an additional parameter of x-amz-outpost-id to be passed with the request. +In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. +For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts +endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, +see the Examples section. The following actions are related to GetAccessPoint: +CreateAccessPoint DeleteAccessPoint ListAccessPoints + +# Arguments +- `name`: The name of the access point whose configuration information you want to + retrieve. For using this parameter with Amazon S3 on Outposts with the REST API, you must + specify the name and the x-amz-outpost-id as well. For using this parameter with S3 on + Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access + point accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint + /<my-accesspoint-name>. For example, to access the access point reports-ap through + Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding + of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + The value must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID for the account that owns the + specified access point. + +""" +function get_access_point( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/accesspoint/$(name)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_access_point( + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/accesspoint/$(name)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end -Deletes a Multi-Region Access Point. This action does not delete the buckets associated -with the Multi-Region Access Point, only the Multi-Region Access Point itself. This action -will always be routed to the US West (Oregon) Region. 
For more information about the -restrictions around managing Multi-Region Access Points, see Managing Multi-Region Access -Points in the Amazon S3 User Guide. This request is asynchronous, meaning that you might -receive a response before the command has completed. When this request provides a response, -it provides a token that you can use to monitor the status of the request with -DescribeMultiRegionAccessPointOperation. The following actions are related to -DeleteMultiRegionAccessPoint: CreateMultiRegionAccessPoint -DescribeMultiRegionAccessPointOperation GetMultiRegionAccessPoint -ListMultiRegionAccessPoints +""" + get_access_point_configuration_for_object_lambda(name, x-amz-account-id) + get_access_point_configuration_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + + This operation is not supported by directory buckets. Returns configuration for an Object +Lambda Access Point. The following actions are related to +GetAccessPointConfigurationForObjectLambda: PutAccessPointConfigurationForObjectLambda # Arguments -- `client_token`: An idempotency token used to identify the request and guarantee that - requests are unique. -- `details`: A container element containing details about the Multi-Region Access Point. -- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region +- `name`: The name of the Object Lambda Access Point you want to return the configuration + for. +- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda Access Point. """ -function delete_multi_region_access_point( - ClientToken, - Details, - x_amz_account_id; - aws_config::AbstractAWSConfig=global_aws_config(), +function get_access_point_configuration_for_object_lambda( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( - "POST", - "/v20180820/async-requests/mrap/delete", + "GET", + "/v20180820/accesspointforobjectlambda/$(name)/configuration", Dict{String,Any}( - "ClientToken" => ClientToken, - "Details" => Details, - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_multi_region_access_point( - ClientToken, - Details, +function get_access_point_configuration_for_object_lambda( + name, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( - "POST", - "/v20180820/async-requests/mrap/delete", + "GET", + "/v20180820/accesspointforobjectlambda/$(name)/configuration", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "ClientToken" => ClientToken, - "Details" => Details, - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ), params, ), @@ -1059,24 +2358,26 @@ function delete_multi_region_access_point( end """ - delete_public_access_block(x-amz-account-id) - delete_public_access_block(x-amz-account-id, params::Dict{String,<:Any}) + get_access_point_for_object_lambda(name, x-amz-account-id) + get_access_point_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) -Removes the PublicAccessBlock configuration for an Amazon Web Services account. For more -information, see Using Amazon S3 block public access. Related actions include: -GetPublicAccessBlock PutPublicAccessBlock + This operation is not supported by directory buckets. 
Returns configuration information +about the specified Object Lambda Access Point The following actions are related to +GetAccessPointForObjectLambda: CreateAccessPointForObjectLambda +DeleteAccessPointForObjectLambda ListAccessPointsForObjectLambda # Arguments -- `x-amz-account-id`: The account ID for the Amazon Web Services account whose - PublicAccessBlock configuration you want to remove. +- `name`: The name of the Object Lambda Access Point. +- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda + Access Point. """ -function delete_public_access_block( - x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_access_point_for_object_lambda( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( - "DELETE", - "/v20180820/configuration/publicAccessBlock", + "GET", + "/v20180820/accesspointforobjectlambda/$(name)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1084,14 +2385,15 @@ function delete_public_access_block( feature_set=SERVICE_FEATURE_SET, ) end -function delete_public_access_block( +function get_access_point_for_object_lambda( + name, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( - "DELETE", - "/v20180820/configuration/publicAccessBlock", + "GET", + "/v20180820/accesspointforobjectlambda/$(name)", Dict{String,Any}( mergewith( _merge, @@ -1107,26 +2409,32 @@ function delete_public_access_block( end """ - delete_storage_lens_configuration(storagelensid, x-amz-account-id) - delete_storage_lens_configuration(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) + get_access_point_policy(name, x-amz-account-id) + get_access_point_policy(name, x-amz-account-id, params::Dict{String,<:Any}) -Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage -Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the -Amazon S3 User Guide. To use this action, you must have permission to perform the -s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to -use Amazon S3 Storage Lens in the Amazon S3 User Guide. + This operation is not supported by directory buckets. Returns the access point policy +associated with the specified access point. The following actions are related to +GetAccessPointPolicy: PutAccessPointPolicy DeleteAccessPointPolicy # Arguments -- `storagelensid`: The ID of the S3 Storage Lens configuration. -- `x-amz-account-id`: The account ID of the requester. +- `name`: The name of the access point whose policy you want to retrieve. For using this + parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the + x-amz-outpost-id as well. For using this parameter with S3 on Outposts with the Amazon Web + Services SDK and CLI, you must specify the ARN of the access point accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint + /<my-accesspoint-name>. For example, to access the access point reports-ap through + Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding + of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + The value must be URL encoded. +- `x-amz-account-id`: The account ID for the account that owns the specified access point. 
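+
+# Example
+
+A minimal usage sketch, assuming AWS.jl's usual `@service` loading pattern; the access point
+name and account ID are placeholders. The `aws_config` keyword is optional and defaults to
+`global_aws_config()`:
+
+    using AWS
+    @service S3_Control
+
+    # Access point name and account ID are placeholder values.
+    S3_Control.get_access_point_policy(
+        "my-access-point", "111122223333"; aws_config=global_aws_config()
+    )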
""" -function delete_storage_lens_configuration( - storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_access_point_policy( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( - "DELETE", - "/v20180820/storagelens/$(storagelensid)", + "GET", + "/v20180820/accesspoint/$(name)/policy", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1134,15 +2442,15 @@ function delete_storage_lens_configuration( feature_set=SERVICE_FEATURE_SET, ) end -function delete_storage_lens_configuration( - storagelensid, +function get_access_point_policy( + name, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( - "DELETE", - "/v20180820/storagelens/$(storagelensid)", + "GET", + "/v20180820/accesspoint/$(name)/policy", Dict{String,Any}( mergewith( _merge, @@ -1158,26 +2466,26 @@ function delete_storage_lens_configuration( end """ - delete_storage_lens_configuration_tagging(storagelensid, x-amz-account-id) - delete_storage_lens_configuration_tagging(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) + get_access_point_policy_for_object_lambda(name, x-amz-account-id) + get_access_point_policy_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) -Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 -Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in -the Amazon S3 User Guide. To use this action, you must have permission to perform the -s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting -permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide. + This operation is not supported by directory buckets. Returns the resource policy for an +Object Lambda Access Point. The following actions are related to +GetAccessPointPolicyForObjectLambda: DeleteAccessPointPolicyForObjectLambda +PutAccessPointPolicyForObjectLambda # Arguments -- `storagelensid`: The ID of the S3 Storage Lens configuration. -- `x-amz-account-id`: The account ID of the requester. +- `name`: The name of the Object Lambda Access Point. +- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda + Access Point. 
""" -function delete_storage_lens_configuration_tagging( - storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_access_point_policy_for_object_lambda( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( - "DELETE", - "/v20180820/storagelens/$(storagelensid)/tagging", + "GET", + "/v20180820/accesspointforobjectlambda/$(name)/policy", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1185,15 +2493,15 @@ function delete_storage_lens_configuration_tagging( feature_set=SERVICE_FEATURE_SET, ) end -function delete_storage_lens_configuration_tagging( - storagelensid, +function get_access_point_policy_for_object_lambda( + name, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( - "DELETE", - "/v20180820/storagelens/$(storagelensid)/tagging", + "GET", + "/v20180820/accesspointforobjectlambda/$(name)/policy", Dict{String,Any}( mergewith( _merge, @@ -1209,25 +2517,25 @@ function delete_storage_lens_configuration_tagging( end """ - describe_job(id, x-amz-account-id) - describe_job(id, x-amz-account-id, params::Dict{String,<:Any}) + get_access_point_policy_status(name, x-amz-account-id) + get_access_point_policy_status(name, x-amz-account-id, params::Dict{String,<:Any}) -Retrieves the configuration parameters and status for a Batch Operations job. For more -information, see S3 Batch Operations in the Amazon S3 User Guide. Related actions include: - CreateJob ListJobs UpdateJobPriority UpdateJobStatus + This operation is not supported by directory buckets. Indicates whether the specified +access point currently has a policy that allows public access. For more information about +public access through access points, see Managing Data Access with Amazon S3 access points +in the Amazon S3 User Guide. # Arguments -- `id`: The ID for the job whose information you want to retrieve. -- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch - Operations job. +- `name`: The name of the access point whose policy status you want to retrieve. +- `x-amz-account-id`: The account ID for the account that owns the specified access point. """ -function describe_job( - id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_access_point_policy_status( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/jobs/$(id)", + "/v20180820/accesspoint/$(name)/policyStatus", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1235,15 +2543,15 @@ function describe_job( feature_set=SERVICE_FEATURE_SET, ) end -function describe_job( - id, +function get_access_point_policy_status( + name, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/jobs/$(id)", + "/v20180820/accesspoint/$(name)/policyStatus", Dict{String,Any}( mergewith( _merge, @@ -1259,30 +2567,24 @@ function describe_job( end """ - describe_multi_region_access_point_operation(request_token, x-amz-account-id) - describe_multi_region_access_point_operation(request_token, x-amz-account-id, params::Dict{String,<:Any}) - -Retrieves the status of an asynchronous request to manage a Multi-Region Access Point. 
For -more information about managing Multi-Region Access Points and how asynchronous requests -work, see Managing Multi-Region Access Points in the Amazon S3 User Guide. The following -actions are related to GetMultiRegionAccessPoint: CreateMultiRegionAccessPoint -DeleteMultiRegionAccessPoint GetMultiRegionAccessPoint ListMultiRegionAccessPoints + get_access_point_policy_status_for_object_lambda(name, x-amz-account-id) + get_access_point_policy_status_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + This operation is not supported by directory buckets. Returns the status of the resource +policy associated with an Object Lambda Access Point. # Arguments -- `request_token`: The request token associated with the request you want to know about. - This request token is returned as part of the response when you make an asynchronous - request. You provide this token to query about the status of the asynchronous action. -- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region +- `name`: The name of the Object Lambda Access Point. +- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda Access Point. """ -function describe_multi_region_access_point_operation( - request_token, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_access_point_policy_status_for_object_lambda( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/async-requests/mrap/$(request_token)", + "/v20180820/accesspointforobjectlambda/$(name)/policyStatus", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1290,15 +2592,15 @@ function describe_multi_region_access_point_operation( feature_set=SERVICE_FEATURE_SET, ) end -function describe_multi_region_access_point_operation( - request_token, +function get_access_point_policy_status_for_object_lambda( + name, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/async-requests/mrap/$(request_token)", + "/v20180820/accesspointforobjectlambda/$(name)/policyStatus", Dict{String,Any}( mergewith( _merge, @@ -1314,39 +2616,117 @@ function describe_multi_region_access_point_operation( end """ - get_access_point(name, x-amz-account-id) - get_access_point(name, x-amz-account-id, params::Dict{String,<:Any}) + get_bucket(name, x-amz-account-id) + get_bucket(name, x-amz-account-id, params::Dict{String,<:Any}) -Returns configuration information about the specified access point. All Amazon S3 on +Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on +Outposts in the Amazon S3 User Guide. If you are using an identity other than the root user +of the Amazon Web Services account that owns the Outposts bucket, the calling identity must +have the s3-outposts:GetBucket permissions on the specified Outposts bucket and belong to +the Outposts bucket owner's account in order to use this action. Only users from Outposts +bucket owner account with the right permissions can perform actions on an Outposts bucket. +If you don't have s3-outposts:GetBucket permissions or you're not using an identity that +belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error. 
The +following actions are related to GetBucket for Amazon S3 on Outposts: All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the -x-amz-outpost-id derived by using the access point ARN, see the Examples section. The -following actions are related to GetAccessPoint: CreateAccessPoint DeleteAccessPoint - ListAccessPoints +x-amz-outpost-id derived by using the access point ARN, see the Examples section. +PutObject CreateBucket DeleteBucket + +# Arguments +- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with + the REST API, you must specify the name and the x-amz-outpost-id as well. For using this + parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify + the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. + +""" +function get_bucket( + name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/bucket/$(name)", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_bucket( + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/bucket/$(name)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_bucket_lifecycle_configuration(name, x-amz-account-id) + get_bucket_lifecycle_configuration(name, x-amz-account-id, params::Dict{String,<:Any}) + + This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 +bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon S3 API +Reference. Returns the lifecycle configuration information set on the Outposts bucket. +For more information, see Using Amazon S3 on Outposts and for information about lifecycle +configuration, see Object Lifecycle Management in Amazon S3 User Guide. To use this +action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration +action. The Outposts bucket owner has this permission, by default. The bucket owner can +grant this permission to others. For more information about permissions, see Permissions +Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 +Resources. All Amazon S3 on Outposts REST API requests for this action require an +additional parameter of x-amz-outpost-id to be passed with the request. In addition, you +must use an S3 on Outposts endpoint hostname prefix instead of s3-control. 
For an example +of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint +hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the +Examples section. GetBucketLifecycleConfiguration has the following special error: Error +code: NoSuchLifecycleConfiguration Description: The lifecycle configuration does not +exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client The following +actions are related to GetBucketLifecycleConfiguration: PutBucketLifecycleConfiguration + DeleteBucketLifecycleConfiguration # Arguments -- `name`: The name of the access point whose configuration information you want to - retrieve. For using this parameter with Amazon S3 on Outposts with the REST API, you must - specify the name and the x-amz-outpost-id as well. For using this parameter with S3 on - Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access - point accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint - /<my-accesspoint-name>. For example, to access the access point reports-ap through - Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding - of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. - The value must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID for the account that owns the - specified access point. +- `name`: The Amazon Resource Name (ARN) of the bucket. For using this parameter with + Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id + as well. For using this parameter with S3 on Outposts with the Amazon Web Services SDK and + CLI, you must specify the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. """ -function get_access_point( +function get_bucket_lifecycle_configuration( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspoint/$(name)", + "/v20180820/bucket/$(name)/lifecycleconfiguration", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1354,7 +2734,7 @@ function get_access_point( feature_set=SERVICE_FEATURE_SET, ) end -function get_access_point( +function get_bucket_lifecycle_configuration( name, x_amz_account_id, params::AbstractDict{String}; @@ -1362,7 +2742,7 @@ function get_access_point( ) return s3_control( "GET", - "/v20180820/accesspoint/$(name)", + "/v20180820/bucket/$(name)/lifecycleconfiguration", Dict{String,Any}( mergewith( _merge, @@ -1378,26 +2758,49 @@ function get_access_point( end """ - get_access_point_configuration_for_object_lambda(name, x-amz-account-id) - get_access_point_configuration_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + get_bucket_policy(name, x-amz-account-id) + get_bucket_policy(name, x-amz-account-id, params::Dict{String,<:Any}) -Returns configuration for an Object Lambda Access Point. 
The following actions are related -to GetAccessPointConfigurationForObjectLambda: -PutAccessPointConfigurationForObjectLambda + This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for +an S3 bucket, see GetBucketPolicy in the Amazon S3 API Reference. Returns the policy of a +specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the +Amazon S3 User Guide. If you are using an identity other than the root user of the Amazon +Web Services account that owns the bucket, the calling identity must have the +GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's +account in order to use this action. Only users from Outposts bucket owner account with the +right permissions can perform actions on an Outposts bucket. If you don't have +s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the +bucket owner's account, Amazon S3 returns a 403 Access Denied error. As a security +precaution, the root user of the Amazon Web Services account that owns a bucket can always +use this action, even if the policy explicitly denies the root user the ability to perform +this action. For more information about bucket policies, see Using Bucket Policies and +User Policies. All Amazon S3 on Outposts REST API requests for this action require an +additional parameter of x-amz-outpost-id to be passed with the request. In addition, you +must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example +of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint +hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the +Examples section. The following actions are related to GetBucketPolicy: GetObject +PutBucketPolicy DeleteBucketPolicy # Arguments -- `name`: The name of the Object Lambda Access Point you want to return the configuration - for. -- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda - Access Point. +- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with + the REST API, you must specify the name and the x-amz-outpost-id as well. For using this + parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify + the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. 
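+
+# Example
+
+A minimal usage sketch, assuming AWS.jl's usual `@service` loading pattern. Because this is
+an S3 on Outposts operation, the SDK form of `name` is the bucket ARN shown in the argument
+description above; the Outpost, bucket, and account values reuse the placeholders from that
+description:
+
+    using AWS
+    @service S3_Control
+
+    # Bucket ARN and account ID match the placeholder example in the docstring above.
+    S3_Control.get_bucket_policy(
+        "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports",
+        "123456789012",
+    )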
""" -function get_access_point_configuration_for_object_lambda( +function get_bucket_policy( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)/configuration", + "/v20180820/bucket/$(name)/policy", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1405,7 +2808,7 @@ function get_access_point_configuration_for_object_lambda( feature_set=SERVICE_FEATURE_SET, ) end -function get_access_point_configuration_for_object_lambda( +function get_bucket_policy( name, x_amz_account_id, params::AbstractDict{String}; @@ -1413,7 +2816,7 @@ function get_access_point_configuration_for_object_lambda( ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)/configuration", + "/v20180820/bucket/$(name)/policy", Dict{String,Any}( mergewith( _merge, @@ -1429,26 +2832,54 @@ function get_access_point_configuration_for_object_lambda( end """ - get_access_point_for_object_lambda(name, x-amz-account-id) - get_access_point_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + get_bucket_replication(name, x-amz-account-id) + get_bucket_replication(name, x-amz-account-id, params::Dict{String,<:Any}) -Returns configuration information about the specified Object Lambda Access Point The -following actions are related to GetAccessPointForObjectLambda: -CreateAccessPointForObjectLambda DeleteAccessPointForObjectLambda -ListAccessPointsForObjectLambda + This operation gets an Amazon S3 on Outposts bucket's replication configuration. To get an +S3 bucket's replication configuration, see GetBucketReplication in the Amazon S3 API +Reference. Returns the replication configuration of an S3 on Outposts bucket. For more +information about S3 on Outposts, see Using Amazon S3 on Outposts in the Amazon S3 User +Guide. For information about S3 replication on Outposts configuration, see Replicating +objects for S3 on Outposts in the Amazon S3 User Guide. It can take a while to propagate +PUT or DELETE requests for a replication configuration to all S3 on Outposts systems. +Therefore, the replication configuration that's returned by a GET request soon after a PUT +or DELETE request might return a more recent result than what's on the Outpost. If an +Outpost is offline, the delay in updating the replication configuration on that Outpost can +be significant. This action requires permissions for the +s3-outposts:GetReplicationConfiguration action. The Outposts bucket owner has this +permission by default and can grant it to others. For more information about permissions, +see Setting up IAM with S3 on Outposts and Managing access to S3 on Outposts bucket in the +Amazon S3 User Guide. All Amazon S3 on Outposts REST API requests for this action require +an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you +must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example +of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint +hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the +Examples section. If you include the Filter element in a replication configuration, you +must also include the DeleteMarkerReplication, Status, and Priority elements. The response +also returns those elements. For information about S3 on Outposts replication failure +reasons, see Replication failure reasons in the Amazon S3 User Guide. 
The following +operations are related to GetBucketReplication: PutBucketReplication +DeleteBucketReplication # Arguments -- `name`: The name of the Object Lambda Access Point. -- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda - Access Point. +- `name`: Specifies the bucket to get the replication information for. For using this + parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the + x-amz-outpost-id as well. For using this parameter with S3 on Outposts with the Amazon Web + Services SDK and CLI, you must specify the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. """ -function get_access_point_for_object_lambda( +function get_bucket_replication( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)", + "/v20180820/bucket/$(name)/replication", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1456,7 +2887,7 @@ function get_access_point_for_object_lambda( feature_set=SERVICE_FEATURE_SET, ) end -function get_access_point_for_object_lambda( +function get_bucket_replication( name, x_amz_account_id, params::AbstractDict{String}; @@ -1464,7 +2895,7 @@ function get_access_point_for_object_lambda( ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)", + "/v20180820/bucket/$(name)/replication", Dict{String,Any}( mergewith( _merge, @@ -1480,32 +2911,43 @@ function get_access_point_for_object_lambda( end """ - get_access_point_policy(name, x-amz-account-id) - get_access_point_policy(name, x-amz-account-id, params::Dict{String,<:Any}) + get_bucket_tagging(name, x-amz-account-id) + get_bucket_tagging(name, x-amz-account-id, params::Dict{String,<:Any}) -Returns the access point policy associated with the specified access point. The following -actions are related to GetAccessPointPolicy: PutAccessPointPolicy -DeleteAccessPointPolicy + This action gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see +GetBucketTagging in the Amazon S3 API Reference. Returns the tag set associated with the +Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 +User Guide. To use this action, you must have permission to perform the GetBucketTagging +action. By default, the bucket owner has this permission and can grant this permission to +others. GetBucketTagging has the following special error: Error code: NoSuchTagSetError + Description: There is no tag set associated with the bucket. All Amazon S3 on +Outposts REST API requests for this action require an additional parameter of +x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts +endpoint hostname prefix instead of s3-control. For an example of the request syntax for +Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the +x-amz-outpost-id derived by using the access point ARN, see the Examples section. 
The +following actions are related to GetBucketTagging: PutBucketTagging +DeleteBucketTagging # Arguments -- `name`: The name of the access point whose policy you want to retrieve. For using this - parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the - x-amz-outpost-id as well. For using this parameter with S3 on Outposts with the Amazon Web - Services SDK and CLI, you must specify the ARN of the access point accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint - /<my-accesspoint-name>. For example, to access the access point reports-ap through - Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding - of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. - The value must be URL encoded. -- `x-amz-account-id`: The account ID for the account that owns the specified access point. +- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with + the REST API, you must specify the name and the x-amz-outpost-id as well. For using this + parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify + the ARN of the bucket accessed in the format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. """ -function get_access_point_policy( +function get_bucket_tagging( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspoint/$(name)/policy", + "/v20180820/bucket/$(name)/tagging", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1513,7 +2955,7 @@ function get_access_point_policy( feature_set=SERVICE_FEATURE_SET, ) end -function get_access_point_policy( +function get_bucket_tagging( name, x_amz_account_id, params::AbstractDict{String}; @@ -1521,7 +2963,7 @@ function get_access_point_policy( ) return s3_control( "GET", - "/v20180820/accesspoint/$(name)/policy", + "/v20180820/bucket/$(name)/tagging", Dict{String,Any}( mergewith( _merge, @@ -1537,25 +2979,36 @@ function get_access_point_policy( end """ - get_access_point_policy_for_object_lambda(name, x-amz-account-id) - get_access_point_policy_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + get_bucket_versioning(name, x-amz-account-id) + get_bucket_versioning(name, x-amz-account-id, params::Dict{String,<:Any}) + + This operation returns the versioning state for S3 on Outposts buckets only. To return the +versioning state for an S3 bucket, see GetBucketVersioning in the Amazon S3 API Reference. + Returns the versioning state for an S3 on Outposts bucket. With S3 Versioning, you can +save multiple distinct copies of your objects and recover from unintended user actions and +application failures. If you've never set versioning on your bucket, it has no versioning +state. In that case, the GetBucketVersioning request does not return a versioning state +value. For more information about versioning, see Versioning in the Amazon S3 User Guide. 
+All Amazon S3 on Outposts REST API requests for this action require an additional parameter +of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on +Outposts endpoint hostname prefix instead of s3-control. For an example of the request +syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and +the x-amz-outpost-id derived by using the access point ARN, see the Examples section. The +following operations are related to GetBucketVersioning for S3 on Outposts. +PutBucketVersioning PutBucketLifecycleConfiguration GetBucketLifecycleConfiguration -Returns the resource policy for an Object Lambda Access Point. The following actions are -related to GetAccessPointPolicyForObjectLambda: DeleteAccessPointPolicyForObjectLambda - PutAccessPointPolicyForObjectLambda # Arguments -- `name`: The name of the Object Lambda Access Point. -- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda - Access Point. +- `name`: The S3 on Outposts bucket to return the versioning state for. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 on Outposts bucket. """ -function get_access_point_policy_for_object_lambda( +function get_bucket_versioning( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)/policy", + "/v20180820/bucket/$(name)/versioning", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1563,7 +3016,7 @@ function get_access_point_policy_for_object_lambda( feature_set=SERVICE_FEATURE_SET, ) end -function get_access_point_policy_for_object_lambda( +function get_bucket_versioning( name, x_amz_account_id, params::AbstractDict{String}; @@ -1571,7 +3024,7 @@ function get_access_point_policy_for_object_lambda( ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)/policy", + "/v20180820/bucket/$(name)/versioning", Dict{String,Any}( mergewith( _merge, @@ -1587,45 +3040,74 @@ function get_access_point_policy_for_object_lambda( end """ - get_access_point_policy_status(name, x-amz-account-id) - get_access_point_policy_status(name, x-amz-account-id, params::Dict{String,<:Any}) + get_data_access(permission, target, x-amz-account-id) + get_data_access(permission, target, x-amz-account-id, params::Dict{String,<:Any}) -Indicates whether the specified access point currently has a policy that allows public -access. For more information about public access through access points, see Managing Data -Access with Amazon S3 access points in the Amazon S3 User Guide. +Returns a temporary access credential from S3 Access Grants to the grantee or client +application. The temporary credential is an Amazon Web Services STS token that grants them +access to the S3 data. Permissions You must have the s3:GetDataAccess permission to use +this operation. Additional Permissions The IAM role that S3 Access Grants assumes must +have the following permissions specified in the trust policy when registering the location: +sts:AssumeRole, for directory users or groups sts:SetContext, and for IAM users or roles +sts:SetSourceIdentity. # Arguments -- `name`: The name of the access point whose policy status you want to retrieve. -- `x-amz-account-id`: The account ID for the account that owns the specified access point. 
+- `permission`: The type of permission granted to your S3 data, which can be set to one of + the following values: READ – Grant read-only access to the S3 data. WRITE – Grant + write-only access to the S3 data. READWRITE – Grant both read and write access to the + S3 data. +- `target`: The S3 URI path of the data to which you are requesting temporary access + credentials. If the requesting account has an access grant for this data, S3 Access Grants + vends temporary access credentials in the response. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. -""" -function get_access_point_policy_status( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"durationSeconds"`: The session duration, in seconds, of the temporary access credential + that S3 Access Grants vends to the grantee or client application. The default value is 1 + hour, but the grantee can specify a range from 900 seconds (15 minutes) up to 43200 seconds + (12 hours). If the grantee requests a value higher than this maximum, the operation fails. +- `"privilege"`: The scope of the temporary access credential that S3 Access Grants vends + to the grantee or client application. Default – The scope of the returned temporary + access token is the scope of the grant that is closest to the target scope. Minimal – + The scope of the returned temporary access token is the same as the requested target scope + as long as the requested scope is the same as or a subset of the grant scope. +- `"targetType"`: The type of Target. The only possible value is Object. Pass this value if + the target data that you would like to access is a path to an object. Do not pass this + value if the target data is a bucket or a bucket and a prefix. +""" +function get_data_access( + permission, target, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspoint/$(name)/policyStatus", + "/v20180820/accessgrantsinstance/dataaccess", Dict{String,Any}( - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + "permission" => permission, + "target" => target, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_access_point_policy_status( - name, +function get_data_access( + permission, + target, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/accesspoint/$(name)/policyStatus", + "/v20180820/accessgrantsinstance/dataaccess", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + "permission" => permission, + "target" => target, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), ), params, ), @@ -1636,23 +3118,26 @@ function get_access_point_policy_status( end """ - get_access_point_policy_status_for_object_lambda(name, x-amz-account-id) - get_access_point_policy_status_for_object_lambda(name, x-amz-account-id, params::Dict{String,<:Any}) + get_job_tagging(id, x-amz-account-id) + get_job_tagging(id, x-amz-account-id, params::Dict{String,<:Any}) -Returns the status of the resource policy associated with an Object Lambda Access Point. +Returns the tags on an S3 Batch Operations job. 
Permissions To use the GetJobTagging +operation, you must have permission to perform the s3:GetJobTagging action. For more +information, see Controlling access and labeling jobs using tags in the Amazon S3 User +Guide. Related actions include: CreateJob PutJobTagging DeleteJobTagging # Arguments -- `name`: The name of the Object Lambda Access Point. -- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda - Access Point. +- `id`: The ID for the S3 Batch Operations job whose tags you want to retrieve. +- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch + Operations job. """ -function get_access_point_policy_status_for_object_lambda( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_job_tagging( + id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)/policyStatus", + "/v20180820/jobs/$(id)/tagging", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1660,15 +3145,15 @@ function get_access_point_policy_status_for_object_lambda( feature_set=SERVICE_FEATURE_SET, ) end -function get_access_point_policy_status_for_object_lambda( - name, +function get_job_tagging( + id, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda/$(name)/policyStatus", + "/v20180820/jobs/$(id)/tagging", Dict{String,Any}( mergewith( _merge, @@ -1684,44 +3169,33 @@ function get_access_point_policy_status_for_object_lambda( end """ - get_bucket(name, x-amz-account-id) - get_bucket(name, x-amz-account-id, params::Dict{String,<:Any}) + get_multi_region_access_point(name, x-amz-account-id) + get_multi_region_access_point(name, x-amz-account-id, params::Dict{String,<:Any}) -Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on -Outposts in the Amazon S3 User Guide. If you are using an identity other than the root user -of the Amazon Web Services account that owns the Outposts bucket, the calling identity must -have the s3-outposts:GetBucket permissions on the specified Outposts bucket and belong to -the Outposts bucket owner's account in order to use this action. Only users from Outposts -bucket owner account with the right permissions can perform actions on an Outposts bucket. - If you don't have s3-outposts:GetBucket permissions or you're not using an identity that -belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error. The -following actions are related to GetBucket for Amazon S3 on Outposts: All Amazon S3 on -Outposts REST API requests for this action require an additional parameter of -x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts -endpoint hostname prefix instead of s3-control. For an example of the request syntax for -Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the -x-amz-outpost-id derived by using the access point ARN, see the Examples section. -PutObject CreateBucket DeleteBucket + This operation is not supported by directory buckets. Returns configuration information +about the specified Multi-Region Access Point. This action will always be routed to the US +West (Oregon) Region. 
For more information about the restrictions around working with +Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in +the Amazon S3 User Guide. The following actions are related to GetMultiRegionAccessPoint: + CreateMultiRegionAccessPoint DeleteMultiRegionAccessPoint +DescribeMultiRegionAccessPointOperation ListMultiRegionAccessPoints # Arguments -- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with - the REST API, you must specify the name and the x-amz-outpost-id as well. For using this - parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify - the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. +- `name`: The name of the Multi-Region Access Point whose configuration information you + want to receive. The name of the Multi-Region Access Point is different from the alias. For + more information about the distinction between the name and the alias of an Multi-Region + Access Point, see Rules for naming Amazon S3 Multi-Region Access Points in the Amazon S3 + User Guide. +- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region + Access Point. """ -function get_bucket( +function get_multi_region_access_point( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/bucket/$(name)", + "/v20180820/mrap/instances/$(name)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1729,7 +3203,7 @@ function get_bucket( feature_set=SERVICE_FEATURE_SET, ) end -function get_bucket( +function get_multi_region_access_point( name, x_amz_account_id, params::AbstractDict{String}; @@ -1737,7 +3211,7 @@ function get_bucket( ) return s3_control( "GET", - "/v20180820/bucket/$(name)", + "/v20180820/mrap/instances/$(name)", Dict{String,Any}( mergewith( _merge, @@ -1753,48 +3227,32 @@ function get_bucket( end """ - get_bucket_lifecycle_configuration(name, x-amz-account-id) - get_bucket_lifecycle_configuration(name, x-amz-account-id, params::Dict{String,<:Any}) + get_multi_region_access_point_policy(name, x-amz-account-id) + get_multi_region_access_point_policy(name, x-amz-account-id, params::Dict{String,<:Any}) - This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 -bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon S3 API -Reference. Returns the lifecycle configuration information set on the Outposts bucket. -For more information, see Using Amazon S3 on Outposts and for information about lifecycle -configuration, see Object Lifecycle Management in Amazon S3 User Guide. To use this -action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration -action. The Outposts bucket owner has this permission, by default. The bucket owner can -grant this permission to others. For more information about permissions, see Permissions -Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 -Resources. 
All Amazon S3 on Outposts REST API requests for this action require an -additional parameter of x-amz-outpost-id to be passed with the request. In addition, you -must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example -of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint -hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the -Examples section. GetBucketLifecycleConfiguration has the following special error: Error -code: NoSuchLifecycleConfiguration Description: The lifecycle configuration does not -exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client The following -actions are related to GetBucketLifecycleConfiguration: PutBucketLifecycleConfiguration - DeleteBucketLifecycleConfiguration + This operation is not supported by directory buckets. Returns the access control policy +of the specified Multi-Region Access Point. This action will always be routed to the US +West (Oregon) Region. For more information about the restrictions around working with +Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in +the Amazon S3 User Guide. The following actions are related to +GetMultiRegionAccessPointPolicy: GetMultiRegionAccessPointPolicyStatus +PutMultiRegionAccessPointPolicy # Arguments -- `name`: The Amazon Resource Name (ARN) of the bucket. For using this parameter with - Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id - as well. For using this parameter with S3 on Outposts with the Amazon Web Services SDK and - CLI, you must specify the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. +- `name`: Specifies the Multi-Region Access Point. The name of the Multi-Region Access + Point is different from the alias. For more information about the distinction between the + name and the alias of an Multi-Region Access Point, see Rules for naming Amazon S3 + Multi-Region Access Points in the Amazon S3 User Guide. +- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region + Access Point. 
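+
+# Example
+
+A minimal usage sketch, assuming the usual AWS.jl `@service` loading convention; the
+Multi-Region Access Point name and account ID are placeholders. Note that `name` is the
+Multi-Region Access Point's name, not its alias, as described above:
+
+    using AWS
+    @service S3_Control
+
+    # Placeholder Multi-Region Access Point name and owning account ID.
+    S3_Control.get_multi_region_access_point_policy("my-mrap", "111122223333")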
""" -function get_bucket_lifecycle_configuration( +function get_multi_region_access_point_policy( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/bucket/$(name)/lifecycleconfiguration", + "/v20180820/mrap/instances/$(name)/policy", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1802,7 +3260,7 @@ function get_bucket_lifecycle_configuration( feature_set=SERVICE_FEATURE_SET, ) end -function get_bucket_lifecycle_configuration( +function get_multi_region_access_point_policy( name, x_amz_account_id, params::AbstractDict{String}; @@ -1810,7 +3268,7 @@ function get_bucket_lifecycle_configuration( ) return s3_control( "GET", - "/v20180820/bucket/$(name)/lifecycleconfiguration", + "/v20180820/mrap/instances/$(name)/policy", Dict{String,Any}( mergewith( _merge, @@ -1826,49 +3284,32 @@ function get_bucket_lifecycle_configuration( end """ - get_bucket_policy(name, x-amz-account-id) - get_bucket_policy(name, x-amz-account-id, params::Dict{String,<:Any}) + get_multi_region_access_point_policy_status(name, x-amz-account-id) + get_multi_region_access_point_policy_status(name, x-amz-account-id, params::Dict{String,<:Any}) - This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for -an S3 bucket, see GetBucketPolicy in the Amazon S3 API Reference. Returns the policy of a -specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the -Amazon S3 User Guide. If you are using an identity other than the root user of the Amazon -Web Services account that owns the bucket, the calling identity must have the -GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's -account in order to use this action. Only users from Outposts bucket owner account with the -right permissions can perform actions on an Outposts bucket. If you don't have -s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the -bucket owner's account, Amazon S3 returns a 403 Access Denied error. As a security -precaution, the root user of the Amazon Web Services account that owns a bucket can always -use this action, even if the policy explicitly denies the root user the ability to perform -this action. For more information about bucket policies, see Using Bucket Policies and -User Policies. All Amazon S3 on Outposts REST API requests for this action require an -additional parameter of x-amz-outpost-id to be passed with the request. In addition, you -must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example -of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint -hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the -Examples section. The following actions are related to GetBucketPolicy: GetObject -PutBucketPolicy DeleteBucketPolicy + This operation is not supported by directory buckets. Indicates whether the specified +Multi-Region Access Point has an access control policy that allows public access. This +action will always be routed to the US West (Oregon) Region. For more information about the +restrictions around working with Multi-Region Access Points, see Multi-Region Access Point +restrictions and limitations in the Amazon S3 User Guide. 
The following actions are related +to GetMultiRegionAccessPointPolicyStatus: GetMultiRegionAccessPointPolicy +PutMultiRegionAccessPointPolicy # Arguments -- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with - the REST API, you must specify the name and the x-amz-outpost-id as well. For using this - parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify - the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. +- `name`: Specifies the Multi-Region Access Point. The name of the Multi-Region Access + Point is different from the alias. For more information about the distinction between the + name and the alias of an Multi-Region Access Point, see Rules for naming Amazon S3 + Multi-Region Access Points in the Amazon S3 User Guide. +- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region + Access Point. """ -function get_bucket_policy( +function get_multi_region_access_point_policy_status( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/bucket/$(name)/policy", + "/v20180820/mrap/instances/$(name)/policystatus", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1876,7 +3317,7 @@ function get_bucket_policy( feature_set=SERVICE_FEATURE_SET, ) end -function get_bucket_policy( +function get_multi_region_access_point_policy_status( name, x_amz_account_id, params::AbstractDict{String}; @@ -1884,7 +3325,7 @@ function get_bucket_policy( ) return s3_control( "GET", - "/v20180820/bucket/$(name)/policy", + "/v20180820/mrap/instances/$(name)/policystatus", Dict{String,Any}( mergewith( _merge, @@ -1900,54 +3341,27 @@ function get_bucket_policy( end """ - get_bucket_replication(name, x-amz-account-id) - get_bucket_replication(name, x-amz-account-id, params::Dict{String,<:Any}) + get_multi_region_access_point_routes(mrap, x-amz-account-id) + get_multi_region_access_point_routes(mrap, x-amz-account-id, params::Dict{String,<:Any}) - This operation gets an Amazon S3 on Outposts bucket's replication configuration. To get an -S3 bucket's replication configuration, see GetBucketReplication in the Amazon S3 API -Reference. Returns the replication configuration of an S3 on Outposts bucket. For more -information about S3 on Outposts, see Using Amazon S3 on Outposts in the Amazon S3 User -Guide. For information about S3 replication on Outposts configuration, see Replicating -objects for S3 on Outposts in the Amazon S3 User Guide. It can take a while to propagate -PUT or DELETE requests for a replication configuration to all S3 on Outposts systems. -Therefore, the replication configuration that's returned by a GET request soon after a PUT -or DELETE request might return a more recent result than what's on the Outpost. If an -Outpost is offline, the delay in updating the replication configuration on that Outpost can -be significant. This action requires permissions for the -s3-outposts:GetReplicationConfiguration action. 
The Outposts bucket owner has this -permission by default and can grant it to others. For more information about permissions, -see Setting up IAM with S3 on Outposts and Managing access to S3 on Outposts bucket in the -Amazon S3 User Guide. All Amazon S3 on Outposts REST API requests for this action require -an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you -must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example -of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint -hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the -Examples section. If you include the Filter element in a replication configuration, you -must also include the DeleteMarkerReplication, Status, and Priority elements. The response -also returns those elements. For information about S3 on Outposts replication failure -reasons, see Replication failure reasons in the Amazon S3 User Guide. The following -operations are related to GetBucketReplication: PutBucketReplication -DeleteBucketReplication + This operation is not supported by directory buckets. Returns the routing configuration +for a Multi-Region Access Point, indicating which Regions are active or passive. To obtain +routing control changes and failover requests, use the Amazon S3 failover control +infrastructure endpoints in these five Amazon Web Services Regions: us-east-1 +us-west-2 ap-southeast-2 ap-northeast-1 eu-west-1 # Arguments -- `name`: Specifies the bucket to get the replication information for. For using this - parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the - x-amz-outpost-id as well. For using this parameter with S3 on Outposts with the Amazon Web - Services SDK and CLI, you must specify the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. +- `mrap`: The Multi-Region Access Point ARN. +- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region + Access Point. 
""" -function get_bucket_replication( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_multi_region_access_point_routes( + mrap, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/bucket/$(name)/replication", + "/v20180820/mrap/instances/$(mrap)/routes", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -1955,15 +3369,15 @@ function get_bucket_replication( feature_set=SERVICE_FEATURE_SET, ) end -function get_bucket_replication( - name, +function get_multi_region_access_point_routes( + mrap, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/bucket/$(name)/replication", + "/v20180820/mrap/instances/$(mrap)/routes", Dict{String,Any}( mergewith( _merge, @@ -1979,43 +3393,25 @@ function get_bucket_replication( end """ - get_bucket_tagging(name, x-amz-account-id) - get_bucket_tagging(name, x-amz-account-id, params::Dict{String,<:Any}) + get_public_access_block(x-amz-account-id) + get_public_access_block(x-amz-account-id, params::Dict{String,<:Any}) - This action gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see -GetBucketTagging in the Amazon S3 API Reference. Returns the tag set associated with the -Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 -User Guide. To use this action, you must have permission to perform the GetBucketTagging -action. By default, the bucket owner has this permission and can grant this permission to -others. GetBucketTagging has the following special error: Error code: NoSuchTagSetError - Description: There is no tag set associated with the bucket. All Amazon S3 on -Outposts REST API requests for this action require an additional parameter of -x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts -endpoint hostname prefix instead of s3-control. For an example of the request syntax for -Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the -x-amz-outpost-id derived by using the access point ARN, see the Examples section. The -following actions are related to GetBucketTagging: PutBucketTagging -DeleteBucketTagging + This operation is not supported by directory buckets. Retrieves the PublicAccessBlock +configuration for an Amazon Web Services account. For more information, see Using Amazon +S3 block public access. Related actions include: DeletePublicAccessBlock +PutPublicAccessBlock # Arguments -- `name`: Specifies the bucket. For using this parameter with Amazon S3 on Outposts with - the REST API, you must specify the name and the x-amz-outpost-id as well. For using this - parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify - the ARN of the bucket accessed in the format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. +- `x-amz-account-id`: The account ID for the Amazon Web Services account whose + PublicAccessBlock configuration you want to retrieve. 
""" -function get_bucket_tagging( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_public_access_block( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/bucket/$(name)/tagging", + "/v20180820/configuration/publicAccessBlock", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2023,15 +3419,14 @@ function get_bucket_tagging( feature_set=SERVICE_FEATURE_SET, ) end -function get_bucket_tagging( - name, +function get_public_access_block( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/bucket/$(name)/tagging", + "/v20180820/configuration/publicAccessBlock", Dict{String,Any}( mergewith( _merge, @@ -2047,36 +3442,28 @@ function get_bucket_tagging( end """ - get_bucket_versioning(name, x-amz-account-id) - get_bucket_versioning(name, x-amz-account-id, params::Dict{String,<:Any}) - - This operation returns the versioning state for S3 on Outposts buckets only. To return the -versioning state for an S3 bucket, see GetBucketVersioning in the Amazon S3 API Reference. - Returns the versioning state for an S3 on Outposts bucket. With S3 Versioning, you can -save multiple distinct copies of your objects and recover from unintended user actions and -application failures. If you've never set versioning on your bucket, it has no versioning -state. In that case, the GetBucketVersioning request does not return a versioning state -value. For more information about versioning, see Versioning in the Amazon S3 User Guide. -All Amazon S3 on Outposts REST API requests for this action require an additional parameter -of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on -Outposts endpoint hostname prefix instead of s3-control. For an example of the request -syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and -the x-amz-outpost-id derived by using the access point ARN, see the Examples section. The -following operations are related to GetBucketVersioning for S3 on Outposts. -PutBucketVersioning PutBucketLifecycleConfiguration GetBucketLifecycleConfiguration + get_storage_lens_configuration(storagelensid, x-amz-account-id) + get_storage_lens_configuration(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) + This operation is not supported by directory buckets. Gets the Amazon S3 Storage Lens +configuration. For more information, see Assessing your storage activity and usage with +Amazon S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens +metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide. To use this +action, you must have permission to perform the s3:GetStorageLensConfiguration action. For +more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 +User Guide. # Arguments -- `name`: The S3 on Outposts bucket to return the versioning state for. -- `x-amz-account-id`: The Amazon Web Services account ID of the S3 on Outposts bucket. +- `storagelensid`: The ID of the Amazon S3 Storage Lens configuration. +- `x-amz-account-id`: The account ID of the requester. 
""" -function get_bucket_versioning( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_storage_lens_configuration( + storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/bucket/$(name)/versioning", + "/v20180820/storagelens/$(storagelensid)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2084,15 +3471,15 @@ function get_bucket_versioning( feature_set=SERVICE_FEATURE_SET, ) end -function get_bucket_versioning( - name, +function get_storage_lens_configuration( + storagelensid, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/bucket/$(name)/versioning", + "/v20180820/storagelens/$(storagelensid)", Dict{String,Any}( mergewith( _merge, @@ -2108,26 +3495,27 @@ function get_bucket_versioning( end """ - get_job_tagging(id, x-amz-account-id) - get_job_tagging(id, x-amz-account-id, params::Dict{String,<:Any}) + get_storage_lens_configuration_tagging(storagelensid, x-amz-account-id) + get_storage_lens_configuration_tagging(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) -Returns the tags on an S3 Batch Operations job. To use the GetJobTagging operation, you -must have permission to perform the s3:GetJobTagging action. For more information, see -Controlling access and labeling jobs using tags in the Amazon S3 User Guide. Related -actions include: CreateJob PutJobTagging DeleteJobTagging + This operation is not supported by directory buckets. Gets the tags of Amazon S3 Storage +Lens configuration. For more information about S3 Storage Lens, see Assessing your storage +activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide. To use this +action, you must have permission to perform the s3:GetStorageLensConfigurationTagging +action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the +Amazon S3 User Guide. # Arguments -- `id`: The ID for the S3 Batch Operations job whose tags you want to retrieve. -- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch - Operations job. +- `storagelensid`: The ID of the Amazon S3 Storage Lens configuration. +- `x-amz-account-id`: The account ID of the requester. 
""" -function get_job_tagging( - id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function get_storage_lens_configuration_tagging( + storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/jobs/$(id)/tagging", + "/v20180820/storagelens/$(storagelensid)/tagging", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2135,15 +3523,15 @@ function get_job_tagging( feature_set=SERVICE_FEATURE_SET, ) end -function get_job_tagging( - id, +function get_storage_lens_configuration_tagging( + storagelensid, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/jobs/$(id)/tagging", + "/v20180820/storagelens/$(storagelensid)/tagging", Dict{String,Any}( mergewith( _merge, @@ -2159,31 +3547,28 @@ function get_job_tagging( end """ - get_multi_region_access_point(name, x-amz-account-id) - get_multi_region_access_point(name, x-amz-account-id, params::Dict{String,<:Any}) + get_storage_lens_group(name, x-amz-account-id) + get_storage_lens_group(name, x-amz-account-id, params::Dict{String,<:Any}) -Returns configuration information about the specified Multi-Region Access Point. This -action will always be routed to the US West (Oregon) Region. For more information about the -restrictions around managing Multi-Region Access Points, see Managing Multi-Region Access -Points in the Amazon S3 User Guide. The following actions are related to -GetMultiRegionAccessPoint: CreateMultiRegionAccessPoint DeleteMultiRegionAccessPoint - DescribeMultiRegionAccessPointOperation ListMultiRegionAccessPoints + Retrieves the Storage Lens group configuration details. To use this operation, you must +have the permission to perform the s3:GetStorageLensGroup action. For more information +about the required Storage Lens Groups permissions, see Setting account permissions to use +S3 Storage Lens groups. For information about Storage Lens groups errors, see List of +Amazon S3 Storage Lens error codes. # Arguments -- `name`: The name of the Multi-Region Access Point whose configuration information you - want to receive. The name of the Multi-Region Access Point is different from the alias. For - more information about the distinction between the name and the alias of an Multi-Region - Access Point, see Managing Multi-Region Access Points in the Amazon S3 User Guide. -- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region - Access Point. +- `name`: The name of the Storage Lens group that you're trying to retrieve the + configuration details for. +- `x-amz-account-id`: The Amazon Web Services account ID associated with the Storage Lens + group that you're trying to retrieve the details for. 
""" -function get_multi_region_access_point( +function get_storage_lens_group( name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/mrap/instances/$(name)", + "/v20180820/storagelensgroup/$(name)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2191,7 +3576,7 @@ function get_multi_region_access_point( feature_set=SERVICE_FEATURE_SET, ) end -function get_multi_region_access_point( +function get_storage_lens_group( name, x_amz_account_id, params::AbstractDict{String}; @@ -2199,7 +3584,7 @@ function get_multi_region_access_point( ) return s3_control( "GET", - "/v20180820/mrap/instances/$(name)", + "/v20180820/storagelensgroup/$(name)", Dict{String,Any}( mergewith( _merge, @@ -2215,31 +3600,51 @@ function get_multi_region_access_point( end """ - get_multi_region_access_point_policy(name, x-amz-account-id) - get_multi_region_access_point_policy(name, x-amz-account-id, params::Dict{String,<:Any}) + list_access_grants(x-amz-account-id) + list_access_grants(x-amz-account-id, params::Dict{String,<:Any}) -Returns the access control policy of the specified Multi-Region Access Point. This action -will always be routed to the US West (Oregon) Region. For more information about the -restrictions around managing Multi-Region Access Points, see Managing Multi-Region Access -Points in the Amazon S3 User Guide. The following actions are related to -GetMultiRegionAccessPointPolicy: GetMultiRegionAccessPointPolicyStatus -PutMultiRegionAccessPointPolicy +Returns the list of access grants in your S3 Access Grants instance. Permissions You must +have the s3:ListAccessGrants permission to use this operation. # Arguments -- `name`: Specifies the Multi-Region Access Point. The name of the Multi-Region Access - Point is different from the alias. For more information about the distinction between the - name and the alias of an Multi-Region Access Point, see Managing Multi-Region Access Points - in the Amazon S3 User Guide. -- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region - Access Point. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. -""" -function get_multi_region_access_point_policy( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"application_arn"`: The Amazon Resource Name (ARN) of an Amazon Web Services IAM + Identity Center application associated with your Identity Center instance. If the grant + includes an application ARN, the grantee can only access the S3 data through this + application. +- `"granteeidentifier"`: The unique identifer of the Grantee. If the grantee type is IAM, + the identifier is the IAM Amazon Resource Name (ARN) of the user or role. If the grantee + type is a directory user or group, the identifier is 128-bit universally unique identifier + (UUID) in the format a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. You can obtain this UUID from + your Amazon Web Services IAM Identity Center instance. +- `"granteetype"`: The type of the grantee to which access has been granted. It can be one + of the following values: IAM - An IAM user or role. DIRECTORY_USER - Your corporate + directory user. 
You can use this option if you have added your corporate identity directory + to IAM Identity Center and associated the IAM Identity Center instance with your S3 Access + Grants instance. DIRECTORY_GROUP - Your corporate directory group. You can use this + option if you have added your corporate identity directory to IAM Identity Center and + associated the IAM Identity Center instance with your S3 Access Grants instance. +- `"grantscope"`: The S3 path of the data to which you are granting access. It is the + result of appending the Subprefix to the location scope. +- `"maxResults"`: The maximum number of access grants that you would like returned in the + List Access Grants response. If the results include the pagination token NextToken, make + another call using the NextToken to determine if there are more results. +- `"nextToken"`: A pagination token to request the next page of results. Pass this value + into a subsequent List Access Grants request in order to retrieve the next page of results. +- `"permission"`: The type of permission granted to your S3 data, which can be set to one + of the following values: READ – Grant read-only access to the S3 data. WRITE – + Grant write-only access to the S3 data. READWRITE – Grant both read and write access + to the S3 data. +""" +function list_access_grants( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/mrap/instances/$(name)/policy", + "/v20180820/accessgrantsinstance/grants", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2247,15 +3652,14 @@ function get_multi_region_access_point_policy( feature_set=SERVICE_FEATURE_SET, ) end -function get_multi_region_access_point_policy( - name, +function list_access_grants( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/mrap/instances/$(name)/policy", + "/v20180820/accessgrantsinstance/grants", Dict{String,Any}( mergewith( _merge, @@ -2271,31 +3675,32 @@ function get_multi_region_access_point_policy( end """ - get_multi_region_access_point_policy_status(name, x-amz-account-id) - get_multi_region_access_point_policy_status(name, x-amz-account-id, params::Dict{String,<:Any}) + list_access_grants_instances(x-amz-account-id) + list_access_grants_instances(x-amz-account-id, params::Dict{String,<:Any}) -Indicates whether the specified Multi-Region Access Point has an access control policy that -allows public access. This action will always be routed to the US West (Oregon) Region. For -more information about the restrictions around managing Multi-Region Access Points, see -Managing Multi-Region Access Points in the Amazon S3 User Guide. The following actions are -related to GetMultiRegionAccessPointPolicyStatus: GetMultiRegionAccessPointPolicy -PutMultiRegionAccessPointPolicy +Returns a list of S3 Access Grants instances. An S3 Access Grants instance serves as a +logical grouping for your individual access grants. You can only have one S3 Access Grants +instance per Region per account. Permissions You must have the +s3:ListAccessGrantsInstances permission to use this operation. # Arguments -- `name`: Specifies the Multi-Region Access Point. The name of the Multi-Region Access - Point is different from the alias. For more information about the distinction between the - name and the alias of an Multi-Region Access Point, see Managing Multi-Region Access Points - in the Amazon S3 User Guide. 
-- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region - Access Point. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. -""" -function get_multi_region_access_point_policy_status( - name, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of access grants that you would like returned in the + List Access Grants response. If the results include the pagination token NextToken, make + another call using the NextToken to determine if there are more results. +- `"nextToken"`: A pagination token to request the next page of results. Pass this value + into a subsequent List Access Grants Instances request in order to retrieve the next page + of results. +""" +function list_access_grants_instances( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/mrap/instances/$(name)/policystatus", + "/v20180820/accessgrantsinstances", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2303,15 +3708,14 @@ function get_multi_region_access_point_policy_status( feature_set=SERVICE_FEATURE_SET, ) end -function get_multi_region_access_point_policy_status( - name, +function list_access_grants_instances( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/mrap/instances/$(name)/policystatus", + "/v20180820/accessgrantsinstances", Dict{String,Any}( mergewith( _merge, @@ -2327,27 +3731,36 @@ function get_multi_region_access_point_policy_status( end """ - get_multi_region_access_point_routes(mrap, x-amz-account-id) - get_multi_region_access_point_routes(mrap, x-amz-account-id, params::Dict{String,<:Any}) + list_access_grants_locations(x-amz-account-id) + list_access_grants_locations(x-amz-account-id, params::Dict{String,<:Any}) -Returns the routing configuration for a Multi-Region Access Point, indicating which Regions -are active or passive. To obtain routing control changes and failover requests, use the -Amazon S3 failover control infrastructure endpoints in these five Amazon Web Services -Regions: us-east-1 us-west-2 ap-southeast-2 ap-northeast-1 eu-west-1 -Your Amazon S3 bucket does not need to be in these five Regions. +Returns a list of the locations registered in your S3 Access Grants instance. Permissions +You must have the s3:ListAccessGrantsLocations permission to use this operation. # Arguments -- `mrap`: The Multi-Region Access Point ARN. -- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region - Access Point. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. -""" -function get_multi_region_access_point_routes( - mrap, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"locationscope"`: The S3 path to the location that you are registering. The location + scope can be the default S3 location s3://, the S3 path to a bucket s3://<bucket>, or + the S3 path to a bucket and prefix s3://<bucket>/<prefix>. A prefix in S3 is a + string of characters at the beginning of an object key name used to organize the objects + that you store in your S3 buckets. 
For example, object key names that start with the + engineering/ prefix or object key names that start with the marketing/campaigns/ prefix. +- `"maxResults"`: The maximum number of access grants that you would like returned in the + List Access Grants response. If the results include the pagination token NextToken, make + another call using the NextToken to determine if there are more results. +- `"nextToken"`: A pagination token to request the next page of results. Pass this value + into a subsequent List Access Grants Locations request in order to retrieve the next page + of results. +""" +function list_access_grants_locations( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/mrap/instances/$(mrap)/routes", + "/v20180820/accessgrantsinstance/locations", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2355,15 +3768,14 @@ function get_multi_region_access_point_routes( feature_set=SERVICE_FEATURE_SET, ) end -function get_multi_region_access_point_routes( - mrap, +function list_access_grants_locations( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/mrap/instances/$(mrap)/routes", + "/v20180820/accessgrantsinstance/locations", Dict{String,Any}( mergewith( _merge, @@ -2379,24 +3791,52 @@ function get_multi_region_access_point_routes( end """ - get_public_access_block(x-amz-account-id) - get_public_access_block(x-amz-account-id, params::Dict{String,<:Any}) + list_access_points(x-amz-account-id) + list_access_points(x-amz-account-id, params::Dict{String,<:Any}) -Retrieves the PublicAccessBlock configuration for an Amazon Web Services account. For more -information, see Using Amazon S3 block public access. Related actions include: -DeletePublicAccessBlock PutPublicAccessBlock + This operation is not supported by directory buckets. Returns a list of the access points +that are owned by the current account that's associated with the specified bucket. You can +retrieve up to 1000 access points per call. If the specified bucket has more than 1,000 +access points (or the number specified in maxResults, whichever is less), the response will +include a continuation token that you can use to list the additional access points. All +Amazon S3 on Outposts REST API requests for this action require an additional parameter of +x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts +endpoint hostname prefix instead of s3-control. For an example of the request syntax for +Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the +x-amz-outpost-id derived by using the access point ARN, see the Examples section. The +following actions are related to ListAccessPoints: CreateAccessPoint +DeleteAccessPoint GetAccessPoint # Arguments -- `x-amz-account-id`: The account ID for the Amazon Web Services account whose - PublicAccessBlock configuration you want to retrieve. +- `x-amz-account-id`: The Amazon Web Services account ID for the account that owns the + specified access points. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"bucket"`: The name of the bucket whose associated access points you want to list. For + using this parameter with Amazon S3 on Outposts with the REST API, you must specify the + name and the x-amz-outpost-id as well. 
For using this parameter with S3 on Outposts with + the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the + format + arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< + ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost + owned by account 123456789012 in Region us-west-2, use the URL encoding of + arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value + must be URL encoded. +- `"maxResults"`: The maximum number of access points that you want to include in the list. + If the specified bucket has more than this number of access points, then the response will + include a continuation token in the NextToken field that you can use to retrieve the next + page of access points. +- `"nextToken"`: A continuation token. If a previous call to ListAccessPoints returned a + continuation token in the NextToken field, then providing that value here causes Amazon S3 + to retrieve the next page of results. """ -function get_public_access_block( +function list_access_points( x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/configuration/publicAccessBlock", + "/v20180820/accesspoint", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2404,14 +3844,14 @@ function get_public_access_block( feature_set=SERVICE_FEATURE_SET, ) end -function get_public_access_block( +function list_access_points( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/configuration/publicAccessBlock", + "/v20180820/accesspoint", Dict{String,Any}( mergewith( _merge, @@ -2427,27 +3867,36 @@ function get_public_access_block( end """ - get_storage_lens_configuration(storagelensid, x-amz-account-id) - get_storage_lens_configuration(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) + list_access_points_for_object_lambda(x-amz-account-id) + list_access_points_for_object_lambda(x-amz-account-id, params::Dict{String,<:Any}) -Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your -storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide. For a -complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the -Amazon S3 User Guide. To use this action, you must have permission to perform the -s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use -Amazon S3 Storage Lens in the Amazon S3 User Guide. + This operation is not supported by directory buckets. Returns some or all (up to 1,000) +access points associated with the Object Lambda Access Point per call. If there are more +access points than what can be returned in one call, the response will include a +continuation token that you can use to list the additional access points. The following +actions are related to ListAccessPointsForObjectLambda: CreateAccessPointForObjectLambda + DeleteAccessPointForObjectLambda GetAccessPointForObjectLambda # Arguments -- `storagelensid`: The ID of the Amazon S3 Storage Lens configuration. -- `x-amz-account-id`: The account ID of the requester. +- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda + Access Point. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: The maximum number of access points that you want to include in the list. + The response may contain fewer access points but will never contain more. If there are more + than this number of access points, then the response will include a continuation token in + the NextToken field that you can use to retrieve the next page of access points. +- `"nextToken"`: If the list has more access points than can be returned in one call to + this API, this field contains a continuation token that you can provide in subsequent calls + to this API to retrieve additional access points. """ -function get_storage_lens_configuration( - storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function list_access_points_for_object_lambda( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/storagelens/$(storagelensid)", + "/v20180820/accesspointforobjectlambda", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2455,15 +3904,14 @@ function get_storage_lens_configuration( feature_set=SERVICE_FEATURE_SET, ) end -function get_storage_lens_configuration( - storagelensid, +function list_access_points_for_object_lambda( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/storagelens/$(storagelensid)", + "/v20180820/accesspointforobjectlambda", Dict{String,Any}( mergewith( _merge, @@ -2479,26 +3927,34 @@ function get_storage_lens_configuration( end """ - get_storage_lens_configuration_tagging(storagelensid, x-amz-account-id) - get_storage_lens_configuration_tagging(storagelensid, x-amz-account-id, params::Dict{String,<:Any}) + list_jobs(x-amz-account-id) + list_jobs(x-amz-account-id, params::Dict{String,<:Any}) -Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 -Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in -the Amazon S3 User Guide. To use this action, you must have permission to perform the -s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions -to use Amazon S3 Storage Lens in the Amazon S3 User Guide. +Lists current S3 Batch Operations jobs as well as the jobs that have ended within the last +90 days for the Amazon Web Services account making the request. For more information, see +S3 Batch Operations in the Amazon S3 User Guide. Permissions To use the ListJobs +operation, you must have permission to perform the s3:ListJobs action. Related actions +include: CreateJob DescribeJob UpdateJobPriority UpdateJobStatus # Arguments -- `storagelensid`: The ID of the Amazon S3 Storage Lens configuration. -- `x-amz-account-id`: The account ID of the requester. +- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch + Operations job. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"jobStatuses"`: The List Jobs request returns jobs that match the statuses listed in + this element. +- `"maxResults"`: The maximum number of jobs that Amazon S3 will include in the List Jobs + response. If there are more jobs than this number, the response will include a pagination + token in the NextToken field to enable you to retrieve the next page of results. +- `"nextToken"`: A pagination token to request the next page of results. 
Use the token that + Amazon S3 returned in the NextToken element of the ListJobsResult from the previous List + Jobs request. """ -function get_storage_lens_configuration_tagging( - storagelensid, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() -) +function list_jobs(x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config()) return s3_control( "GET", - "/v20180820/storagelens/$(storagelensid)/tagging", + "/v20180820/jobs", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2506,15 +3962,14 @@ function get_storage_lens_configuration_tagging( feature_set=SERVICE_FEATURE_SET, ) end -function get_storage_lens_configuration_tagging( - storagelensid, +function list_jobs( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/storagelens/$(storagelensid)/tagging", + "/v20180820/jobs", Dict{String,Any}( mergewith( _merge, @@ -2530,51 +3985,35 @@ function get_storage_lens_configuration_tagging( end """ - list_access_points(x-amz-account-id) - list_access_points(x-amz-account-id, params::Dict{String,<:Any}) + list_multi_region_access_points(x-amz-account-id) + list_multi_region_access_points(x-amz-account-id, params::Dict{String,<:Any}) -Returns a list of the access points that are owned by the current account that's associated -with the specified bucket. You can retrieve up to 1000 access points per call. If the -specified bucket has more than 1,000 access points (or the number specified in maxResults, -whichever is less), the response will include a continuation token that you can use to list -the additional access points. All Amazon S3 on Outposts REST API requests for this action -require an additional parameter of x-amz-outpost-id to be passed with the request. In -addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. -For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts -endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, -see the Examples section. The following actions are related to ListAccessPoints: -CreateAccessPoint DeleteAccessPoint GetAccessPoint + This operation is not supported by directory buckets. Returns a list of the Multi-Region +Access Points currently associated with the specified Amazon Web Services account. Each +call can return up to 100 Multi-Region Access Points, the maximum number of Multi-Region +Access Points that can be associated with a single account. This action will always be +routed to the US West (Oregon) Region. For more information about the restrictions around +working with Multi-Region Access Points, see Multi-Region Access Point restrictions and +limitations in the Amazon S3 User Guide. The following actions are related to +ListMultiRegionAccessPoint: CreateMultiRegionAccessPoint +DeleteMultiRegionAccessPoint DescribeMultiRegionAccessPointOperation +GetMultiRegionAccessPoint # Arguments -- `x-amz-account-id`: The Amazon Web Services account ID for the account that owns the - specified access points. +- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region + Access Point. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"bucket"`: The name of the bucket whose associated access points you want to list. 
For - using this parameter with Amazon S3 on Outposts with the REST API, you must specify the - name and the x-amz-outpost-id as well. For using this parameter with S3 on Outposts with - the Amazon Web Services SDK and CLI, you must specify the ARN of the bucket accessed in the - format - arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/< - ;my-bucket-name>. For example, to access the bucket reports through Outpost my-outpost - owned by account 123456789012 in Region us-west-2, use the URL encoding of - arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value - must be URL encoded. -- `"maxResults"`: The maximum number of access points that you want to include in the list. - If the specified bucket has more than this number of access points, then the response will - include a continuation token in the NextToken field that you can use to retrieve the next - page of access points. -- `"nextToken"`: A continuation token. If a previous call to ListAccessPoints returned a - continuation token in the NextToken field, then providing that value here causes Amazon S3 - to retrieve the next page of results. +- `"maxResults"`: Not currently used. Do not use this parameter. +- `"nextToken"`: Not currently used. Do not use this parameter. """ -function list_access_points( +function list_multi_region_access_points( x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspoint", + "/v20180820/mrap/instances", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2582,14 +4021,14 @@ function list_access_points( feature_set=SERVICE_FEATURE_SET, ) end -function list_access_points( +function list_multi_region_access_points( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/accesspoint", + "/v20180820/mrap/instances", Dict{String,Any}( mergewith( _merge, @@ -2605,36 +4044,31 @@ function list_access_points( end """ - list_access_points_for_object_lambda(x-amz-account-id) - list_access_points_for_object_lambda(x-amz-account-id, params::Dict{String,<:Any}) + list_regional_buckets(x-amz-account-id) + list_regional_buckets(x-amz-account-id, params::Dict{String,<:Any}) -Returns some or all (up to 1,000) access points associated with the Object Lambda Access -Point per call. If there are more access points than what can be returned in one call, the -response will include a continuation token that you can use to list the additional access -points. The following actions are related to ListAccessPointsForObjectLambda: -CreateAccessPointForObjectLambda DeleteAccessPointForObjectLambda -GetAccessPointForObjectLambda + This operation is not supported by directory buckets. Returns a list of all Outposts +buckets in an Outpost that are owned by the authenticated sender of the request. For more +information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide. For an example of +the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +prefix and x-amz-outpost-id in your request, see the Examples section. # Arguments -- `x-amz-account-id`: The account ID for the account that owns the specified Object Lambda - Access Point. +- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"maxResults"`: The maximum number of access points that you want to include in the list. - The response may contain fewer access points but will never contain more. If there are more - than this number of access points, then the response will include a continuation token in - the NextToken field that you can use to retrieve the next page of access points. -- `"nextToken"`: If the list has more access points than can be returned in one call to - this API, this field contains a continuation token that you can provide in subsequent calls - to this API to retrieve additional access points. +- `"maxResults"`: +- `"nextToken"`: +- `"x-amz-outpost-id"`: The ID of the Outposts resource. This ID is required by Amazon S3 + on Outposts buckets. """ -function list_access_points_for_object_lambda( +function list_regional_buckets( x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda", + "/v20180820/bucket", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2642,14 +4076,14 @@ function list_access_points_for_object_lambda( feature_set=SERVICE_FEATURE_SET, ) end -function list_access_points_for_object_lambda( +function list_regional_buckets( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/accesspointforobjectlambda", + "/v20180820/bucket", Dict{String,Any}( mergewith( _merge, @@ -2665,33 +4099,29 @@ function list_access_points_for_object_lambda( end """ - list_jobs(x-amz-account-id) - list_jobs(x-amz-account-id, params::Dict{String,<:Any}) + list_storage_lens_configurations(x-amz-account-id) + list_storage_lens_configurations(x-amz-account-id, params::Dict{String,<:Any}) -Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for -the Amazon Web Services account making the request. For more information, see S3 Batch -Operations in the Amazon S3 User Guide. Related actions include: CreateJob -DescribeJob UpdateJobPriority UpdateJobStatus + This operation is not supported by directory buckets. Gets a list of Amazon S3 Storage +Lens configurations. For more information about S3 Storage Lens, see Assessing your storage +activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide. To use this +action, you must have permission to perform the s3:ListStorageLensConfigurations action. +For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon +S3 User Guide. # Arguments -- `x-amz-account-id`: The Amazon Web Services account ID associated with the S3 Batch - Operations job. +- `x-amz-account-id`: The account ID of the requester. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"jobStatuses"`: The List Jobs request returns jobs that match the statuses listed in - this element. -- `"maxResults"`: The maximum number of jobs that Amazon S3 will include in the List Jobs - response. If there are more jobs than this number, the response will include a pagination - token in the NextToken field to enable you to retrieve the next page of results. -- `"nextToken"`: A pagination token to request the next page of results. Use the token that - Amazon S3 returned in the NextToken element of the ListJobsResult from the previous List - Jobs request. +- `"nextToken"`: A pagination token to request the next page of results. 
""" -function list_jobs(x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config()) +function list_storage_lens_configurations( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) return s3_control( "GET", - "/v20180820/jobs", + "/v20180820/storagelens", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2699,14 +4129,14 @@ function list_jobs(x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_co feature_set=SERVICE_FEATURE_SET, ) end -function list_jobs( +function list_storage_lens_configurations( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/jobs", + "/v20180820/storagelens", Dict{String,Any}( mergewith( _merge, @@ -2722,34 +4152,30 @@ function list_jobs( end """ - list_multi_region_access_points(x-amz-account-id) - list_multi_region_access_points(x-amz-account-id, params::Dict{String,<:Any}) + list_storage_lens_groups(x-amz-account-id) + list_storage_lens_groups(x-amz-account-id, params::Dict{String,<:Any}) -Returns a list of the Multi-Region Access Points currently associated with the specified -Amazon Web Services account. Each call can return up to 100 Multi-Region Access Points, the -maximum number of Multi-Region Access Points that can be associated with a single account. -This action will always be routed to the US West (Oregon) Region. For more information -about the restrictions around managing Multi-Region Access Points, see Managing -Multi-Region Access Points in the Amazon S3 User Guide. The following actions are related -to ListMultiRegionAccessPoint: CreateMultiRegionAccessPoint -DeleteMultiRegionAccessPoint DescribeMultiRegionAccessPointOperation -GetMultiRegionAccessPoint + Lists all the Storage Lens groups in the specified home Region. To use this operation, +you must have the permission to perform the s3:ListStorageLensGroups action. For more +information about the required Storage Lens Groups permissions, see Setting account +permissions to use S3 Storage Lens groups. For information about Storage Lens groups +errors, see List of Amazon S3 Storage Lens error codes. # Arguments -- `x-amz-account-id`: The Amazon Web Services account ID for the owner of the Multi-Region - Access Point. +- `x-amz-account-id`: The Amazon Web Services account ID that owns the Storage Lens + groups. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Not currently used. Do not use this parameter. -- `"nextToken"`: Not currently used. Do not use this parameter. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. 
""" -function list_multi_region_access_points( +function list_storage_lens_groups( x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/mrap/instances", + "/v20180820/storagelensgroup", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2757,14 +4183,14 @@ function list_multi_region_access_points( feature_set=SERVICE_FEATURE_SET, ) end -function list_multi_region_access_points( +function list_storage_lens_groups( x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/mrap/instances", + "/v20180820/storagelensgroup", Dict{String,Any}( mergewith( _merge, @@ -2780,31 +4206,32 @@ function list_multi_region_access_points( end """ - list_regional_buckets(x-amz-account-id) - list_regional_buckets(x-amz-account-id, params::Dict{String,<:Any}) + list_tags_for_resource(resource_arn, x-amz-account-id) + list_tags_for_resource(resource_arn, x-amz-account-id, params::Dict{String,<:Any}) -Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated -sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon -S3 User Guide. For an example of the request syntax for Amazon S3 on Outposts that uses the -S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the -Examples section. +This operation allows you to list all the Amazon Web Services resource tags for a specified +resource. Each tag is a label consisting of a user-defined key and value. Tags can help you +manage, identify, organize, search for, and filter resources. Permissions You must have +the s3:ListTagsForResource permission to use this operation. This operation is only +supported for S3 Storage Lens groups and for S3 Access Grants. The tagged resource can be +an S3 Storage Lens group or S3 Access Grants instance, registered location, or grant. For +more information about the required Storage Lens Groups permissions, see Setting account +permissions to use S3 Storage Lens groups. For information about S3 Tagging errors, see +List of Amazon S3 Tagging error codes. # Arguments -- `x-amz-account-id`: The Amazon Web Services account ID of the Outposts bucket. +- `resource_arn`: The Amazon Resource Name (ARN) of the S3 resource that you want to list + the tags for. The tagged resource can be an S3 Storage Lens group or S3 Access Grants + instance, registered location, or grant. +- `x-amz-account-id`: The Amazon Web Services account ID of the resource owner. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: -- `"nextToken"`: -- `"x-amz-outpost-id"`: The ID of the Outposts resource. This ID is required by Amazon S3 - on Outposts buckets. 
""" -function list_regional_buckets( - x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function list_tags_for_resource( + resourceArn, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( "GET", - "/v20180820/bucket", + "/v20180820/tags/$(resourceArn)", Dict{String,Any}( "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) ); @@ -2812,14 +4239,15 @@ function list_regional_buckets( feature_set=SERVICE_FEATURE_SET, ) end -function list_regional_buckets( +function list_tags_for_resource( + resourceArn, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( "GET", - "/v20180820/bucket", + "/v20180820/tags/$(resourceArn)", Dict{String,Any}( mergewith( _merge, @@ -2835,48 +4263,50 @@ function list_regional_buckets( end """ - list_storage_lens_configurations(x-amz-account-id) - list_storage_lens_configurations(x-amz-account-id, params::Dict{String,<:Any}) + put_access_grants_instance_resource_policy(policy, x-amz-account-id) + put_access_grants_instance_resource_policy(policy, x-amz-account-id, params::Dict{String,<:Any}) -Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage -Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the -Amazon S3 User Guide. To use this action, you must have permission to perform the -s3:ListStorageLensConfigurations action. For more information, see Setting permissions to -use Amazon S3 Storage Lens in the Amazon S3 User Guide. +Updates the resource policy of the S3 Access Grants instance. Permissions You must have +the s3:PutAccessGrantsInstanceResourcePolicy permission to use this operation. # Arguments -- `x-amz-account-id`: The account ID of the requester. +- `policy`: The resource policy of the S3 Access Grants instance that you are updating. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"nextToken"`: A pagination token to request the next page of results. +- `"Organization"`: The Organization of the resource policy of the S3 Access Grants + instance. 
""" -function list_storage_lens_configurations( - x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +function put_access_grants_instance_resource_policy( + Policy, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() ) return s3_control( - "GET", - "/v20180820/storagelens", + "PUT", + "/v20180820/accessgrantsinstance/resourcepolicy", Dict{String,Any}( - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + "Policy" => Policy, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_storage_lens_configurations( +function put_access_grants_instance_resource_policy( + Policy, x_amz_account_id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return s3_control( - "GET", - "/v20180820/storagelens", + "PUT", + "/v20180820/accessgrantsinstance/resourcepolicy", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + "Policy" => Policy, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), ), params, ), @@ -2890,9 +4320,9 @@ end put_access_point_configuration_for_object_lambda(configuration, name, x-amz-account-id) put_access_point_configuration_for_object_lambda(configuration, name, x-amz-account-id, params::Dict{String,<:Any}) -Replaces configuration for an Object Lambda Access Point. The following actions are related -to PutAccessPointConfigurationForObjectLambda: -GetAccessPointConfigurationForObjectLambda + This operation is not supported by directory buckets. Replaces configuration for an +Object Lambda Access Point. The following actions are related to +PutAccessPointConfigurationForObjectLambda: GetAccessPointConfigurationForObjectLambda # Arguments - `configuration`: Object Lambda Access Point configuration document. @@ -2944,15 +4374,16 @@ end put_access_point_policy(policy, name, x-amz-account-id) put_access_point_policy(policy, name, x-amz-account-id, params::Dict{String,<:Any}) -Associates an access policy with the specified access point. Each access point can have -only one policy, so a request made to this API replaces any existing policy associated with -the specified access point. All Amazon S3 on Outposts REST API requests for this action -require an additional parameter of x-amz-outpost-id to be passed with the request. In -addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. -For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts -endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, -see the Examples section. The following actions are related to PutAccessPointPolicy: -GetAccessPointPolicy DeleteAccessPointPolicy + This operation is not supported by directory buckets. Associates an access policy with +the specified access point. Each access point can have only one policy, so a request made +to this API replaces any existing policy associated with the specified access point. All +Amazon S3 on Outposts REST API requests for this action require an additional parameter of +x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts +endpoint hostname prefix instead of s3-control. 
For an example of the request syntax for +Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the +x-amz-outpost-id derived by using the access point ARN, see the Examples section. The +following actions are related to PutAccessPointPolicy: GetAccessPointPolicy +DeleteAccessPointPolicy # Arguments - `policy`: The policy that you want to apply to the specified access point. For more @@ -3015,10 +4446,11 @@ end put_access_point_policy_for_object_lambda(policy, name, x-amz-account-id) put_access_point_policy_for_object_lambda(policy, name, x-amz-account-id, params::Dict{String,<:Any}) -Creates or replaces resource policy for an Object Lambda Access Point. For an example -policy, see Creating Object Lambda Access Points in the Amazon S3 User Guide. The following -actions are related to PutAccessPointPolicyForObjectLambda: -DeleteAccessPointPolicyForObjectLambda GetAccessPointPolicyForObjectLambda + This operation is not supported by directory buckets. Creates or replaces resource policy +for an Object Lambda Access Point. For an example policy, see Creating Object Lambda Access +Points in the Amazon S3 User Guide. The following actions are related to +PutAccessPointPolicyForObjectLambda: DeleteAccessPointPolicyForObjectLambda +GetAccessPointPolicyForObjectLambda # Arguments - `policy`: Object Lambda Access Point resource policy document. @@ -3495,8 +4927,8 @@ associate S3 Batch Operations tags with any job by sending a PUT request against tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use -this action to replace the tag set with the one you modified. For more information, see -Controlling access and labeling jobs using tags in the Amazon S3 User Guide. If you +this operation to replace the tag set with the one you modified. For more information, see +Controlling access and labeling jobs using tags in the Amazon S3 User Guide. If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing. For deleting existing tags for your Batch @@ -3506,9 +4938,10 @@ the maximum number of tags to 50 tags per job. You can associate up to 50 tags as long as they have unique tag keys. A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length. The key and values are case sensitive. For tagging-related restrictions related to characters and encodings, -see User-Defined Tag Restrictions in the Billing and Cost Management User Guide. To -use the PutJobTagging operation, you must have permission to perform the s3:PutJobTagging -action. Related actions include: CreateJob GetJobTagging DeleteJobTagging +see User-Defined Tag Restrictions in the Billing and Cost Management User Guide. +Permissions To use the PutJobTagging operation, you must have permission to perform the +s3:PutJobTagging action. Related actions include: CreateJob GetJobTagging +DeleteJobTagging # Arguments - `tags`: The set of tags to associate with the S3 Batch Operations job. 
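As a companion to the access point policy documentation above, a hedged sketch of calling the PutAccessPointPolicy wrapper with the positional order from its docstring (policy, name, account ID); every value here is a placeholder:

    using AWS
    @service S3_Control   # assumed module name

    account_id  = "111122223333"                                      # placeholder
    policy_json = "{\"Version\": \"2012-10-17\", \"Statement\": []}"  # placeholder access point policy
    S3_Control.put_access_point_policy(policy_json, "example-access-point", account_id)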
@@ -3560,13 +4993,14 @@ end put_multi_region_access_point_policy(client_token, details, x-amz-account-id) put_multi_region_access_point_policy(client_token, details, x-amz-account-id, params::Dict{String,<:Any}) -Associates an access control policy with the specified Multi-Region Access Point. Each -Multi-Region Access Point can have only one policy, so a request made to this action -replaces any existing policy that is associated with the specified Multi-Region Access -Point. This action will always be routed to the US West (Oregon) Region. For more -information about the restrictions around managing Multi-Region Access Points, see Managing -Multi-Region Access Points in the Amazon S3 User Guide. The following actions are related -to PutMultiRegionAccessPointPolicy: GetMultiRegionAccessPointPolicy + This operation is not supported by directory buckets. Associates an access control policy +with the specified Multi-Region Access Point. Each Multi-Region Access Point can have only +one policy, so a request made to this action replaces any existing policy that is +associated with the specified Multi-Region Access Point. This action will always be routed +to the US West (Oregon) Region. For more information about the restrictions around working +with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations +in the Amazon S3 User Guide. The following actions are related to +PutMultiRegionAccessPointPolicy: GetMultiRegionAccessPointPolicy GetMultiRegionAccessPointPolicyStatus # Arguments @@ -3626,10 +5060,11 @@ end put_public_access_block(public_access_block_configuration, x-amz-account-id) put_public_access_block(public_access_block_configuration, x-amz-account-id, params::Dict{String,<:Any}) -Creates or modifies the PublicAccessBlock configuration for an Amazon Web Services account. -For this operation, users must have the s3:PutAccountPublicAccessBlock permission. For more -information, see Using Amazon S3 block public access. Related actions include: -GetPublicAccessBlock DeletePublicAccessBlock + This operation is not supported by directory buckets. Creates or modifies the +PublicAccessBlock configuration for an Amazon Web Services account. For this operation, +users must have the s3:PutAccountPublicAccessBlock permission. For more information, see +Using Amazon S3 block public access. Related actions include: GetPublicAccessBlock +DeletePublicAccessBlock # Arguments - `public_access_block_configuration`: The PublicAccessBlock configuration that you want to @@ -3682,12 +5117,13 @@ end put_storage_lens_configuration(storage_lens_configuration, storagelensid, x-amz-account-id) put_storage_lens_configuration(storage_lens_configuration, storagelensid, x-amz-account-id, params::Dict{String,<:Any}) -Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, -see Working with Amazon S3 Storage Lens in the Amazon S3 User Guide. For a complete list of -S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide. -To use this action, you must have permission to perform the s3:PutStorageLensConfiguration -action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the -Amazon S3 User Guide. + This operation is not supported by directory buckets. Puts an Amazon S3 Storage Lens +configuration. For more information about S3 Storage Lens, see Working with Amazon S3 +Storage Lens in the Amazon S3 User Guide. 
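A hedged sketch of the PutPublicAccessBlock wrapper documented just above; the shape of the configuration Dict is an assumption based on the documented PublicAccessBlock fields, and the account ID is a placeholder:

    using AWS
    @service S3_Control   # assumed module name

    account_id = "111122223333"   # placeholder
    pab_config = Dict(            # assumed payload shape; see the S3 Control API reference
        "BlockPublicAcls"       => true,
        "IgnorePublicAcls"      => true,
        "BlockPublicPolicy"     => true,
        "RestrictPublicBuckets" => true,
    )
    S3_Control.put_public_access_block(pab_config, account_id)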
For a complete list of S3 Storage Lens metrics, +see S3 Storage Lens metrics glossary in the Amazon S3 User Guide. To use this action, you +must have permission to perform the s3:PutStorageLensConfiguration action. For more +information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User +Guide. # Arguments - `storage_lens_configuration`: The S3 Storage Lens configuration. @@ -3745,12 +5181,12 @@ end put_storage_lens_configuration_tagging(tag, storagelensid, x-amz-account-id) put_storage_lens_configuration_tagging(tag, storagelensid, x-amz-account-id, params::Dict{String,<:Any}) -Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more -information about S3 Storage Lens, see Assessing your storage activity and usage with -Amazon S3 Storage Lens in the Amazon S3 User Guide. To use this action, you must have -permission to perform the s3:PutStorageLensConfigurationTagging action. For more -information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User -Guide. + This operation is not supported by directory buckets. Put or replace tags on an existing +Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see +Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 +User Guide. To use this action, you must have permission to perform the +s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions +to use Amazon S3 Storage Lens in the Amazon S3 User Guide. # Arguments - `tag`: The tag set of the S3 Storage Lens configuration. You can set up to a maximum of @@ -3806,20 +5242,20 @@ end submit_multi_region_access_point_routes(route, mrap, x-amz-account-id) submit_multi_region_access_point_routes(route, mrap, x-amz-account-id, params::Dict{String,<:Any}) -Submits an updated route configuration for a Multi-Region Access Point. This API operation -updates the routing status for the specified Regions from active to passive, or from -passive to active. A value of 0 indicates a passive status, which means that traffic won't -be routed to the specified Region. A value of 100 indicates an active status, which means -that traffic will be routed to the specified Region. At least one Region must be active at -all times. When the routing configuration is changed, any in-progress operations (uploads, -copies, deletes, and so on) to formerly active Regions will continue to run to their final -completion state (success or failure). The routing configurations of any Regions that -aren’t specified remain unchanged. Updated routing configurations might not be -immediately applied. It can take up to 2 minutes for your changes to take effect. To -submit routing control changes and failover requests, use the Amazon S3 failover control -infrastructure endpoints in these five Amazon Web Services Regions: us-east-1 -us-west-2 ap-southeast-2 ap-northeast-1 eu-west-1 Your Amazon S3 bucket -does not need to be in these five Regions. + This operation is not supported by directory buckets. Submits an updated route +configuration for a Multi-Region Access Point. This API operation updates the routing +status for the specified Regions from active to passive, or from passive to active. A value +of 0 indicates a passive status, which means that traffic won't be routed to the specified +Region. A value of 100 indicates an active status, which means that traffic will be routed +to the specified Region. At least one Region must be active at all times. 
When the routing +configuration is changed, any in-progress operations (uploads, copies, deletes, and so on) +to formerly active Regions will continue to run to their final completion state (success or +failure). The routing configurations of any Regions that aren’t specified remain +unchanged. Updated routing configurations might not be immediately applied. It can take up +to 2 minutes for your changes to take effect. To submit routing control changes and +failover requests, use the Amazon S3 failover control infrastructure endpoints in these +five Amazon Web Services Regions: us-east-1 us-west-2 ap-southeast-2 +ap-northeast-1 eu-west-1 # Arguments - `route`: The different routes that make up the new route configuration. Active routes @@ -3872,13 +5308,210 @@ function submit_multi_region_access_point_routes( ) end +""" + tag_resource(tag, resource_arn, x-amz-account-id) + tag_resource(tag, resource_arn, x-amz-account-id, params::Dict{String,<:Any}) + + Creates a new Amazon Web Services resource tag or updates an existing resource tag. Each +tag is a label consisting of a user-defined key and value. Tags can help you manage, +identify, organize, search for, and filter resources. You can add up to 50 Amazon Web +Services resource tags for each S3 resource. This operation is only supported for S3 +Storage Lens groups and for S3 Access Grants. The tagged resource can be an S3 Storage Lens +group or S3 Access Grants instance, registered location, or grant. Permissions You must +have the s3:TagResource permission to use this operation. For more information about the +required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage +Lens groups. For information about S3 Tagging errors, see List of Amazon S3 Tagging error +codes. + +# Arguments +- `tag`: The Amazon Web Services resource tags that you want to add to the specified S3 + resource. +- `resource_arn`: The Amazon Resource Name (ARN) of the S3 resource that you're trying to + add tags to. The tagged resource can be an S3 Storage Lens group or S3 Access Grants + instance, registered location, or grant. +- `x-amz-account-id`: The Amazon Web Services account ID that created the S3 resource that + you're trying to add tags to or the requester's account ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The Amazon Web Services resource tags that you want to add to the specified S3 + resource. 
+""" +function tag_resource( + Tag, resourceArn, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "POST", + "/v20180820/tags/$(resourceArn)", + Dict{String,Any}( + "Tag" => Tag, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + Tag, + resourceArn, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "POST", + "/v20180820/tags/$(resourceArn)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Tag" => Tag, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys, x-amz-account-id) + untag_resource(resource_arn, tag_keys, x-amz-account-id, params::Dict{String,<:Any}) + + This operation removes the specified Amazon Web Services resource tags from an S3 +resource. Each tag is a label consisting of a user-defined key and value. Tags can help you +manage, identify, organize, search for, and filter resources. This operation is only +supported for S3 Storage Lens groups and for S3 Access Grants. The tagged resource can be +an S3 Storage Lens group or S3 Access Grants instance, registered location, or grant. +Permissions You must have the s3:UntagResource permission to use this operation. For +more information about the required Storage Lens Groups permissions, see Setting account +permissions to use S3 Storage Lens groups. For information about S3 Tagging errors, see +List of Amazon S3 Tagging error codes. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the S3 resource that you're trying to + remove the tags from. +- `tag_keys`: The array of tag key-value pairs that you're trying to remove from of the S3 + resource. +- `x-amz-account-id`: The Amazon Web Services account ID that owns the resource that + you're trying to remove the tags from. + +""" +function untag_resource( + resourceArn, + tagKeys, + x_amz_account_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/tags/$(resourceArn)", + Dict{String,Any}( + "tagKeys" => tagKeys, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "DELETE", + "/v20180820/tags/$(resourceArn)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "tagKeys" => tagKeys, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_access_grants_location(iamrole_arn, id, x-amz-account-id) + update_access_grants_location(iamrole_arn, id, x-amz-account-id, params::Dict{String,<:Any}) + +Updates the IAM role of a registered location in your S3 Access Grants instance. +Permissions You must have the s3:UpdateAccessGrantsLocation permission to use this +operation. Additional Permissions You must also have the following permission: +iam:PassRole + +# Arguments +- `iamrole_arn`: The Amazon Resource Name (ARN) of the IAM role for the registered + location. 
S3 Access Grants assumes this role to manage access to the registered location. +- `id`: The ID of the registered location that you are updating. S3 Access Grants assigns + this ID when you register the location. S3 Access Grants assigns the ID default to the + default location s3:// and assigns an auto-generated ID to other locations that you + register. The ID of the registered location to which you are granting access. S3 Access + Grants assigned this ID when you registered the location. S3 Access Grants assigns the ID + default to the default location s3:// and assigns an auto-generated ID to other locations + that you register. If you are passing the default location, you cannot create an access + grant for the entire default location. You must also specify a bucket or a bucket and + prefix in the Subprefix field. +- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. + +""" +function update_access_grants_location( + IAMRoleArn, id, x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "PUT", + "/v20180820/accessgrantsinstance/location/$(id)", + Dict{String,Any}( + "IAMRoleArn" => IAMRoleArn, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_access_grants_location( + IAMRoleArn, + id, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "PUT", + "/v20180820/accessgrantsinstance/location/$(id)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IAMRoleArn" => IAMRoleArn, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_job_priority(id, priority, x-amz-account-id) update_job_priority(id, priority, x-amz-account-id, params::Dict{String,<:Any}) Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch -Operations in the Amazon S3 User Guide. Related actions include: CreateJob ListJobs - DescribeJob UpdateJobStatus +Operations in the Amazon S3 User Guide. Permissions To use the UpdateJobPriority +operation, you must have permission to perform the s3:UpdateJobPriority action. Related +actions include: CreateJob ListJobs DescribeJob UpdateJobStatus # Arguments - `id`: The ID for the job whose priority you want to update. @@ -3930,10 +5563,11 @@ end update_job_status(id, requested_job_status, x-amz-account-id) update_job_status(id, requested_job_status, x-amz-account-id, params::Dict{String,<:Any}) -Updates the status for the specified job. Use this action to confirm that you want to run a -job or to cancel an existing job. For more information, see S3 Batch Operations in the -Amazon S3 User Guide. Related actions include: CreateJob ListJobs DescribeJob - UpdateJobStatus +Updates the status for the specified job. Use this operation to confirm that you want to +run a job or to cancel an existing job. For more information, see S3 Batch Operations in +the Amazon S3 User Guide. Permissions To use the UpdateJobStatus operation, you must have +permission to perform the s3:UpdateJobStatus action. Related actions include: +CreateJob ListJobs DescribeJob UpdateJobStatus # Arguments - `id`: The ID of the job whose status you want to update. 
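The hunk above adds three new wrappers for resource tagging and Access Grants locations. A hedged sketch of how they might be called; the ARNs, IDs, and the tag payload shape are placeholders or assumptions, not values taken from this patch:

    using AWS
    @service S3_Control   # assumed module name

    account_id   = "111122223333"                                                  # placeholder
    resource_arn = "arn:aws:s3:us-east-1:111122223333:storage-lens-group/example"  # placeholder ARN

    # Tag payload shape is an assumption; consult the S3 Control API reference.
    S3_Control.tag_resource([Dict("Key" => "team", "Value" => "analytics")], resource_arn, account_id)
    S3_Control.untag_resource(resource_arn, ["team"], account_id)

    # Update the IAM role of a registered Access Grants location (placeholder ARN and ID).
    role_arn = "arn:aws:iam::111122223333:role/AccessGrantsLocationRole"
    S3_Control.update_access_grants_location(role_arn, "example-location-id", account_id)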
@@ -3987,3 +5621,61 @@ function update_job_status( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_storage_lens_group(storage_lens_group, name, x-amz-account-id) + update_storage_lens_group(storage_lens_group, name, x-amz-account-id, params::Dict{String,<:Any}) + + Updates the existing Storage Lens group. To use this operation, you must have the +permission to perform the s3:UpdateStorageLensGroup action. For more information about the +required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage +Lens groups. For information about Storage Lens groups errors, see List of Amazon S3 +Storage Lens error codes. + +# Arguments +- `storage_lens_group`: The JSON file that contains the Storage Lens group configuration. +- `name`: The name of the Storage Lens group that you want to update. +- `x-amz-account-id`: The Amazon Web Services account ID of the Storage Lens group owner. + +""" +function update_storage_lens_group( + StorageLensGroup, + name, + x_amz_account_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "PUT", + "/v20180820/storagelensgroup/$(name)", + Dict{String,Any}( + "StorageLensGroup" => StorageLensGroup, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_storage_lens_group( + StorageLensGroup, + name, + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "PUT", + "/v20180820/storagelensgroup/$(name)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "StorageLensGroup" => StorageLensGroup, + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl index 80526ac426..bf951fe784 100644 --- a/src/services/sagemaker.jl +++ b/src/services/sagemaker.jl @@ -76,13 +76,13 @@ API, but not to training jobs that the hyperparameter tuning job launched before this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of -CreateHyperParameterTuningJob Tags that you add to a SageMaker Studio Domain or User -Profile by calling this API are also added to any Apps that the Domain or User Profile -launches after you call this API, but not to Apps that the Domain or User Profile launched -before you called this API. To make sure that the tags associated with a Domain or User -Profile are also added to all Apps that the Domain or User Profile launches, add the tags -when you first create the Domain or User Profile by specifying them in the Tags parameter -of CreateDomain or CreateUserProfile. +CreateHyperParameterTuningJob Tags that you add to a SageMaker Domain or User Profile by +calling this API are also added to any Apps that the Domain or User Profile launches after +you call this API, but not to Apps that the Domain or User Profile launched before you +called this API. To make sure that the tags associated with a Domain or User Profile are +also added to all Apps that the Domain or User Profile launches, add the tags when you +first create the Domain or User Profile by specifying them in the Tags parameter of +CreateDomain or CreateUserProfile. 
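For the UpdateStorageLensGroup wrapper added at the end of the s3_control changes above, a hedged sketch; the Storage Lens group configuration shape is an assumption and should be checked against the S3 Control API reference:

    using AWS
    @service S3_Control   # assumed module name

    account_id   = "111122223333"   # placeholder
    group_config = Dict(            # assumed configuration shape
        "Name"   => "example-group",
        "Filter" => Dict("MatchAnyPrefix" => ["logs/"]),
    )
    S3_Control.update_storage_lens_group(group_config, "example-group", account_id)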
# Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to tag. @@ -342,8 +342,8 @@ end create_app(app_name, app_type, domain_id, params::Dict{String,<:Any}) Creates a running app for the specified UserProfile. This operation is automatically -invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new -kernel configurations are selected by the user. A user may have multiple Apps active +invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel +configurations are selected by the user. A user may have multiple Apps active simultaneously. # Arguments @@ -406,14 +406,20 @@ end create_app_image_config(app_image_config_name, params::Dict{String,<:Any}) Creates a configuration for running a SageMaker image as a KernelGateway app. The -configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, -and a list of the kernels in the image. +configuration specifies the Amazon Elastic File System storage volume on the image, and a +list of the kernels in the image. # Arguments - `app_image_config_name`: The name of the AppImageConfig. Must be unique to your account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CodeEditorAppImageConfig"`: The CodeEditorAppImageConfig. You can only specify one + image kernel in the AppImageConfig API. This kernel is shown to users before the image + starts. After the image runs, all kernels are visible in Code Editor. +- `"JupyterLabAppImageConfig"`: The JupyterLabAppImageConfig. You can only specify one + image kernel in the AppImageConfig API. This kernel is shown to users before the image + starts. After the image runs, all kernels are visible in JupyterLab. - `"KernelGatewayImageConfig"`: The KernelGatewayImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel will be shown to users before the image starts. Once the image runs, all kernels are visible in JupyterLab. @@ -500,9 +506,15 @@ end create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn) create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn, params::Dict{String,<:Any}) -Creates an Autopilot job. Find the best-performing model after you run an Autopilot job by -calling DescribeAutoMLJob. For information about how to use Autopilot, see Automate Model -Development with Amazon SageMaker Autopilot. +Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. We +recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer +backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to +those of its previous version CreateAutoMLJob, as well as time-series forecasting, +non-tabular problem types such as image or text classification, and text generation (LLMs +fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 +in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model +after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or +DescribeAutoMLJob. # Arguments - `auto_mljob_name`: Identifies an Autopilot job. The name must be unique to your account @@ -519,16 +531,16 @@ Development with Amazon SageMaker Autopilot. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"AutoMLJobConfig"`: A collection of settings used to configure an AutoML job. -- `"AutoMLJobObjective"`: Defines the objective metric used to measure the predictive - quality of an AutoML job. You provide an AutoMLJobObjectiveMetricName and Autopilot infers - whether to minimize or maximize it. For CreateAutoMLJobV2, only Accuracy is supported. +- `"AutoMLJobObjective"`: Specifies a metric to minimize or maximize as the objective of a + job. If not specified, the default objective metric depends on the problem type. See + AutoMLJobObjective for the default values. - `"GenerateCandidateDefinitionsOnly"`: Generates possible candidates without training the models. A candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings. - `"ModelDeployConfig"`: Specifies how to generate the endpoint name for an automatic one-click Autopilot model deployment. - `"ProblemType"`: Defines the type of supervised learning problem available for the - candidates. For more information, see Amazon SageMaker Autopilot problem types. + candidates. For more information, see SageMaker Autopilot problem types. - `"Tags"`: An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web ServicesResources. Tag keys must be unique per @@ -584,18 +596,25 @@ end create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn) create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn, params::Dict{String,<:Any}) -Creates an Amazon SageMaker AutoML job that uses non-tabular data such as images or text -for Computer Vision or Natural Language Processing problems. Find the resulting model after -you run an AutoML job V2 by calling DescribeAutoMLJobV2. To create an AutoMLJob using -tabular data, see CreateAutoMLJob. This API action is callable through SageMaker Canvas -only. Calling it directly from the CLI or an SDK results in an error. +Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. +CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and +DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular +problem types identical to those of its previous version CreateAutoMLJob, as well as +time-series forecasting, non-tabular problem types such as image or text classification, +and text generation (LLMs fine-tuning). Find guidelines about how to migrate a +CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. +For the list of available problem types supported by CreateAutoMLJobV2, see +AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job +V2 by calling DescribeAutoMLJobV2. # Arguments - `auto_mljob_input_data_config`: An array of channel objects describing the input data and - their location. Each channel is a named input source. Similar to InputDataConfig supported - by CreateAutoMLJob. The supported formats depend on the problem type: - ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile TextClassification: - S3Prefix + their location. Each channel is a named input source. Similar to the InputDataConfig + attribute in the CreateAutoMLJob input parameters. 
The supported formats depend on the + problem type: For tabular problem types: S3Prefix, ManifestFile. For image + classification: S3Prefix, ManifestFile, AugmentedManifestFile. For text classification: + S3Prefix. For time-series forecasting: S3Prefix. For text generation (LLMs + fine-tuning): S3Prefix. - `auto_mljob_name`: Identifies an Autopilot job. The name must be unique to your account and is case insensitive. - `auto_mlproblem_type_config`: Defines the configuration settings of one of the supported @@ -607,13 +626,22 @@ only. Calling it directly from the CLI or an SDK results in an error. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoMLJobObjective"`: Specifies a metric to minimize or maximize as the objective of a - job. For CreateAutoMLJobV2, only Accuracy is supported. + job. If not specified, the default objective metric depends on the problem type. For the + list of default values per problem type, see AutoMLJobObjective. For tabular problem + types: You must either provide both the AutoMLJobObjective and indicate the type of + supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or + none at all. For text generation problem types (LLMs fine-tuning): Fine-tuning language + models in Autopilot does not require setting the AutoMLJobObjective field. Autopilot + fine-tunes LLMs without requiring multiple candidates to be trained and evaluated. Instead, + using your dataset, Autopilot directly fine-tunes your target model to enhance a default + objective metric, the cross-entropy loss. After fine-tuning a language model, you can + evaluate the quality of its generated text using different metrics. For a list of the + available metrics, see Metrics for fine-tuning LLMs in Autopilot. - `"DataSplitConfig"`: This structure specifies how to split the data into train and - validation datasets. If you are using the V1 API (for example CreateAutoMLJob) or the V2 - API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a - TextClassificationJobConfig problem type), the validation and training datasets must - contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 - GB in size. + validation datasets. The validation and training datasets must contain the same headers. + For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB + in size. This attribute must not be set for the time-series forecasting problem type, as + Autopilot automatically splits the input dataset into training and validation sets. - `"ModelDeployConfig"`: Specifies how to generate the endpoint name for an automatic one-click Autopilot model deployment. - `"SecurityConfig"`: The security configuration for traffic encryption or Amazon VPC @@ -672,6 +700,59 @@ function create_auto_mljob_v2( ) end +""" + create_cluster(cluster_name, instance_groups) + create_cluster(cluster_name, instance_groups, params::Dict{String,<:Any}) + +Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for +creating and managing persistent clusters for developing large machine learning models, +such as large language models (LLMs) and diffusion models. To learn more, see Amazon +SageMaker HyperPod in the Amazon SageMaker Developer Guide. + +# Arguments +- `cluster_name`: The name for the new SageMaker HyperPod cluster. +- `instance_groups`: The instance groups to be created in the SageMaker HyperPod cluster. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web + Services resource. You can add tags to your cluster in the same way you add them in other + Amazon Web Services services that support tagging. To learn more about tagging Amazon Web + Services resources in general, see Tagging Amazon Web Services Resources User Guide. +- `"VpcConfig"`: +""" +function create_cluster( + ClusterName, InstanceGroups; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "CreateCluster", + Dict{String,Any}("ClusterName" => ClusterName, "InstanceGroups" => InstanceGroups); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_cluster( + ClusterName, + InstanceGroups, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateCluster", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClusterName" => ClusterName, "InstanceGroups" => InstanceGroups + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_code_repository(code_repository_name, git_config) create_code_repository(code_repository_name, git_config, params::Dict{String,<:Any}) @@ -905,7 +986,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys monitoring job. - `"NetworkConfig"`: Specifies networking configuration for the monitoring job. - `"StoppingCondition"`: -- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost +- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. """ function create_data_quality_job_definition( @@ -1021,29 +1102,29 @@ end create_domain(auth_mode, default_user_settings, domain_name, subnet_ids, vpc_id) create_domain(auth_mode, default_user_settings, domain_name, subnet_ids, vpc_id, params::Dict{String,<:Any}) -Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon -Elastic File System (EFS) volume, a list of authorized users, and a variety of security, -application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a -domain can share notebook files and other artifacts with each other. EFS storage When a -domain is created, an EFS volume is created for use by all of the users within the domain. -Each user receives a private home directory within the EFS volume for notebooks, Git -repositories, and data files. SageMaker uses the Amazon Web Services Key Management Service -(Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon -Web Services managed key by default. For more control, you can specify a customer managed -key. For more information, see Protect Data at Rest Using Encryption. VPC configuration -All SageMaker Studio traffic between the domain and the EFS volume is through the specified -VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType -parameter. AppNetworkAccessType corresponds to the network access type that you choose when -you onboard to Studio. The following options are available: PublicInternetOnly - Non-EFS -traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This -is the default value. 
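For the new CreateCluster (SageMaker HyperPod) wrapper added above, a hedged sketch; the instance group field names and values are assumptions based on the documented API rather than values from this patch, and the module name passed to `@service` is likewise assumed:

    using AWS
    @service Sagemaker   # assumed; the macro should load src/services/sagemaker.jl

    instance_groups = [Dict(                      # assumed instance-group shape
        "InstanceGroupName" => "worker-group",
        "InstanceType"      => "ml.g5.xlarge",    # placeholder instance type
        "InstanceCount"     => 1,
        "ExecutionRole"     => "arn:aws:iam::111122223333:role/HyperPodRole",  # placeholder ARN
        "LifeCycleConfig"   => Dict(
            "SourceS3Uri" => "s3://example-bucket/lifecycle/",
            "OnCreate"    => "on_create.sh",
        ),
    )]
    Sagemaker.create_cluster("example-hyperpod-cluster", instance_groups)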
VpcOnly - All Studio traffic is through the specified VPC and -subnets. Internet access is disabled by default. To allow internet access, you must specify -a NAT gateway. When internet access is disabled, you won't be able to run a Studio notebook -or to train or host models unless your VPC has an interface endpoint to the SageMaker API -and runtime or a NAT gateway and your security groups allow outbound connections. NFS -traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in -order to launch a SageMaker Studio app successfully. For more information, see Connect -SageMaker Studio Notebooks to Resources in a VPC. +Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a +list of authorized users, and a variety of security, application, policy, and Amazon +Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files +and other artifacts with each other. EFS storage When a domain is created, an EFS volume +is created for use by all of the users within the domain. Each user receives a private home +directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker +uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt +the EFS volume attached to the domain with an Amazon Web Services managed key by default. +For more control, you can specify a customer managed key. For more information, see Protect +Data at Rest Using Encryption. VPC configuration All traffic between the domain and the +Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can +specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network +access type that you choose when you onboard to the domain. The following options are +available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon +SageMaker, which allows internet access. This is the default value. VpcOnly - All +traffic is through the specified VPC and subnets. Internet access is disabled by default. +To allow internet access, you must specify a NAT gateway. When internet access is disabled, +you won't be able to run a Amazon SageMaker Studio notebook or to train or host models +unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway +and your security groups allow outbound connections. NFS traffic over TCP on port 2049 +needs to be allowed in both inbound and outbound rules in order to launch a Amazon +SageMaker Studio app successfully. For more information, see Connect Amazon SageMaker +Studio Notebooks to Resources in a VPC. # Arguments - `auth_mode`: The mode of authentication that members use to access the domain. @@ -1052,16 +1133,16 @@ SageMaker Studio Notebooks to Resources in a VPC. aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain. - `domain_name`: A name for the domain. -- `subnet_ids`: The VPC subnets that Studio uses for communication. -- `vpc_id`: The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for +- `subnet_ids`: The VPC subnets that the domain uses for communication. +- `vpc_id`: The ID of the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AppNetworkAccessType"`: Specifies the VPC used for non-EFS traffic. 
The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by - Amazon SageMaker, which allows direct internet access VpcOnly - All Studio traffic is - through the specified VPC and subnets + Amazon SageMaker, which allows direct internet access VpcOnly - All traffic is through + the specified VPC and subnets - `"AppSecurityGroupManagement"`: The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and @@ -1070,9 +1151,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DefaultSpaceSettings"`: The default settings used to create a space. - `"DomainSettings"`: A collection of Domain settings. - `"HomeEfsFileSystemKmsKeyId"`: Use KmsKeyId. -- `"KmsKeyId"`: SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached - to the domain with an Amazon Web Services managed key by default. For more control, specify - a customer managed key. +- `"KmsKeyId"`: SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes + attached to the domain with an Amazon Web Services managed key by default. For more + control, specify a customer managed key. - `"Tags"`: Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. Tags that you specify for the Domain are also added to all Apps that the Domain launches. @@ -1318,40 +1399,38 @@ end Creates an endpoint using the endpoint configuration specified in the request. SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API. Use this API to deploy models using -SageMaker hosting services. For an example that calls this method when deploying a model -to SageMaker hosting services, see the Create Endpoint example notebook. You must not -delete an EndpointConfig that is in use by an endpoint that is live or while the -UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update -an endpoint, you must create a new EndpointConfig. The endpoint name must be unique within -an Amazon Web Services Region in your Amazon Web Services account. When it receives the -request, SageMaker creates the endpoint, launches the resources (ML compute instances), and -deploys the model(s) on them. When you call CreateEndpoint, a load call is made to -DynamoDB to verify that your endpoint configuration exists. When you read data from a -DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the -results of a recently completed write operation. The response might include some stale -data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If -you repeat your read request after a short time, the response should return the latest -data. So retry logic is recommended to handle these possible issues. We also recommend that -customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the -potential impact of a DynamoDB eventually consistent read. When SageMaker receives the -request, it sets the endpoint status to Creating. After it creates the endpoint, it sets -the status to InService. SageMaker can then process incoming requests for inferences. To -check the status of an endpoint, use the DescribeEndpoint API. 
If any of the models hosted -at this endpoint get model data from an Amazon S3 location, SageMaker uses Amazon Web -Services Security Token Service to download model artifacts from the S3 path you provided. -Amazon Web Services STS is activated in your Amazon Web Services account by default. If you -previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon -Web Services STS for that region. For more information, see Activating and Deactivating -Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services -Identity and Access Management User Guide. To add the IAM role policies for using this -API operation, go to the IAM console, and choose Roles in the left navigation pane. Search -the IAM role that you want to grant access to use the CreateEndpoint and -CreateEndpointConfig API operations, add the following policies to the role. Option 1: -For a full SageMaker access, search and attach the AmazonSageMakerFullAccess policy. -Option 2: For granting a limited access to an IAM role, paste the following Action elements -manually into the JSON file of the IAM role: \"Action\": [\"sagemaker:CreateEndpoint\", -\"sagemaker:CreateEndpointConfig\"] \"Resource\": [ -\"arn:aws:sagemaker:region:account-id:endpoint/endpointName\" +SageMaker hosting services. You must not delete an EndpointConfig that is in use by an +endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being +performed on the endpoint. To update an endpoint, you must create a new EndpointConfig. +The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web +Services account. When it receives the request, SageMaker creates the endpoint, launches +the resources (ML compute instances), and deploys the model(s) on them. When you call +CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration +exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , +the response might not reflect the results of a recently completed write operation. The +response might include some stale data. If the dependent entities are not yet in DynamoDB, +this causes a validation error. If you repeat your read request after a short time, the +response should return the latest data. So retry logic is recommended to handle these +possible issues. We also recommend that customers call DescribeEndpointConfig before +calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent +read. When SageMaker receives the request, it sets the endpoint status to Creating. After +it creates the endpoint, it sets the status to InService. SageMaker can then process +incoming requests for inferences. To check the status of an endpoint, use the +DescribeEndpoint API. If any of the models hosted at this endpoint get model data from an +Amazon S3 location, SageMaker uses Amazon Web Services Security Token Service to download +model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your +Amazon Web Services account by default. If you previously deactivated Amazon Web Services +STS for a region, you need to reactivate Amazon Web Services STS for that region. For more +information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web +Services Region in the Amazon Web Services Identity and Access Management User Guide. 
To +add the IAM role policies for using this API operation, go to the IAM console, and choose +Roles in the left navigation pane. Search the IAM role that you want to grant access to use +the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to +the role. Option 1: For a full SageMaker access, search and attach the +AmazonSageMakerFullAccess policy. Option 2: For granting a limited access to an IAM role, +paste the following Action elements manually into the JSON file of the IAM role: +\"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"] +\"Resource\": [ \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\" \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\" ] For more information, see SageMaker API Permissions: Actions, Permissions, and Resources Reference. @@ -1442,6 +1521,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys asynchronous inference. This is a required field in order for your Endpoint to be invoked using InvokeEndpointAsync. - `"DataCaptureConfig"`: +- `"EnableNetworkIsolation"`: Sets whether all model containers deployed to the endpoint + are isolated. If they are, no inbound or outbound network calls can be made to or from the + model containers. +- `"ExecutionRoleArn"`: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker + can assume to perform actions on your behalf. For more information, see SageMaker Roles. + To be able to pass this role to Amazon SageMaker, the caller of this action must have the + iam:PassRole permission. - `"ExplainerConfig"`: A member of CreateEndpointConfig that enables explainers. - `"KmsKeyId"`: The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML @@ -1468,6 +1554,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Tags"`: An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources. +- `"VpcConfig"`: """ function create_endpoint_config( EndpointConfigName, @@ -1574,8 +1661,10 @@ FeatureStore to describe a Record. The FeatureGroup defines the schema and feat contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the -FeatureGroups quota for your Amazon Web Services account. You must include at least one of -OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup. +FeatureGroups quota for your Amazon Web Services account. Note that it can take +approximately 10-15 minutes to provision an OnlineStore FeatureGroup with the InMemory +StorageType. You must include at least one of OnlineStoreConfig and OfflineStoreConfig to +create a FeatureGroup. # Arguments - `event_time_feature_name`: The name of the feature that stores the EventTime of a Record @@ -1592,15 +1681,15 @@ OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup. cannot be any of the following: is_deleted, write_time, api_invocation_time You can create up to 2,500 FeatureDefinitions per FeatureGroup. - `feature_group_name`: The name of the FeatureGroup. 
The name must be unique within an - Amazon Web Services Region in an Amazon Web Services account. The name: Must start and - end with an alphanumeric character. Can only contain alphanumeric character and hyphens. - Spaces are not allowed. + Amazon Web Services Region in an Amazon Web Services account. The name: Must start with + an alphanumeric character. Can only include alphanumeric characters, underscores, and + hyphens. Spaces are not allowed. - `record_identifier_feature_name`: The name of the Feature whose value uniquely identifies a Record defined in the FeatureStore. Only the latest record per identifier value will be stored in the OnlineStore. RecordIdentifierFeatureName must be one of feature definitions' names. You use the RecordIdentifierFeatureName to access data in a FeatureStore. This name: - Must start and end with an alphanumeric character. Can only contains alphanumeric - characters, hyphens, underscores. Spaces are not allowed. + Must start with an alphanumeric character. Can only contains alphanumeric characters, + hyphens, underscores. Spaces are not allowed. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1620,6 +1709,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"RoleArn"`: The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided. - `"Tags"`: Tags used to identify Features in each FeatureGroup. +- `"ThroughputConfig"`: """ function create_feature_group( EventTimeFeatureName, @@ -1668,15 +1758,13 @@ function create_feature_group( end """ - create_flow_definition(flow_definition_name, human_loop_config, output_config, role_arn) - create_flow_definition(flow_definition_name, human_loop_config, output_config, role_arn, params::Dict{String,<:Any}) + create_flow_definition(flow_definition_name, output_config, role_arn) + create_flow_definition(flow_definition_name, output_config, role_arn, params::Dict{String,<:Any}) Creates a flow definition. # Arguments - `flow_definition_name`: The name of your flow definition. -- `human_loop_config`: An object containing information about the tasks the human reviewers - will perform. - `output_config`: An object containing information about where the human review results will be uploaded. - `role_arn`: The Amazon Resource Name (ARN) of the role needed to call other services on @@ -1687,6 +1775,8 @@ Creates a flow definition. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"HumanLoopActivationConfig"`: An object containing information about the events that trigger a human workflow. +- `"HumanLoopConfig"`: An object containing information about the tasks the human reviewers + will perform. - `"HumanLoopRequestSource"`: Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. - `"Tags"`: An array of key-value pairs that contain metadata to help you categorize and @@ -1695,7 +1785,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys """ function create_flow_definition( FlowDefinitionName, - HumanLoopConfig, OutputConfig, RoleArn; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1704,7 +1793,6 @@ function create_flow_definition( "CreateFlowDefinition", Dict{String,Any}( "FlowDefinitionName" => FlowDefinitionName, - "HumanLoopConfig" => HumanLoopConfig, "OutputConfig" => OutputConfig, "RoleArn" => RoleArn, ); @@ -1714,7 +1802,6 @@ function create_flow_definition( end function create_flow_definition( FlowDefinitionName, - HumanLoopConfig, OutputConfig, RoleArn, params::AbstractDict{String}; @@ -1727,7 +1814,6 @@ function create_flow_definition( _merge, Dict{String,Any}( "FlowDefinitionName" => FlowDefinitionName, - "HumanLoopConfig" => HumanLoopConfig, "OutputConfig" => OutputConfig, "RoleArn" => RoleArn, ), @@ -1743,7 +1829,7 @@ end create_hub(hub_description, hub_name) create_hub(hub_description, hub_name, params::Dict{String,<:Any}) -Create a hub. Hub APIs are only callable through SageMaker Studio. +Create a hub. # Arguments - `hub_description`: A description of the hub. @@ -1786,6 +1872,59 @@ function create_hub( ) end +""" + create_hub_content_reference(hub_name, sage_maker_public_hub_content_arn) + create_hub_content_reference(hub_name, sage_maker_public_hub_content_arn, params::Dict{String,<:Any}) + +Create a hub content reference in order to add a model in the JumpStart public hub to a +private hub. + +# Arguments +- `hub_name`: The name of the hub to add the hub content reference to. +- `sage_maker_public_hub_content_arn`: The ARN of the public hub content to reference. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"HubContentName"`: The name of the hub content to reference. +- `"MinVersion"`: The minimum version of the hub content to reference. +- `"Tags"`: Any tags associated with the hub content to reference. +""" +function create_hub_content_reference( + HubName, SageMakerPublicHubContentArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "CreateHubContentReference", + Dict{String,Any}( + "HubName" => HubName, + "SageMakerPublicHubContentArn" => SageMakerPublicHubContentArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_hub_content_reference( + HubName, + SageMakerPublicHubContentArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateHubContentReference", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "HubName" => HubName, + "SageMakerPublicHubContentArn" => SageMakerPublicHubContentArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_human_task_ui(human_task_ui_name, ui_template) create_human_task_ui(human_task_ui_name, ui_template, params::Dict{String,<:Any}) @@ -1941,8 +2080,8 @@ end create_image(image_name, role_arn, params::Dict{String,<:Any}) Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image -version represents a container image stored in Amazon Elastic Container Registry (ECR). For -more information, see Bring your own SageMaker image. +version represents a container image stored in Amazon ECR. For more information, see Bring +your own SageMaker image. # Arguments - `image_name`: The name of the image. Must be unique to your account. 
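To make the CreateImage wrapper above and the CreateImageVersion wrapper in the next hunk concrete, here is a minimal sketch, assuming the usual AWS.jl `@service` pattern; the image name, IAM role ARN, and ECR URI are placeholders.

```julia
using AWS
using UUIDs
@service SageMaker

# Placeholder identifiers -- substitute your own account, IAM role, and ECR repository.
role_arn   = "arn:aws:iam::123456789012:role/SageMakerExecutionRole"
base_image = "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-custom-repo:latest"

# Register the custom image, then add a version backed by the Amazon ECR container image.
SageMaker.create_image("my-custom-image", role_arn)
SageMaker.create_image_version(base_image, string(uuid4()), "my-custom-image")
```

The client token is a required positional argument in the generated wrapper, so a fresh UUID is passed explicitly here rather than relying on the SDK to supply one.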
@@ -1988,13 +2127,12 @@ end create_image_version(base_image, client_token, image_name, params::Dict{String,<:Any}) Creates a version of the SageMaker image specified by ImageName. The version represents the -Amazon Elastic Container Registry (ECR) container image specified by BaseImage. +Amazon ECR container image specified by BaseImage. # Arguments - `base_image`: The registry path of the container image to use as the starting point for - this version. The path is an Amazon Elastic Container Registry (ECR) URI in the following - format: <acct-id>.dkr.ecr.<region>.amazonaws.com/<repo-name[:tag] or - [@digest]> + this version. The path is an Amazon ECR URI in the following format: + <acct-id>.dkr.ecr.<region>.amazonaws.com/<repo-name[:tag] or [@digest]> - `client_token`: A unique ID. If not specified, the Amazon Web Services CLI and Amazon Web Services SDKs, such as the SDK for Python (Boto3), add a unique value to the call. - `image_name`: The ImageName of the Image to create a version of. @@ -2056,6 +2194,84 @@ function create_image_version( ) end +""" + create_inference_component(endpoint_name, inference_component_name, runtime_config, specification, variant_name) + create_inference_component(endpoint_name, inference_component_name, runtime_config, specification, variant_name, params::Dict{String,<:Any}) + +Creates an inference component, which is a SageMaker hosting object that you can use to +deploy a model to an endpoint. In the inference component settings, you specify the model, +the endpoint, and how the model utilizes the resources that the endpoint hosts. You can +optimize resource utilization by tailoring how the required CPU cores, accelerators, and +memory are allocated. You can deploy multiple inference components to an endpoint, where +each inference component contains one model and the resource utilization needs for that +individual model. After you deploy an inference component, you can directly invoke the +associated model when you use the InvokeEndpoint API action. + +# Arguments +- `endpoint_name`: The name of an existing endpoint where you host the inference component. +- `inference_component_name`: A unique name to assign to the inference component. +- `runtime_config`: Runtime settings for a model that is deployed with an inference + component. +- `specification`: Details about the resources to deploy with this inference component, + including the model, container, and compute resources. +- `variant_name`: The name of an existing production variant where you host the inference + component. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: A list of key-value pairs associated with the model. For more information, see + Tagging Amazon Web Services resources in the Amazon Web Services General Reference. 
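A minimal usage sketch for the wrapper defined below, assuming the usual AWS.jl `@service` pattern. The endpoint, variant, and component names are placeholders, and the nested request keys are illustrative only; check them against the CreateInferenceComponent API reference.

```julia
using AWS
@service SageMaker

# Placeholder names and an illustrative request shape -- adjust to your deployment.
specification = Dict(
    "ModelName" => "my-model",
    "ComputeResourceRequirements" => Dict(
        "NumberOfCpuCoresRequired" => 1,
        "MinMemoryRequiredInMB" => 1024,
    ),
)
runtime_config = Dict("CopyCount" => 1)

SageMaker.create_inference_component(
    "my-endpoint", "my-inference-component", runtime_config, specification, "AllTraffic"
)
```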
+""" +function create_inference_component( + EndpointName, + InferenceComponentName, + RuntimeConfig, + Specification, + VariantName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateInferenceComponent", + Dict{String,Any}( + "EndpointName" => EndpointName, + "InferenceComponentName" => InferenceComponentName, + "RuntimeConfig" => RuntimeConfig, + "Specification" => Specification, + "VariantName" => VariantName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_inference_component( + EndpointName, + InferenceComponentName, + RuntimeConfig, + Specification, + VariantName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateInferenceComponent", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EndpointName" => EndpointName, + "InferenceComponentName" => InferenceComponentName, + "RuntimeConfig" => RuntimeConfig, + "Specification" => Specification, + "VariantName" => VariantName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_inference_experiment(endpoint_name, model_variants, name, role_arn, shadow_mode_config, type) create_inference_experiment(endpoint_name, model_variants, name, role_arn, shadow_mode_config, type, params::Dict{String,<:Any}) @@ -2411,8 +2627,88 @@ function create_labeling_job( end """ - create_model(execution_role_arn, model_name) - create_model(execution_role_arn, model_name, params::Dict{String,<:Any}) + create_mlflow_tracking_server(artifact_store_uri, role_arn, tracking_server_name) + create_mlflow_tracking_server(artifact_store_uri, role_arn, tracking_server_name, params::Dict{String,<:Any}) + +Creates an MLflow Tracking Server using a general purpose Amazon S3 bucket as the artifact +store. For more information, see Create an MLflow Tracking Server. + +# Arguments +- `artifact_store_uri`: The S3 URI for a general purpose bucket to use as the MLflow + Tracking Server artifact store. +- `role_arn`: The Amazon Resource Name (ARN) for an IAM role in your account that the + MLflow Tracking Server uses to access the artifact store in Amazon S3. The role should have + AmazonS3FullAccess permissions. For more information on IAM permissions for tracking server + creation, see Set up IAM permissions for MLflow. +- `tracking_server_name`: A unique string identifying the tracking server name. This string + is part of the tracking server ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AutomaticModelRegistration"`: Whether to enable or disable automatic registration of + new MLflow models to the SageMaker Model Registry. To enable automatic model registration, + set this value to True. To disable automatic model registration, set this value to False. + If not specified, AutomaticModelRegistration defaults to False. +- `"MlflowVersion"`: The version of MLflow that the tracking server uses. To see which + MLflow versions are available to use, see How it works. +- `"Tags"`: Tags consisting of key-value pairs used to manage metadata for the tracking + server. +- `"TrackingServerSize"`: The size of the tracking server you want to create. You can + choose between \"Small\", \"Medium\", and \"Large\". The default MLflow Tracking Server + configuration size is \"Small\". 
You can choose a size depending on the projected use of + the tracking server such as the volume of data logged, number of users, and frequency of + use. We recommend using a small tracking server for teams of up to 25 users, a medium + tracking server for teams of up to 50 users, and a large tracking server for teams of up to + 100 users. +- `"WeeklyMaintenanceWindowStart"`: The day and time of the week in Coordinated Universal + Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For + example: TUE:03:30. +""" +function create_mlflow_tracking_server( + ArtifactStoreUri, + RoleArn, + TrackingServerName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateMlflowTrackingServer", + Dict{String,Any}( + "ArtifactStoreUri" => ArtifactStoreUri, + "RoleArn" => RoleArn, + "TrackingServerName" => TrackingServerName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_mlflow_tracking_server( + ArtifactStoreUri, + RoleArn, + TrackingServerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateMlflowTrackingServer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ArtifactStoreUri" => ArtifactStoreUri, + "RoleArn" => RoleArn, + "TrackingServerName" => TrackingServerName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_model(model_name) + create_model(model_name, params::Dict{String,<:Any}) Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference @@ -2421,23 +2717,16 @@ uses when you deploy the model for predictions. Use this API to create a model i to use SageMaker hosting services or run a batch transform job. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. SageMaker then deploys all of the containers that you -defined for the model in the hosting environment. For an example that calls this method -when deploying a model to SageMaker hosting services, see Create a Model (Amazon Web -Services SDK for Python (Boto 3)). To run a batch transform using your model, you start a -job with the CreateTransformJob API. SageMaker uses your model and your dataset to get -inferences which are then saved to a specified S3 location. In the request, you also -provide an IAM role that SageMaker can assume to access model artifacts and docker image -for deployment on ML compute hosting instances or for batch transform jobs. In addition, -you also use the IAM role to manage permissions the inference code needs. For example, if -the inference code access any other Amazon Web Services resources, you grant necessary -permissions via this role. - -# Arguments -- `execution_role_arn`: The Amazon Resource Name (ARN) of the IAM role that SageMaker can - assume to access model artifacts and docker image for deployment on ML compute instances or - for batch transform jobs. Deploying on ML compute instances is part of model hosting. For - more information, see SageMaker Roles. To be able to pass this role to SageMaker, the - caller of this API must have the iam:PassRole permission. +defined for the model in the hosting environment. To run a batch transform using your +model, you start a job with the CreateTransformJob API. 
SageMaker uses your model and your +dataset to get inferences which are then saved to a specified S3 location. In the request, +you also provide an IAM role that SageMaker can assume to access model artifacts and docker +image for deployment on ML compute hosting instances or for batch transform jobs. In +addition, you also use the IAM role to manage permissions the inference code needs. For +example, if the inference code access any other Amazon Web Services resources, you grant +necessary permissions via this role. + +# Arguments - `model_name`: The name of the new model. # Optional Parameters @@ -2445,6 +2734,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Containers"`: Specifies the containers in the inference pipeline. - `"EnableNetworkIsolation"`: Isolates the model container. No inbound or outbound network calls can be made to or from the model container. +- `"ExecutionRoleArn"`: The Amazon Resource Name (ARN) of the IAM role that SageMaker can + assume to access model artifacts and docker image for deployment on ML compute instances or + for batch transform jobs. Deploying on ML compute instances is part of model hosting. For + more information, see SageMaker Roles. To be able to pass this role to SageMaker, the + caller of this API must have the iam:PassRole permission. - `"InferenceExecutionConfig"`: Specifies details of how containers in a multi-container endpoint are called. - `"PrimaryContainer"`: The location of the primary docker image containing inference code, @@ -2459,18 +2753,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Protect Endpoints by Using an Amazon Virtual Private Cloud and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud. """ -function create_model( - ExecutionRoleArn, ModelName; aws_config::AbstractAWSConfig=global_aws_config() -) +function create_model(ModelName; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker( "CreateModel", - Dict{String,Any}("ExecutionRoleArn" => ExecutionRoleArn, "ModelName" => ModelName); + Dict{String,Any}("ModelName" => ModelName); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_model( - ExecutionRoleArn, ModelName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -2478,13 +2769,7 @@ function create_model( return sagemaker( "CreateModel", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ExecutionRoleArn" => ExecutionRoleArn, "ModelName" => ModelName - ), - params, - ), + mergewith(_merge, Dict{String,Any}("ModelName" => ModelName), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2513,7 +2798,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ModelBiasBaselineConfig"`: The baseline configuration for a model bias job. - `"NetworkConfig"`: Networking options for a model bias job. - `"StoppingCondition"`: -- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost +- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. """ function create_model_bias_job_definition( @@ -2643,7 +2928,7 @@ Creates an Amazon SageMaker Model Card export job. # Arguments - `model_card_export_job_name`: The name of the model card export job. -- `model_card_name`: The name of the model card to export. 
+- `model_card_name`: The name or Amazon Resource Name (ARN) of the model card to export. - `output_config`: The model card output configuration that specifies the Amazon S3 path for exporting. @@ -2717,7 +3002,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys explainability job. - `"NetworkConfig"`: Networking options for a model explainability job. - `"StoppingCondition"`: -- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost +- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. """ function create_model_explainability_job_definition( @@ -2809,15 +3094,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys model monitor is set using the model package. For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines in the Amazon SageMaker Developer Guide. -- `"InferenceSpecification"`: Specifies details about inference jobs that can be run with - models based on this model package, including the following: The Amazon ECR paths of - containers that contain the inference code and model artifacts. The instance types that - the model package supports for transform jobs and real-time endpoints used for inference. - The input and output content formats that the model package supports for inference. +- `"InferenceSpecification"`: Specifies details about inference jobs that you can run with + models based on this model package, including the following information: The Amazon ECR + paths of containers that contain the inference code and model artifacts. The instance + types that the model package supports for transform jobs and real-time endpoints used for + inference. The input and output content formats that the model package supports for + inference. - `"MetadataProperties"`: - `"ModelApprovalStatus"`: Whether the model is approved for deployment. This parameter is optional for versioned models, and does not apply to unversioned models. For versioned models, the value of this parameter must be set to Approved to deploy the model. +- `"ModelCard"`: The model card associated with the model package. Since + ModelPackageModelCard is tied to a model package, it is a specific usage of a model card + and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard + schema does not include model_package_details, and model_overview is composed of the + model_creator and model_artifact properties. For more information about the model package + model card schema, see Model package model card schema. For more information about the + model card associated with the model package, see View the Details of a Model Version. - `"ModelMetrics"`: A structure that contains model metrics reports. - `"ModelPackageDescription"`: A description of the model package. - `"ModelPackageGroupName"`: The name or Amazon Resource Name (ARN) of the model package @@ -2830,10 +3123,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). This archive can hold multiple files that are all equally used in the load test. Each file in the archive must satisfy the size constraints of the InvokeEndpoint call. +- `"SecurityConfig"`: The KMS Key ID (KMSKeyId) used for encryption of model package + information. 
+- `"SkipModelValidation"`: Indicates if you want to skip model validation. - `"SourceAlgorithmSpecification"`: Details about the algorithm that was used to create the model package. +- `"SourceUri"`: The URI of the source for the model package. If you want to clone a model + package, set it to the model package Amazon Resource Name (ARN). If you want to register a + model, set it to the model ARN. - `"Tags"`: A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide. + If you supply ModelPackageGroupName, your model package belongs to the model group you + specify and uses the tags associated with the model group. In this case, you cannot supply + a tag argument. - `"Task"`: The machine learning task your model package accomplishes. Common machine learning tasks include object detection and image classification. The following tasks are supported by Inference Recommender: \"IMAGE_CLASSIFICATION\" | \"OBJECT_DETECTION\" | @@ -2931,7 +3233,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys monitoring job. - `"NetworkConfig"`: Specifies the network configuration for the monitoring job. - `"StoppingCondition"`: -- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost +- `"Tags"`: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. """ function create_model_quality_job_definition( @@ -3179,7 +3481,7 @@ Creates a lifecycle configuration that you can associate with a notebook instanc lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance. Each lifecycle configuration script has a limit of 16384 characters. The value of the PATH environment variable that is available to both scripts is -/sbin:bin:/usr/sbin:/usr/bin. View CloudWatch Logs for notebook instance lifecycle +/sbin:bin:/usr/sbin:/usr/bin. View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook]. Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the @@ -3301,19 +3603,18 @@ end create_presigned_domain_url(domain_id, user_profile_name, params::Dict{String,<:Any}) Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the -user will be automatically signed in to Amazon SageMaker Studio, and granted access to all -of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. -This operation can only be called when the authentication mode equals IAM. The IAM role or -user passed to this API defines the permissions to access the app. Once the presigned URL -is created, no additional permission is required to access this URL. IAM authorization -policies for this API are also enforced for every HTTP request and WebSocket frame that -attempts to connect to the app. You can restrict access to this API and to the URL that it -returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. -For more information, see Connect to SageMaker Studio Through an Interface VPC Endpoint . -The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 -minutes. You can configure this value using ExpiresInSeconds. 
If you try to use the URL -after the timeout limit expires, you are directed to the Amazon Web Services console -sign-in page. +user will be automatically signed in to the domain, and granted access to all of the Apps +and files associated with the Domain's Amazon Elastic File System volume. This operation +can only be called when the authentication mode equals IAM. The IAM role or user passed to +this API defines the permissions to access the app. Once the presigned URL is created, no +additional permission is required to access this URL. IAM authorization policies for this +API are also enforced for every HTTP request and WebSocket frame that attempts to connect +to the app. You can restrict access to this API and to the URL that it returns to a list of +IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, +see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that +you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can +configure this value using ExpiresInSeconds. If you try to use the URL after the timeout +limit expires, you are directed to the Amazon Web Services console sign-in page. # Arguments - `domain_id`: The domain ID. @@ -3323,6 +3624,17 @@ sign-in page. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ExpiresInSeconds"`: The number of seconds until the pre-signed URL expires. This value defaults to 300. +- `"LandingUri"`: The landing page that the user is directed to when accessing the + presigned URL. Using this value, users can access Studio or Studio Classic, even if it is + not the default experience for the domain. The supported values are: + studio::relative/path: Directs users to the relative path in Studio. + app:JupyterServer:relative/path: Directs users to the relative path in the Studio Classic + application. app:JupyterLab:relative/path: Directs users to the relative path in the + JupyterLab application. app:RStudioServerPro:relative/path: Directs users to the + relative path in the RStudio application. app:CodeEditor:relative/path: Directs users to + the relative path in the Code Editor, based on Code-OSS, Visual Studio Code - Open Source + application. app:Canvas:relative/path: Directs users to the relative path in the Canvas + application. - `"SessionExpirationDurationInSeconds"`: The session expiration duration in seconds. This value defaults to 43200. - `"SpaceName"`: The name of the space. @@ -3359,6 +3671,50 @@ function create_presigned_domain_url( ) end +""" + create_presigned_mlflow_tracking_server_url(tracking_server_name) + create_presigned_mlflow_tracking_server_url(tracking_server_name, params::Dict{String,<:Any}) + +Returns a presigned URL that you can use to connect to the MLflow UI attached to your +tracking server. For more information, see Launch the MLflow UI using a presigned URL. + +# Arguments +- `tracking_server_name`: The name of the tracking server to connect to your MLflow UI. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExpiresInSeconds"`: The duration in seconds that your presigned URL is valid. The + presigned URL can be used only once. +- `"SessionExpirationDurationInSeconds"`: The duration in seconds that your MLflow UI + session is valid. 
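A short usage sketch for the wrapper defined below, assuming an existing tracking server and the usual AWS.jl `@service` pattern; the server name and durations are placeholders.

```julia
using AWS
@service SageMaker

# "my-mlflow-server" stands in for an existing tracking server in this account and Region.
resp = SageMaker.create_presigned_mlflow_tracking_server_url(
    "my-mlflow-server",
    Dict("ExpiresInSeconds" => 300, "SessionExpirationDurationInSeconds" => 1800),
)
# resp carries the presigned URL; open it promptly, since it can be used only once.
```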
+""" +function create_presigned_mlflow_tracking_server_url( + TrackingServerName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "CreatePresignedMlflowTrackingServerUrl", + Dict{String,Any}("TrackingServerName" => TrackingServerName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_presigned_mlflow_tracking_server_url( + TrackingServerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreatePresignedMlflowTrackingServerUrl", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrackingServerName" => TrackingServerName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_presigned_notebook_instance_url(notebook_instance_name) create_presigned_notebook_instance_url(notebook_instance_name, params::Dict{String,<:Any}) @@ -3558,15 +3914,18 @@ end create_space(domain_id, space_name) create_space(domain_id, space_name, params::Dict{String,<:Any}) -Creates a space used for real time collaboration in a Domain. +Creates a private space or a space used for real time collaboration in a domain. # Arguments -- `domain_id`: The ID of the associated Domain. +- `domain_id`: The ID of the associated domain. - `space_name`: The name of the space. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"OwnershipSettings"`: A collection of ownership settings. +- `"SpaceDisplayName"`: The name of the space that appears in the SageMaker Studio UI. - `"SpaceSettings"`: A collection of space settings. +- `"SpaceSharingSettings"`: A collection of space sharing settings. - `"Tags"`: Tags to associated with the space. Each tag consists of a key and an optional value. Tag keys must be unique for each resource. Tags are searchable using the Search API. """ @@ -3604,14 +3963,15 @@ end create_studio_lifecycle_config(studio_lifecycle_config_app_type, studio_lifecycle_config_content, studio_lifecycle_config_name) create_studio_lifecycle_config(studio_lifecycle_config_app_type, studio_lifecycle_config_content, studio_lifecycle_config_name, params::Dict{String,<:Any}) -Creates a new Studio Lifecycle Configuration. +Creates a new Amazon SageMaker Studio Lifecycle Configuration. # Arguments - `studio_lifecycle_config_app_type`: The App type that the Lifecycle Configuration is attached to. -- `studio_lifecycle_config_content`: The content of your Studio Lifecycle Configuration - script. This content must be base64 encoded. -- `studio_lifecycle_config_name`: The name of the Studio Lifecycle Configuration to create. +- `studio_lifecycle_config_content`: The content of your Amazon SageMaker Studio Lifecycle + Configuration script. This content must be base64 encoded. +- `studio_lifecycle_config_name`: The name of the Amazon SageMaker Studio Lifecycle + Configuration to create. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3758,6 +4118,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys include any security-sensitive information including account access IDs, secrets or tokens in any hyperparameter field. If the use of security-sensitive credentials are detected, SageMaker will reject your training job request and return an exception error. +- `"InfraCheckConfig"`: Contains information about the infrastructure health check + configuration for the training job. 
- `"InputDataConfig"`: An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location. Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input @@ -3772,8 +4134,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ProfilerConfig"`: - `"ProfilerRuleConfigurations"`: Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. +- `"RemoteDebugConfig"`: Configuration for remote debugging. To learn more about the remote + debugging functionality of SageMaker, see Access a training container through Amazon Web + Services Systems Manager (SSM) for remote debugging. - `"RetryStrategy"`: The number of times to retry the job when the job fails due to an InternalServerError. +- `"SessionChainingConfig"`: Contains information about attribute-based access control + (ABAC) for the training job. - `"Tags"`: An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources. @@ -4081,11 +4448,11 @@ end Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other -user-oriented features. This entity is created when a user onboards to Amazon SageMaker -Studio. If an administrator invites a person by email or imports them from IAM Identity -Center, a user profile is automatically created. A user profile is the primary holder of -settings for an individual user and has a reference to the user's private Amazon Elastic -File System (EFS) home directory. +user-oriented features. This entity is created when a user onboards to a domain. If an +administrator invites a person by email or imports them from IAM Identity Center, a user +profile is automatically created. A user profile is the primary holder of settings for an +individual user and has a reference to the user's private Amazon Elastic File System home +directory. # Arguments - `domain_id`: The ID of the associated Domain. @@ -4224,6 +4591,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys expiring work items. - `"Tags"`: An array of key-value pairs. For more information, see Resource Tag and Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. +- `"WorkerAccessConfiguration"`: Use this optional parameter to constrain access to an + Amazon S3 resource based on the IP address using supported IAM global condition keys. The + Amazon S3 resource is accessed in the worker portal using a Amazon S3 presigned URL. - `"WorkforceName"`: The name of the workforce. """ function create_workteam( @@ -4491,6 +4861,40 @@ function delete_association( ) end +""" + delete_cluster(cluster_name) + delete_cluster(cluster_name, params::Dict{String,<:Any}) + +Delete a SageMaker HyperPod cluster. + +# Arguments +- `cluster_name`: The string name or the Amazon Resource Name (ARN) of the SageMaker + HyperPod cluster to delete. 
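A one-call usage sketch for the wrapper defined below, assuming the usual AWS.jl `@service` pattern; the cluster name is a placeholder, and a full ARN is accepted as well per the argument description.

```julia
using AWS
@service SageMaker

# Either the cluster name or its Amazon Resource Name (ARN) may be passed.
SageMaker.delete_cluster("my-hyperpod-cluster")
```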
+ +""" +function delete_cluster(ClusterName; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "DeleteCluster", + Dict{String,Any}("ClusterName" => ClusterName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_cluster( + ClusterName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteCluster", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClusterName" => ClusterName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_code_repository(code_repository_name) delete_code_repository(code_repository_name, params::Dict{String,<:Any}) @@ -4528,6 +4932,48 @@ function delete_code_repository( ) end +""" + delete_compilation_job(compilation_job_name) + delete_compilation_job(compilation_job_name, params::Dict{String,<:Any}) + +Deletes the specified compilation job. This action deletes only the compilation job +resource in Amazon SageMaker. It doesn't delete other resources that are related to that +job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, +the compiled model, or the IAM role. You can delete a compilation job only if its current +status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop +the job, and then delete it after its status becomes STOPPED. + +# Arguments +- `compilation_job_name`: The name of the compilation job to delete. + +""" +function delete_compilation_job( + CompilationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteCompilationJob", + Dict{String,Any}("CompilationJobName" => CompilationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_compilation_job( + CompilationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteCompilationJob", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("CompilationJobName" => CompilationJobName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_context(context_name) delete_context(context_name, params::Dict{String,<:Any}) @@ -4889,7 +5335,8 @@ Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called. Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your -OfflineStore are not deleted. +OfflineStore are not deleted. Note that it can take approximately 10-15 minutes to delete +an OnlineStore FeatureGroup with the InMemory StorageType. # Arguments - `feature_group_name`: The name of the FeatureGroup you want to delete. The name must be @@ -4964,7 +5411,7 @@ end delete_hub(hub_name) delete_hub(hub_name, params::Dict{String,<:Any}) -Delete a hub. Hub APIs are only callable through SageMaker Studio. +Delete a hub. # Arguments - `hub_name`: The name of the hub to delete. @@ -4993,7 +5440,7 @@ end delete_hub_content(hub_content_name, hub_content_type, hub_content_version, hub_name) delete_hub_content(hub_content_name, hub_content_type, hub_content_version, hub_name, params::Dict{String,<:Any}) -Delete the contents of a hub. Hub APIs are only callable through SageMaker Studio. +Delete the contents of a hub. 
# Arguments - `hub_content_name`: The name of the content that you want to delete from a hub. @@ -5049,13 +5496,67 @@ function delete_hub_content( end """ - delete_human_task_ui(human_task_ui_name) - delete_human_task_ui(human_task_ui_name, params::Dict{String,<:Any}) + delete_hub_content_reference(hub_content_name, hub_content_type, hub_name) + delete_hub_content_reference(hub_content_name, hub_content_type, hub_name, params::Dict{String,<:Any}) -Use this operation to delete a human task user interface (worker task template). To see a -list of human task user interfaces (work task templates) in your account, use -ListHumanTaskUis. When you delete a worker task template, it no longer appears when you -call ListHumanTaskUis. +Delete a hub content reference in order to remove a model from a private hub. + +# Arguments +- `hub_content_name`: The name of the hub content to delete. +- `hub_content_type`: The type of hub content to delete. +- `hub_name`: The name of the hub to delete the hub content reference from. + +""" +function delete_hub_content_reference( + HubContentName, + HubContentType, + HubName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteHubContentReference", + Dict{String,Any}( + "HubContentName" => HubContentName, + "HubContentType" => HubContentType, + "HubName" => HubName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_hub_content_reference( + HubContentName, + HubContentType, + HubName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteHubContentReference", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "HubContentName" => HubContentName, + "HubContentType" => HubContentType, + "HubName" => HubName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_human_task_ui(human_task_ui_name) + delete_human_task_ui(human_task_ui_name, params::Dict{String,<:Any}) + +Use this operation to delete a human task user interface (worker task template). To see a +list of human task user interfaces (work task templates) in your account, use +ListHumanTaskUis. When you delete a worker task template, it no longer appears when you +call ListHumanTaskUis. # Arguments - `human_task_ui_name`: The name of the human task user interface (work task template) you @@ -5089,6 +5590,51 @@ function delete_human_task_ui( ) end +""" + delete_hyper_parameter_tuning_job(hyper_parameter_tuning_job_name) + delete_hyper_parameter_tuning_job(hyper_parameter_tuning_job_name, params::Dict{String,<:Any}) + +Deletes a hyperparameter tuning job. The DeleteHyperParameterTuningJob API deletes only the +tuning job entry that was created in SageMaker when you called the +CreateHyperParameterTuningJob API. It does not delete training jobs, artifacts, or the IAM +role that you specified when creating the model. + +# Arguments +- `hyper_parameter_tuning_job_name`: The name of the hyperparameter tuning job that you + want to delete. 
+ +""" +function delete_hyper_parameter_tuning_job( + HyperParameterTuningJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteHyperParameterTuningJob", + Dict{String,Any}("HyperParameterTuningJobName" => HyperParameterTuningJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_hyper_parameter_tuning_job( + HyperParameterTuningJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteHyperParameterTuningJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "HyperParameterTuningJobName" => HyperParameterTuningJobName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_image(image_name) delete_image(image_name, params::Dict{String,<:Any}) @@ -5161,6 +5707,45 @@ function delete_image_version( ) end +""" + delete_inference_component(inference_component_name) + delete_inference_component(inference_component_name, params::Dict{String,<:Any}) + +Deletes an inference component. + +# Arguments +- `inference_component_name`: The name of the inference component to delete. + +""" +function delete_inference_component( + InferenceComponentName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteInferenceComponent", + Dict{String,Any}("InferenceComponentName" => InferenceComponentName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_inference_component( + InferenceComponentName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteInferenceComponent", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InferenceComponentName" => InferenceComponentName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_inference_experiment(name) delete_inference_experiment(name, params::Dict{String,<:Any}) @@ -5193,6 +5778,43 @@ function delete_inference_experiment( ) end +""" + delete_mlflow_tracking_server(tracking_server_name) + delete_mlflow_tracking_server(tracking_server_name, params::Dict{String,<:Any}) + +Deletes an MLflow Tracking Server. For more information, see Clean up MLflow resources. + +# Arguments +- `tracking_server_name`: The name of the the tracking server to delete. + +""" +function delete_mlflow_tracking_server( + TrackingServerName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteMlflowTrackingServer", + Dict{String,Any}("TrackingServerName" => TrackingServerName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_mlflow_tracking_server( + TrackingServerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteMlflowTrackingServer", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrackingServerName" => TrackingServerName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_model(model_name) delete_model(model_name, params::Dict{String,<:Any}) @@ -5707,7 +6329,7 @@ end Used to delete a space. # Arguments -- `domain_id`: The ID of the associated Domain. +- `domain_id`: The ID of the associated domain. - `space_name`: The name of the space. 
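A brief usage sketch for the wrapper defined below, assuming the usual AWS.jl `@service` pattern; the domain ID and space name are placeholders.

```julia
using AWS
@service SageMaker

# Placeholder domain ID and space name.
SageMaker.delete_space("d-xxxxxxxxxxxx", "my-space")
```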
""" @@ -5745,12 +6367,14 @@ end delete_studio_lifecycle_config(studio_lifecycle_config_name) delete_studio_lifecycle_config(studio_lifecycle_config_name, params::Dict{String,<:Any}) -Deletes the Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, -there must be no running apps using the Lifecycle Configuration. You must also remove the -Lifecycle Configuration from UserSettings in all Domains and UserProfiles. +Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the +Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. +You must also remove the Lifecycle Configuration from UserSettings in all Domains and +UserProfiles. # Arguments -- `studio_lifecycle_config_name`: The name of the Studio Lifecycle Configuration to delete. +- `studio_lifecycle_config_name`: The name of the Amazon SageMaker Studio Lifecycle + Configuration to delete. """ function delete_studio_lifecycle_config( @@ -5789,9 +6413,9 @@ end Deletes the specified tags from an SageMaker resource. To list a resource's tags, use the ListTags API. When you call this API to delete tags from a hyperparameter tuning job, the deleted tags are not removed from training jobs that the hyperparameter tuning job launched -before you called this API. When you call this API to delete tags from a SageMaker Studio -Domain or User Profile, the deleted tags are not removed from Apps that the SageMaker -Studio Domain or User Profile launched before you called this API. +before you called this API. When you call this API to delete tags from a SageMaker Domain +or User Profile, the deleted tags are not removed from Apps that the SageMaker Domain or +User Profile launched before you called this API. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource whose tags you want to @@ -5955,7 +6579,7 @@ Amazon Web Services Region where a workforce already exists, use this operation the existing workforce and then use CreateWorkforce to create a new workforce. If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a -workforce that contains one or more work teams, you will recieve a ResourceInUse error. +workforce that contains one or more work teams, you will receive a ResourceInUse error. # Arguments - `workforce_name`: The name of the workforce. @@ -6257,7 +6881,8 @@ end describe_auto_mljob(auto_mljob_name) describe_auto_mljob(auto_mljob_name, params::Dict{String,<:Any}) -Returns information about an Amazon SageMaker AutoML job. +Returns information about an AutoML job created by calling CreateAutoMLJob. AutoML jobs +created by calling CreateAutoMLJobV2 cannot be described by DescribeAutoMLJob. # Arguments - `auto_mljob_name`: Requests information about an AutoML job using its unique name. @@ -6292,12 +6917,11 @@ end describe_auto_mljob_v2(auto_mljob_name) describe_auto_mljob_v2(auto_mljob_name, params::Dict{String,<:Any}) -Returns information about an Amazon SageMaker AutoML V2 job. This API action is callable -through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an -error. +Returns information about an AutoML job created by calling CreateAutoMLJobV2 or +CreateAutoMLJob. # Arguments -- `auto_mljob_name`: Requests information about an AutoML V2 job using its unique name. +- `auto_mljob_name`: Requests information about an AutoML job V2 using its unique name. 
""" function describe_auto_mljob_v2( @@ -6325,6 +6949,83 @@ function describe_auto_mljob_v2( ) end +""" + describe_cluster(cluster_name) + describe_cluster(cluster_name, params::Dict{String,<:Any}) + +Retrieves information of a SageMaker HyperPod cluster. + +# Arguments +- `cluster_name`: The string name or the Amazon Resource Name (ARN) of the SageMaker + HyperPod cluster. + +""" +function describe_cluster(ClusterName; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "DescribeCluster", + Dict{String,Any}("ClusterName" => ClusterName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_cluster( + ClusterName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeCluster", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClusterName" => ClusterName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_cluster_node(cluster_name, node_id) + describe_cluster_node(cluster_name, node_id, params::Dict{String,<:Any}) + +Retrieves information of a node (also called a instance interchangeably) of a SageMaker +HyperPod cluster. + +# Arguments +- `cluster_name`: The string name or the Amazon Resource Name (ARN) of the SageMaker + HyperPod cluster in which the node is. +- `node_id`: The ID of the SageMaker HyperPod cluster node. + +""" +function describe_cluster_node( + ClusterName, NodeId; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DescribeClusterNode", + Dict{String,Any}("ClusterName" => ClusterName, "NodeId" => NodeId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_cluster_node( + ClusterName, + NodeId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeClusterNode", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ClusterName" => ClusterName, "NodeId" => NodeId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_code_repository(code_repository_name) describe_code_repository(code_repository_name, params::Dict{String,<:Any}) @@ -6785,7 +7486,8 @@ Use this operation to describe a FeatureGroup. The response includes information creation time, FeatureGroup name, the unique identifier for each FeatureGroup, and more. # Arguments -- `feature_group_name`: The name of the FeatureGroup you want described. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the FeatureGroup you want + described. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -6826,7 +7528,8 @@ end Shows the metadata for a feature within a feature group. # Arguments -- `feature_group_name`: The name of the feature group containing the feature. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group + containing the feature. - `feature_name`: The name of the feature. """ @@ -6905,7 +7608,7 @@ end describe_hub(hub_name) describe_hub(hub_name, params::Dict{String,<:Any}) -Describe a hub. Hub APIs are only callable through SageMaker Studio. +Describes a hub. # Arguments - `hub_name`: The name of the hub to describe. @@ -6934,7 +7637,7 @@ end describe_hub_content(hub_content_name, hub_content_type, hub_name) describe_hub_content(hub_content_name, hub_content_type, hub_name, params::Dict{String,<:Any}) -Describe the content of a hub. 
Hub APIs are only callable through SageMaker Studio. +Describe the content of a hub. # Arguments - `hub_content_name`: The name of the content to describe. @@ -7140,6 +7843,45 @@ function describe_image_version( ) end +""" + describe_inference_component(inference_component_name) + describe_inference_component(inference_component_name, params::Dict{String,<:Any}) + +Returns information about an inference component. + +# Arguments +- `inference_component_name`: The name of the inference component. + +""" +function describe_inference_component( + InferenceComponentName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DescribeInferenceComponent", + Dict{String,Any}("InferenceComponentName" => InferenceComponentName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_inference_component( + InferenceComponentName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeInferenceComponent", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InferenceComponentName" => InferenceComponentName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_inference_experiment(name) describe_inference_experiment(name, params::Dict{String,<:Any}) @@ -7279,6 +8021,43 @@ function describe_lineage_group( ) end +""" + describe_mlflow_tracking_server(tracking_server_name) + describe_mlflow_tracking_server(tracking_server_name, params::Dict{String,<:Any}) + +Returns information about an MLflow Tracking Server. + +# Arguments +- `tracking_server_name`: The name of the MLflow Tracking Server to describe. + +""" +function describe_mlflow_tracking_server( + TrackingServerName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DescribeMlflowTrackingServer", + Dict{String,Any}("TrackingServerName" => TrackingServerName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_mlflow_tracking_server( + TrackingServerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeMlflowTrackingServer", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrackingServerName" => TrackingServerName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_model(model_name) describe_model(model_name, params::Dict{String,<:Any}) @@ -7358,7 +8137,7 @@ Describes the content, creation time, and security configuration of an Amazon Sa Model Card. # Arguments -- `model_card_name`: The name of the model card to describe. +- `model_card_name`: The name or Amazon Resource Name (ARN) of the model card to describe. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7473,8 +8252,10 @@ end describe_model_package(model_package_name, params::Dict{String,<:Any}) Returns a description of the specified model package, which is used to create SageMaker -models or list them on Amazon Web Services Marketplace. To create models in SageMaker, -buyers can subscribe to model packages listed on Amazon Web Services Marketplace. +models or list them on Amazon Web Services Marketplace. If you provided a KMS Key ID when +you created your model package, you will see the KMS Decrypt API call in your CloudTrail +logs when you use this API. 
To create models in SageMaker, buyers can subscribe to model +packages listed on Amazon Web Services Marketplace. # Arguments - `model_package_name`: The name or Amazon Resource Name (ARN) of the model package to @@ -7516,7 +8297,7 @@ end Gets a description for the specified model group. # Arguments -- `model_package_group_name`: The name of gthe model group to describe. +- `model_package_group_name`: The name of the model group to describe. """ function describe_model_package_group( @@ -7901,7 +8682,7 @@ end Describes the space. # Arguments -- `domain_id`: The ID of the associated Domain. +- `domain_id`: The ID of the associated domain. - `space_name`: The name of the space. """ @@ -7939,11 +8720,11 @@ end describe_studio_lifecycle_config(studio_lifecycle_config_name) describe_studio_lifecycle_config(studio_lifecycle_config_name, params::Dict{String,<:Any}) -Describes the Studio Lifecycle Configuration. +Describes the Amazon SageMaker Studio Lifecycle Configuration. # Arguments -- `studio_lifecycle_config_name`: The name of the Studio Lifecycle Configuration to - describe. +- `studio_lifecycle_config_name`: The name of the Amazon SageMaker Studio Lifecycle + Configuration to describe. """ function describe_studio_lifecycle_config( @@ -8246,7 +9027,7 @@ end describe_workteam(workteam_name) describe_workteam(workteam_name, params::Dict{String,<:Any}) -Gets information about a specific work team. You can see information such as the create +Gets information about a specific work team. You can see information such as the creation date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN). @@ -8527,6 +9308,65 @@ function get_sagemaker_servicecatalog_portfolio_status( ) end +""" + get_scaling_configuration_recommendation(inference_recommendations_job_name) + get_scaling_configuration_recommendation(inference_recommendations_job_name, params::Dict{String,<:Any}) + +Starts an Amazon SageMaker Inference Recommender autoscaling recommendation job. Returns +recommendations for autoscaling policies that you can apply to your SageMaker endpoint. + +# Arguments +- `inference_recommendations_job_name`: The name of a previously completed Inference + Recommender job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"EndpointName"`: The name of an endpoint benchmarked during a previously completed + inference recommendation job. This name should come from one of the recommendations + returned by the job specified in the InferenceRecommendationsJobName field. Specify either + this field or the RecommendationId field. +- `"RecommendationId"`: The recommendation ID of a previously completed inference + recommendation. This ID should come from one of the recommendations returned by the job + specified in the InferenceRecommendationsJobName field. Specify either this field or the + EndpointName field. +- `"ScalingPolicyObjective"`: An object where you specify the anticipated traffic pattern + for an endpoint. +- `"TargetCpuUtilizationPerCore"`: The percentage of how much utilization you want an + instance to use before autoscaling. The default value is 50%. 
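A usage sketch for the wrapper defined below, showing the optional parameters above being passed through the positional params dictionary, assuming the usual AWS.jl `@service` pattern; the job name is a placeholder.

```julia
using AWS
@service SageMaker

# Override the default 50% target CPU utilization for the autoscaling recommendation.
SageMaker.get_scaling_configuration_recommendation(
    "my-inference-recommender-job",
    Dict("TargetCpuUtilizationPerCore" => 60),
)
```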
+""" +function get_scaling_configuration_recommendation( + InferenceRecommendationsJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "GetScalingConfigurationRecommendation", + Dict{String,Any}( + "InferenceRecommendationsJobName" => InferenceRecommendationsJobName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_scaling_configuration_recommendation( + InferenceRecommendationsJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "GetScalingConfigurationRecommendation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InferenceRecommendationsJobName" => InferenceRecommendationsJobName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_search_suggestions(resource) get_search_suggestions(resource, params::Dict{String,<:Any}) @@ -8569,7 +9409,7 @@ end import_hub_content(document_schema_version, hub_content_document, hub_content_name, hub_content_type, hub_name) import_hub_content(document_schema_version, hub_content_document, hub_content_name, hub_content_type, hub_name, params::Dict{String,<:Any}) -Import hub content. Hub APIs are only callable through SageMaker Studio. +Import hub content. # Arguments - `document_schema_version`: The version of the hub content schema to import. @@ -8800,10 +9640,11 @@ Lists apps. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DomainIdEquals"`: A parameter to search for the domain ID. -- `"MaxResults"`: The total number of items to return in the response. If the total number - of items available is more than the value specified, a NextToken is provided in the - response. To resume pagination, provide the NextToken value in the as part of a subsequent - call. The default value is 10. +- `"MaxResults"`: This parameter defines the maximum number of results that can be return + in a single response. The MaxResults parameter is an upper bound, not a target. If there + are more results available than the value specified, a NextToken is provided in the + response. The NextToken indicates that the user should get the next set of results by + providing this token as a part of a subsequent call. The default value for MaxResults is 10. - `"NextToken"`: If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. - `"SortBy"`: The parameter by which to sort the results. The default is CreationTime. @@ -8974,6 +9815,103 @@ function list_candidates_for_auto_mljob( ) end +""" + list_cluster_nodes(cluster_name) + list_cluster_nodes(cluster_name, params::Dict{String,<:Any}) + +Retrieves the list of instances (also called nodes interchangeably) in a SageMaker HyperPod +cluster. + +# Arguments +- `cluster_name`: The string name or the Amazon Resource Name (ARN) of the SageMaker + HyperPod cluster in which you want to retrieve the list of nodes. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: A filter that returns nodes in a SageMaker HyperPod cluster + created after the specified time. Timestamps are formatted according to the ISO 8601 + standard. 
Acceptable formats include: YYYY-MM-DDThh:mm:ss.sssTZD (UTC), for example, + 2014-10-01T20:30:00.000Z YYYY-MM-DDThh:mm:ss.sssTZD (with offset), for example, + 2014-10-01T12:30:00.000-08:00 YYYY-MM-DD, for example, 2014-10-01 Unix time in + seconds, for example, 1412195400. This is also referred to as Unix Epoch time and + represents the number of seconds since midnight, January 1, 1970 UTC. For more + information about the timestamp format, see Timestamp in the Amazon Web Services Command + Line Interface User Guide. +- `"CreationTimeBefore"`: A filter that returns nodes in a SageMaker HyperPod cluster + created before the specified time. The acceptable formats are the same as the timestamp + formats for CreationTimeAfter. For more information about the timestamp format, see + Timestamp in the Amazon Web Services Command Line Interface User Guide. +- `"InstanceGroupNameContains"`: A filter that returns the instance groups whose name + contain a specified string. +- `"MaxResults"`: The maximum number of nodes to return in the response. +- `"NextToken"`: If the result of the previous ListClusterNodes request was truncated, the + response includes a NextToken. To retrieve the next set of cluster nodes, use the token in + the next request. +- `"SortBy"`: The field by which to sort results. The default value is CREATION_TIME. +- `"SortOrder"`: The sort order for results. The default value is Ascending. +""" +function list_cluster_nodes(ClusterName; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListClusterNodes", + Dict{String,Any}("ClusterName" => ClusterName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cluster_nodes( + ClusterName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "ListClusterNodes", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClusterName" => ClusterName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_clusters() + list_clusters(params::Dict{String,<:Any}) + +Retrieves the list of SageMaker HyperPod clusters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Set a start time for the time range during which you want to list + SageMaker HyperPod clusters. Timestamps are formatted according to the ISO 8601 standard. + Acceptable formats include: YYYY-MM-DDThh:mm:ss.sssTZD (UTC), for example, + 2014-10-01T20:30:00.000Z YYYY-MM-DDThh:mm:ss.sssTZD (with offset), for example, + 2014-10-01T12:30:00.000-08:00 YYYY-MM-DD, for example, 2014-10-01 Unix time in + seconds, for example, 1412195400. This is also referred to as Unix Epoch time and + represents the number of seconds since midnight, January 1, 1970 UTC. For more + information about the timestamp format, see Timestamp in the Amazon Web Services Command + Line Interface User Guide. +- `"CreationTimeBefore"`: Set an end time for the time range during which you want to list + SageMaker HyperPod clusters. A filter that returns nodes in a SageMaker HyperPod cluster + created before the specified time. The acceptable formats are the same as the timestamp + formats for CreationTimeAfter. For more information about the timestamp format, see + Timestamp in the Amazon Web Services Command Line Interface User Guide. +- `"MaxResults"`: Set the maximum number of SageMaker HyperPod clusters to list. 
+- `"NameContains"`: Set the maximum number of instances to print in the list. +- `"NextToken"`: Set the next token to retrieve the list of SageMaker HyperPod clusters. +- `"SortBy"`: The field by which to sort results. The default value is CREATION_TIME. +- `"SortOrder"`: The sort order for results. The default value is Ascending. +""" +function list_clusters(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker("ListClusters"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_clusters( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListClusters", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_code_repositories() list_code_repositories(params::Dict{String,<:Any}) @@ -9115,7 +10053,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys truncated, the response includes a NextToken. To retrieve the next set of transform jobs, use the token in the next request.> - `"SortBy"`: The field to sort results by. The default is CreationTime. -- `"SortOrder"`: The sort order for results. The default is Descending. +- `"SortOrder"`: Whether to sort the results in Ascending or Descending order. The default + is Descending. """ function list_data_quality_job_definitions(; aws_config::AbstractAWSConfig=global_aws_config() @@ -9205,10 +10144,11 @@ Lists the domains. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The total number of items to return in the response. If the total number - of items available is more than the value specified, a NextToken is provided in the - response. To resume pagination, provide the NextToken value in the as part of a subsequent - call. The default value is 10. +- `"MaxResults"`: This parameter defines the maximum number of results that can be return + in a single response. The MaxResults parameter is an upper bound, not a target. If there + are more results available than the value specified, a NextToken is provided in the + response. The NextToken indicates that the user should get the next set of results by + providing this token as a part of a subsequent call. The default value for MaxResults is 10. - `"NextToken"`: If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. """ @@ -9483,7 +10423,7 @@ end list_hub_content_versions(hub_content_name, hub_content_type, hub_name) list_hub_content_versions(hub_content_name, hub_content_type, hub_name, params::Dict{String,<:Any}) -List hub content versions. Hub APIs are only callable through SageMaker Studio. +List hub content versions. # Arguments - `hub_content_name`: The name of the hub content. @@ -9551,7 +10491,7 @@ end list_hub_contents(hub_content_type, hub_name) list_hub_contents(hub_content_type, hub_name, params::Dict{String,<:Any}) -List the contents of a hub. Hub APIs are only callable through SageMaker Studio. +List the contents of a hub. # Arguments - `hub_content_type`: The type of hub content to list. @@ -9604,7 +10544,7 @@ end list_hubs() list_hubs(params::Dict{String,<:Any}) -List all existing hubs. Hub APIs are only callable through SageMaker Studio. +List all existing hubs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -9787,14 +10727,64 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"SortBy"`: The property used to sort results. The default value is CREATION_TIME. - `"SortOrder"`: The sort order. The default value is DESCENDING. """ -function list_images(; aws_config::AbstractAWSConfig=global_aws_config()) - return sagemaker("ListImages"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +function list_images(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker("ListImages"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_images( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListImages", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_inference_components() + list_inference_components(params::Dict{String,<:Any}) + +Lists the inference components in your account and their properties. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Filters the results to only those inference components that were + created after the specified time. +- `"CreationTimeBefore"`: Filters the results to only those inference components that were + created before the specified time. +- `"EndpointNameEquals"`: An endpoint name to filter the listed inference components. The + response includes only those inference components that are hosted at the specified endpoint. +- `"LastModifiedTimeAfter"`: Filters the results to only those inference components that + were updated after the specified time. +- `"LastModifiedTimeBefore"`: Filters the results to only those inference components that + were updated before the specified time. +- `"MaxResults"`: The maximum number of inference components to return in the response. + This value defaults to 10. +- `"NameContains"`: Filters the results to only those inference components with a name that + contains the specified string. +- `"NextToken"`: A token that you use to get the next set of results following a truncated + response. If the response to the previous request was truncated, that response provides the + value for this token. +- `"SortBy"`: The field by which to sort the inference components in the response. The + default is CreationTime. +- `"SortOrder"`: The sort order for results. The default is Descending. +- `"StatusEquals"`: Filters the results to only those inference components with the + specified status. +- `"VariantNameEquals"`: A production variant name to filter the listed inference + components. The response includes only those inference components that are hosted at the + specified variant. +""" +function list_inference_components(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListInferenceComponents"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) end -function list_images( +function list_inference_components( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return sagemaker( - "ListImages", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "ListInferenceComponents", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end @@ -10056,6 +11046,49 @@ function list_lineage_groups( ) end +""" + list_mlflow_tracking_servers() + list_mlflow_tracking_servers(params::Dict{String,<:Any}) + +Lists all MLflow Tracking Servers. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"CreatedAfter"`: Use the CreatedAfter filter to only list tracking servers created after + a specific date and time. Listed tracking servers are shown with a date and time such as + \"2024-03-16T01:46:56+00:00\". The CreatedAfter parameter takes in a Unix timestamp. To + convert a date and time into a Unix timestamp, see EpochConverter. +- `"CreatedBefore"`: Use the CreatedBefore filter to only list tracking servers created + before a specific date and time. Listed tracking servers are shown with a date and time + such as \"2024-03-16T01:46:56+00:00\". The CreatedBefore parameter takes in a Unix + timestamp. To convert a date and time into a Unix timestamp, see EpochConverter. +- `"MaxResults"`: The maximum number of tracking servers to list. +- `"MlflowVersion"`: Filter for tracking servers using the specified MLflow version. +- `"NextToken"`: If the previous response was truncated, you will receive this token. Use + it in your next request to receive the next set of results. +- `"SortBy"`: Filter for trackings servers sorting by name, creation time, or creation + status. +- `"SortOrder"`: Change the order of the listed tracking servers. By default, tracking + servers are listed in Descending order by creation time. To change the list order, you can + specify SortOrder to be Ascending. +- `"TrackingServerStatus"`: Filter for tracking servers with a specified creation status. +""" +function list_mlflow_tracking_servers(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListMlflowTrackingServers"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_mlflow_tracking_servers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListMlflowTrackingServers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_model_bias_job_definitions() list_model_bias_job_definitions(params::Dict{String,<:Any}) @@ -10158,7 +11191,8 @@ end List existing versions of an Amazon SageMaker Model Card. # Arguments -- `model_card_name`: List model card versions for the model card with the specified name. +- `model_card_name`: List model card versions for the model card with the specified name or + Amazon Resource Name (ARN). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -10319,6 +11353,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specified time. - `"CreationTimeBefore"`: A filter that returns only model groups created before the specified time. +- `"CrossAccountFilterOption"`: A filter that returns either model groups shared with you + or model groups in your own account. When the value is CrossAccount, the results show the + resources made discoverable to you from other accounts. When the value is SameAccount or + null, the results show resources from your account. The default is SameAccount. - `"MaxResults"`: The maximum number of results to return in the response. - `"NameContains"`: A string in the model group name. This filter returns only model groups whose name contains the specified string. @@ -10408,7 +11446,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys truncated, the response includes a NextToken. To retrieve the next set of model quality monitoring job definitions, use the token in the next request. - `"SortBy"`: The field to sort results by. The default is CreationTime. 
-- `"SortOrder"`: The sort order for results. The default is Descending. +- `"SortOrder"`: Whether to sort the results in Ascending or Descending order. The default + is Descending. """ function list_model_quality_job_definitions(; aws_config::AbstractAWSConfig=global_aws_config() @@ -10572,8 +11611,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys of job executions, use it in the next request. - `"ScheduledTimeAfter"`: Filter for jobs scheduled after a specified time. - `"ScheduledTimeBefore"`: Filter for jobs scheduled before a specified time. -- `"SortBy"`: Whether to sort results by Status, CreationTime, ScheduledTime field. The - default is CreationTime. +- `"SortBy"`: Whether to sort the results by the Status, CreationTime, or ScheduledTime + field. The default is CreationTime. - `"SortOrder"`: Whether to sort the results in Ascending or Descending order. The default is Descending. - `"StatusEquals"`: A filter that retrieves only jobs with a specific status. @@ -10620,8 +11659,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NameContains"`: Filter for monitoring schedules whose name contains a specified string. - `"NextToken"`: The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request. -- `"SortBy"`: Whether to sort results by Status, CreationTime, ScheduledTime field. The - default is CreationTime. +- `"SortBy"`: Whether to sort the results by the Status, CreationTime, or ScheduledTime + field. The default is CreationTime. - `"SortOrder"`: Whether to sort the results in Ascending or Descending order. The default is Descending. - `"StatusEquals"`: A filter that returns only monitoring schedules modified before a @@ -10970,6 +12009,42 @@ function list_projects( ) end +""" + list_resource_catalogs() + list_resource_catalogs(params::Dict{String,<:Any}) + + Lists Amazon SageMaker Catalogs based on given filters and orders. The maximum number of +ResourceCatalogs viewable is 1000. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Use this parameter to search for ResourceCatalogs created after a + specific date and time. +- `"CreationTimeBefore"`: Use this parameter to search for ResourceCatalogs created before + a specific date and time. +- `"MaxResults"`: The maximum number of results returned by ListResourceCatalogs. +- `"NameContains"`: A string that partially matches one or more ResourceCatalogs names. + Filters ResourceCatalog by name. +- `"NextToken"`: A token to resume pagination of ListResourceCatalogs results. +- `"SortBy"`: The value on which the resource catalog list is sorted. +- `"SortOrder"`: The order in which the resource catalogs are listed. +""" +function list_resource_catalogs(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListResourceCatalogs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_resource_catalogs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListResourceCatalogs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_spaces() list_spaces(params::Dict{String,<:Any}) @@ -10978,11 +12053,12 @@ Lists spaces. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DomainIdEquals"`: A parameter to search for the Domain ID. 
-- `"MaxResults"`: The total number of items to return in the response. If the total number - of items available is more than the value specified, a NextToken is provided in the - response. To resume pagination, provide the NextToken value in the as part of a subsequent - call. The default value is 10. +- `"DomainIdEquals"`: A parameter to search for the domain ID. +- `"MaxResults"`: This parameter defines the maximum number of results that can be return + in a single response. The MaxResults parameter is an upper bound, not a target. If there + are more results available than the value specified, a NextToken is provided in the + response. The NextToken indicates that the user should get the next set of results by + providing this token as a part of a subsequent call. The default value for MaxResults is 10. - `"NextToken"`: If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. - `"SortBy"`: The parameter by which to sort the results. The default is CreationTime. @@ -11058,7 +12134,8 @@ end list_studio_lifecycle_configs() list_studio_lifecycle_configs(params::Dict{String,<:Any}) -Lists the Studio Lifecycle Configurations in your Amazon Web Services Account. +Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services +Account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -11404,10 +12481,11 @@ Lists user profiles. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DomainIdEquals"`: A parameter by which to filter the results. -- `"MaxResults"`: The total number of items to return in the response. If the total number - of items available is more than the value specified, a NextToken is provided in the - response. To resume pagination, provide the NextToken value in the as part of a subsequent - call. The default value is 10. +- `"MaxResults"`: This parameter defines the maximum number of results that can be return + in a single response. The MaxResults parameter is an upper bound, not a target. If there + are more results available than the value specified, a NextToken is provided in the + response. The NextToken indicates that the user should get the next set of results by + providing this token as a part of a subsequent call. The default value for MaxResults is 10. - `"NextToken"`: If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. - `"SortBy"`: The parameter by which to sort the results. The default is CreationTime. @@ -11739,6 +12817,12 @@ Resources Reference for more information. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CrossAccountFilterOption"`: A cross account filter option. When the value is + \"CrossAccount\" the search results will only include resources made discoverable to you + from other accounts. When the value is \"SameAccount\" or null the search results will only + include resources from your account. Default is null. For more information on searching for + resources made discoverable to your account, see Search discoverable resources in the + SageMaker Developer Guide. The maximum number of ResourceCatalogs viewable is 1000. - `"MaxResults"`: The maximum number of results to return. 
- `"NextToken"`: If more than MaxResults resources match the specified SearchExpression, the response includes a NextToken. The NextToken can be passed to the next SearchRequest to @@ -11751,6 +12835,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys is LastModifiedTime. - `"SortOrder"`: How SearchResults are ordered. Valid values are Ascending or Descending. The default is Descending. +- `"VisibilityConditions"`: Limits the results of your search request to the resources + that you can access. """ function search(Resource; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker( @@ -11953,6 +13039,43 @@ function start_inference_experiment( ) end +""" + start_mlflow_tracking_server(tracking_server_name) + start_mlflow_tracking_server(tracking_server_name, params::Dict{String,<:Any}) + +Programmatically start an MLflow Tracking Server. + +# Arguments +- `tracking_server_name`: The name of the tracking server to start. + +""" +function start_mlflow_tracking_server( + TrackingServerName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "StartMlflowTrackingServer", + Dict{String,Any}("TrackingServerName" => TrackingServerName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_mlflow_tracking_server( + TrackingServerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "StartMlflowTrackingServer", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrackingServerName" => TrackingServerName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_monitoring_schedule(monitoring_schedule_name) start_monitoring_schedule(monitoring_schedule_name, params::Dict{String,<:Any}) @@ -12420,6 +13543,43 @@ function stop_labeling_job( ) end +""" + stop_mlflow_tracking_server(tracking_server_name) + stop_mlflow_tracking_server(tracking_server_name, params::Dict{String,<:Any}) + +Programmatically stop an MLflow Tracking Server. + +# Arguments +- `tracking_server_name`: The name of the tracking server to stop. + +""" +function stop_mlflow_tracking_server( + TrackingServerName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "StopMlflowTrackingServer", + Dict{String,Any}("TrackingServerName" => TrackingServerName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_mlflow_tracking_server( + TrackingServerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "StopMlflowTrackingServer", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrackingServerName" => TrackingServerName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_monitoring_schedule(monitoring_schedule_name) stop_monitoring_schedule(monitoring_schedule_name, params::Dict{String,<:Any}) @@ -12734,6 +13894,8 @@ Updates the properties of an AppImageConfig. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CodeEditorAppImageConfig"`: The Code Editor app running on the image. +- `"JupyterLabAppImageConfig"`: The JupyterLab app running on the image. - `"KernelGatewayImageConfig"`: The new KernelGateway app to run on the image. 
""" function update_app_image_config( @@ -12801,6 +13963,86 @@ function update_artifact( ) end +""" + update_cluster(cluster_name, instance_groups) + update_cluster(cluster_name, instance_groups, params::Dict{String,<:Any}) + +Updates a SageMaker HyperPod cluster. + +# Arguments +- `cluster_name`: Specify the name of the SageMaker HyperPod cluster you want to update. +- `instance_groups`: Specify the instance groups to update. + +""" +function update_cluster( + ClusterName, InstanceGroups; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "UpdateCluster", + Dict{String,Any}("ClusterName" => ClusterName, "InstanceGroups" => InstanceGroups); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_cluster( + ClusterName, + InstanceGroups, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "UpdateCluster", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClusterName" => ClusterName, "InstanceGroups" => InstanceGroups + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_cluster_software(cluster_name) + update_cluster_software(cluster_name, params::Dict{String,<:Any}) + +Updates the platform software of a SageMaker HyperPod cluster for security patching. To +learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster. + +# Arguments +- `cluster_name`: Specify the name or the Amazon Resource Name (ARN) of the SageMaker + HyperPod cluster you want to update for security patching. + +""" +function update_cluster_software( + ClusterName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "UpdateClusterSoftware", + Dict{String,Any}("ClusterName" => ClusterName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_cluster_software( + ClusterName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "UpdateClusterSoftware", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClusterName" => ClusterName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_code_repository(code_repository_name) update_code_repository(code_repository_name, params::Dict{String,<:Any}) @@ -12990,15 +14232,25 @@ Updates the default settings for new user profiles in the domain. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AppNetworkAccessType"`: Specifies the VPC used for non-EFS traffic. + PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which + allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC + and subnets. This configuration can only be modified if there are no apps in the + InService, Pending, or Deleting state. The configuration cannot be updated if + DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or + DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of + the same request. - `"AppSecurityGroupManagement"`: The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. 
If setting up the domain for use with RStudio, this value must be set to Service. -- `"DefaultSpaceSettings"`: The default settings used to create a space within the Domain. +- `"DefaultSpaceSettings"`: The default settings used to create a space within the domain. - `"DefaultUserSettings"`: A collection of settings. - `"DomainSettingsForUpdate"`: A collection of DomainSettings configuration values to update. +- `"SubnetIds"`: The VPC subnets that Studio uses for communication. If removing subnets, + ensure there are no apps in the InService, Pending, or Deleting state. """ function update_domain(DomainId; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker( @@ -13027,12 +14279,14 @@ end update_endpoint(endpoint_config_name, endpoint_name) update_endpoint(endpoint_config_name, endpoint_name, params::Dict{String,<:Any}) -Deploys the new EndpointConfig specified in the request, switches to using newly created -endpoint, and then deletes resources provisioned for the endpoint using the previous -EndpointConfig (there is no availability loss). When SageMaker receives the request, it -sets the endpoint status to Updating. After updating the endpoint, it sets the status to -InService. To check the status of an endpoint, use the DescribeEndpoint API. You must not -delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or +Deploys the EndpointConfig specified in the request to a new fleet of instances. SageMaker +shifts endpoint traffic to the new instances with the updated endpoint configuration and +then deletes the old instances using the previous EndpointConfig (there is no availability +loss). For more information about how to control the update and traffic shifting process, +see Update models in production. When SageMaker receives the request, it sets the endpoint +status to Updating. After updating the endpoint, it sets the status to InService. To check +the status of an endpoint, use the DescribeEndpoint API. You must not delete an +EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig. If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the @@ -13193,10 +14447,20 @@ end update_feature_group(feature_group_name) update_feature_group(feature_group_name, params::Dict{String,<:Any}) -Updates the feature group. +Updates the feature group by either adding features or updating the online store +configuration. Use one of the following request parameters at a time while using the +UpdateFeatureGroup API. You can add features for your feature group using the +FeatureAdditions request parameter. Features cannot be removed from a feature group. You +can update the online store configuration by using the OnlineStoreConfig request parameter. +If a TtlDuration is specified, the default TtlDuration applies for all records added to the +feature group after the feature group is updated. If a record level TtlDuration exists from +using the PutRecord API, the record level TtlDuration applies to that record instead of the +default TtlDuration. To remove the default TtlDuration from an existing feature group, use +the UpdateFeatureGroup API and set the TtlDuration Unit and Value to null. # Arguments -- `feature_group_name`: The name of the feature group that you're updating. 
+- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group that + you're updating. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -13204,6 +14468,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys asynchronous operation. When you get an HTTP 200 response, you've made a valid request. It takes some time after you've made a valid request for Feature Store to update the feature group. +- `"OnlineStoreConfig"`: Updates the feature group online store configuration. +- `"ThroughputConfig"`: """ function update_feature_group( FeatureGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -13239,8 +14505,8 @@ end Updates the description and parameters of the feature group. # Arguments -- `feature_group_name`: The name of the feature group containing the feature that you're - updating. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group + containing the feature that you're updating. - `feature_name`: The name of the feature that you're updating. # Optional Parameters @@ -13289,7 +14555,7 @@ end update_hub(hub_name) update_hub(hub_name, params::Dict{String,<:Any}) -Update a hub. Hub APIs are only callable through SageMaker Studio. +Update a hub. # Arguments - `hub_name`: The name of the hub to update. @@ -13416,6 +14682,101 @@ function update_image_version( ) end +""" + update_inference_component(inference_component_name) + update_inference_component(inference_component_name, params::Dict{String,<:Any}) + +Updates an inference component. + +# Arguments +- `inference_component_name`: The name of the inference component. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"RuntimeConfig"`: Runtime settings for a model that is deployed with an inference + component. +- `"Specification"`: Details about the resources to deploy with this inference component, + including the model, container, and compute resources. +""" +function update_inference_component( + InferenceComponentName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "UpdateInferenceComponent", + Dict{String,Any}("InferenceComponentName" => InferenceComponentName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_inference_component( + InferenceComponentName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "UpdateInferenceComponent", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InferenceComponentName" => InferenceComponentName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_inference_component_runtime_config(desired_runtime_config, inference_component_name) + update_inference_component_runtime_config(desired_runtime_config, inference_component_name, params::Dict{String,<:Any}) + +Runtime settings for a model that is deployed with an inference component. + +# Arguments +- `desired_runtime_config`: Runtime settings for a model that is deployed with an inference + component. +- `inference_component_name`: The name of the inference component to update. 
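# Example
A hedged sketch, not part of the generated definitions: the component name is a
placeholder, and "CopyCount" is assumed to be the relevant key of the runtime
configuration structure.

    # Scale a hypothetical inference component to two copies.
    update_inference_component_runtime_config(
        Dict("CopyCount" => 2),        # DesiredRuntimeConfig (assumed key)
        "my-inference-component",      # InferenceComponentName (placeholder)
    )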
+ +""" +function update_inference_component_runtime_config( + DesiredRuntimeConfig, + InferenceComponentName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "UpdateInferenceComponentRuntimeConfig", + Dict{String,Any}( + "DesiredRuntimeConfig" => DesiredRuntimeConfig, + "InferenceComponentName" => InferenceComponentName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_inference_component_runtime_config( + DesiredRuntimeConfig, + InferenceComponentName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "UpdateInferenceComponentRuntimeConfig", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DesiredRuntimeConfig" => DesiredRuntimeConfig, + "InferenceComponentName" => InferenceComponentName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_inference_experiment(name) update_inference_experiment(name, params::Dict{String,<:Any}) @@ -13464,6 +14825,55 @@ function update_inference_experiment( ) end +""" + update_mlflow_tracking_server(tracking_server_name) + update_mlflow_tracking_server(tracking_server_name, params::Dict{String,<:Any}) + +Updates properties of an existing MLflow Tracking Server. + +# Arguments +- `tracking_server_name`: The name of the MLflow Tracking Server to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ArtifactStoreUri"`: The new S3 URI for the general purpose bucket to use as the + artifact store for the MLflow Tracking Server. +- `"AutomaticModelRegistration"`: Whether to enable or disable automatic registration of + new MLflow models to the SageMaker Model Registry. To enable automatic model registration, + set this value to True. To disable automatic model registration, set this value to False. + If not specified, AutomaticModelRegistration defaults to False +- `"TrackingServerSize"`: The new size for the MLflow Tracking Server. +- `"WeeklyMaintenanceWindowStart"`: The new weekly maintenance window start day and time to + update. The maintenance window day and time should be in Coordinated Universal Time (UTC) + 24-hour standard time. For example: TUE:03:30. +""" +function update_mlflow_tracking_server( + TrackingServerName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "UpdateMlflowTrackingServer", + Dict{String,Any}("TrackingServerName" => TrackingServerName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_mlflow_tracking_server( + TrackingServerName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "UpdateMlflowTrackingServer", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TrackingServerName" => TrackingServerName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_model_card(model_card_name) update_model_card(model_card_name, params::Dict{String,<:Any}) @@ -13472,7 +14882,7 @@ Update an Amazon SageMaker Model Card. You cannot update both model card conten card status in a single call. # Arguments -- `model_card_name`: The name of the model card to update. +- `model_card_name`: The name or Amazon Resource Name (ARN) of the model card to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -13529,7 +14939,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys versions. - `"CustomerMetadataPropertiesToRemove"`: The metadata properties associated with the model package versions to remove. +- `"InferenceSpecification"`: Specifies details about inference jobs that you can run with + models based on this model package, including the following information: The Amazon ECR + paths of containers that contain the inference code and model artifacts. The instance + types that the model package supports for transform jobs and real-time endpoints used for + inference. The input and output content formats that the model package supports for + inference. - `"ModelApprovalStatus"`: The approval status of the model. +- `"ModelCard"`: The model card associated with the model package. Since + ModelPackageModelCard is tied to a model package, it is a specific usage of a model card + and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard + schema does not include model_package_details, and model_overview is composed of the + model_creator and model_artifact properties. For more information about the model package + model card schema, see Model package model card schema. For more information about the + model card associated with the model package, see View the Details of a Model Version. +- `"SourceUri"`: The URI of the source for the model package. """ function update_model_package( ModelPackageArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -13958,11 +15382,13 @@ end Updates the settings of a space. # Arguments -- `domain_id`: The ID of the associated Domain. +- `domain_id`: The ID of the associated domain. - `space_name`: The name of the space. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"SpaceDisplayName"`: The name of the space that appears in the Amazon SageMaker Studio + UI. - `"SpaceSettings"`: A collection of space settings. """ function update_space( @@ -14012,6 +15438,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys monitoring, framework profiling, and storage paths. - `"ProfilerRuleConfigurations"`: Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. +- `"RemoteDebugConfig"`: Configuration for remote debugging while the training job is + running. You can update the remote debugging configuration when the SecondaryStatus of the + job is Downloading or Training.To learn more about the remote debugging functionality of + SageMaker, see Access a training container through Amazon Web Services Systems Manager + (SSM) for remote debugging. - `"ResourceConfig"`: The training job ResourceConfig to update warm pool retention length. """ function update_training_job( @@ -14267,6 +15698,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys associated with the work team you update. - `"NotificationConfiguration"`: Configures SNS topic notifications for available or expiring work items +- `"WorkerAccessConfiguration"`: Use this optional parameter to constrain access to an + Amazon S3 resource based on the IP address using supported IAM global condition keys. The + Amazon S3 resource is accessed in the worker portal using a Amazon S3 presigned URL. 
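# Example
An illustrative call with a placeholder team name; "Description" is assumed here to be one
of the optional keys accepted by UpdateWorkteam.

    # Update the description of an existing (hypothetical) work team.
    update_workteam(
        "my-labeling-team",
        Dict("Description" => "Private work team for the pilot labeling project"),
    )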
""" function update_workteam(WorkteamName; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker( diff --git a/src/services/sagemaker_featurestore_runtime.jl b/src/services/sagemaker_featurestore_runtime.jl index a37491edb9..780c19d874 100644 --- a/src/services/sagemaker_featurestore_runtime.jl +++ b/src/services/sagemaker_featurestore_runtime.jl @@ -11,9 +11,15 @@ using AWS.UUIDs Retrieves a batch of Records from a FeatureGroup. # Arguments -- `identifiers`: A list of FeatureGroup names, with their corresponding RecordIdentifier - value, and Feature name that have been requested to be retrieved in batch. +- `identifiers`: A list containing the name or Amazon Resource Name (ARN) of the + FeatureGroup, the list of names of Features to be retrieved, and the corresponding + RecordIdentifier values as strings. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExpirationTimeResponse"`: Parameter to request ExpiresAt in response. If Enabled, + BatchGetRecord will return the value of ExpiresAt, if it is not null. If Disabled and null, + BatchGetRecord will return null. """ function batch_get_record(Identifiers; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker_featurestore_runtime( @@ -45,22 +51,29 @@ end delete_record(event_time, feature_group_name, record_identifier_value_as_string, params::Dict{String,<:Any}) Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both -SOFT_DELETE and HARD_DELETE. For SOFT_DELETE (default), feature columns are set to null and -the record is no longer retrievable by GetRecord or BatchGetRecord. For HARD_DELETE, the +SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and +the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the -deleted record marker to the OfflineStore with feature values set to null, is_deleted value -set to True, and EventTime set to the delete input EventTime. Note that the EventTime -specified in DeleteRecord should be set later than the EventTime of the existing record in -the OnlineStore for that RecordIdentifer. If it is not, the deletion does not occur: For -SOFT_DELETE, the existing (undeleted) record remains in the OnlineStore, though the delete -record marker is still written to the OfflineStore. HARD_DELETE returns EventTime: 400 -ValidationException to indicate that the delete operation failed. No delete record marker -is written to the OfflineStore. +deleted record marker to the OfflineStore. The deleted record marker is a record with the +same RecordIdentifer as the original, but with is_deleted value set to True, EventTime set +to the delete input EventTime, and other feature values set to null. Note that the +EventTime specified in DeleteRecord should be set later than the EventTime of the existing +record in the OnlineStore for that RecordIdentifer. If it is not, the deletion does not +occur: For SoftDelete, the existing (not deleted) record remains in the OnlineStore, +though the delete record marker is still written to the OfflineStore. HardDelete returns +EventTime: 400 ValidationException to indicate that the delete operation failed. No delete +record marker is written to the OfflineStore. When a record is deleted from the +OnlineStore, the deleted record marker is appended to the OfflineStore. 
If you have the +Iceberg table format enabled for your OfflineStore, you can remove all history of a record +from the OfflineStore using Amazon Athena or Apache Spark. For information on how to hard +delete a record from the OfflineStore with the Iceberg table format enabled, see Delete +records from the offline store. # Arguments - `event_time`: Timestamp indicating when the deletion event occurred. EventTime can be used to query data at a certain point in time. -- `feature_group_name`: The name of the feature group to delete the record from. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group to + delete the record from. - `record_identifier_value_as_string`: The value for the RecordIdentifier that uniquely identifies the record, in string format. @@ -123,13 +136,16 @@ OnlineStore can be retrieved. If no Record with RecordIdentifierValue is found, empty result is returned. # Arguments -- `feature_group_name`: The name of the feature group from which you want to retrieve a - record. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group from + which you want to retrieve a record. - `record_identifier_value_as_string`: The value that corresponds to RecordIdentifier type and uniquely identifies the record in the FeatureGroup. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExpirationTimeResponse"`: Parameter to request ExpiresAt in response. If Enabled, + GetRecord will return the value of ExpiresAt, if it is not null. If Disabled and null, + GetRecord will return null. - `"FeatureName"`: List of names of Features to be retrieved. If not specified, the latest value for all the Features are returned. """ @@ -173,14 +189,21 @@ end put_record(feature_group_name, record) put_record(feature_group_name, record, params::Dict{String,<:Any}) -Used for data ingestion into the FeatureStore. The PutRecord API writes to both the -OnlineStore and OfflineStore. If the record is the latest record for the recordIdentifier, -the record is written to both the OnlineStore and OfflineStore. If the record is a historic -record, it is written only to the OfflineStore. +The PutRecord API is used to ingest a list of Records into your feature group. If a new +record’s EventTime is greater, the new record is written to both the OnlineStore and +OfflineStore. Otherwise, the record is a historic record and it is written only to the +OfflineStore. You can specify the ingestion to be applied to the OnlineStore, +OfflineStore, or both by using the TargetStores request parameter. You can set the +ingested record to expire at a given time to live (TTL) duration after the record’s event +time, ExpiresAt = EventTime + TtlDuration, by specifying the TtlDuration parameter. A +record level TtlDuration is set when specifying the TtlDuration parameter using the +PutRecord API call. If the input TtlDuration is null or unspecified, TtlDuration is set to +the default feature group level TtlDuration. A record level TtlDuration supersedes the +group level TtlDuration. # Arguments -- `feature_group_name`: The name of the feature group that you want to insert the record - into. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group that + you want to insert the record into. - `record`: List of FeatureValues to be inserted. This will be a full over-write. If you only want to update few of the feature values, do the following: Use GetRecord to retrieve the latest record. 
Update the record returned from GetRecord. Use PutRecord @@ -190,6 +213,9 @@ record, it is written only to the OfflineStore. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"TargetStores"`: A list of stores to which you're adding the record. By default, Feature Store adds the record to all of the stores that you're using for the FeatureGroup. +- `"TtlDuration"`: Time to live duration, where the record is hard deleted after the + expiration time is reached; ExpiresAt = EventTime + TtlDuration. For information on + HardDelete, see the DeleteRecord API in the Amazon SageMaker API Reference guide. """ function put_record( FeatureGroupName, Record; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/sagemaker_runtime.jl b/src/services/sagemaker_runtime.jl index 254db55d1e..8e95d7b8aa 100644 --- a/src/services/sagemaker_runtime.jl +++ b/src/services/sagemaker_runtime.jl @@ -32,7 +32,7 @@ token that is supplied by the caller. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Accept"`: The desired MIME type of the inference in the response. +- `"Accept"`: The desired MIME type of the inference response from the model container. - `"Content-Type"`: The MIME type of the input data in the request body. - `"X-Amzn-SageMaker-Custom-Attributes"`: Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The @@ -44,11 +44,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your - post-processing function. This feature is currently supported in the Amazon Web Services + post-processing function. This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK. - `"X-Amzn-SageMaker-Enable-Explanations"`: An optional JMESPath expression used to override the EnableExplanations parameter of the ClarifyExplainerConfig API. See the EnableExplanations section in the developer guide for more information. +- `"X-Amzn-SageMaker-Inference-Component"`: If the endpoint hosts one or more inference + components, this parameter specifies the name of inference component to invoke. - `"X-Amzn-SageMaker-Inference-Id"`: If you provide a value, it is added to the captured data when you enable data capture on the endpoint. For information about data capture, see Capture Data. @@ -101,20 +103,21 @@ before you receive a response from this API. The response from this API will not the result of the inference request but contain information about where you can locate it. Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers -outside those enumerated in the request syntax. Calls to InvokeEndpointAsync are +outside those enumerated in the request syntax. Calls to InvokeEndpointAsync are authenticated by using Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference. 
# Arguments - `endpoint_name`: The name of the endpoint that you specified when you created the - endpoint using the CreateEndpoint API. + endpoint using the CreateEndpoint API. - `x-_amzn-_sage_maker-_input_location`: The Amazon S3 URI where the inference request payload is stored. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"X-Amzn-SageMaker-Accept"`: The desired MIME type of the inference in the response. +- `"X-Amzn-SageMaker-Accept"`: The desired MIME type of the inference response from the + model container. - `"X-Amzn-SageMaker-Content-Type"`: The MIME type of the input data in the request body. - `"X-Amzn-SageMaker-Custom-Attributes"`: Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The @@ -178,3 +181,88 @@ function invoke_endpoint_async( feature_set=SERVICE_FEATURE_SET, ) end + +""" + invoke_endpoint_with_response_stream(body, endpoint_name) + invoke_endpoint_with_response_stream(body, endpoint_name, params::Dict{String,<:Any}) + +Invokes a model at the specified endpoint to return the inference response as a stream. The +inference stream provides the response payload incrementally as a series of parts. Before +you can get an inference stream, you must have access to a model that's deployed using +Amazon SageMaker hosting services, and the container for that model must support inference +streaming. For more information that can help you use this API, see the following sections +in the Amazon SageMaker Developer Guide: For information about how to add streaming +support to a model, see How Containers Serve Requests. For information about how to +process the streaming response, see Invoke real-time endpoints. Before you can use this +operation, your IAM permissions must allow the sagemaker:InvokeEndpoint action. For more +information about Amazon SageMaker actions for IAM policies, see Actions, resources, and +condition keys for Amazon SageMaker in the IAM Service Authorization Reference. Amazon +SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might +add additional headers. You should not rely on the behavior of headers outside those +enumerated in the request syntax. Calls to InvokeEndpointWithResponseStream are +authenticated by using Amazon Web Services Signature Version 4. For information, see +Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API +Reference. + +# Arguments +- `body`: Provides input data, in the format specified in the ContentType request header. + Amazon SageMaker passes all of the data in the body to the model. For information about + the format of the request body, see Common Data Formats-Inference. +- `endpoint_name`: The name of the endpoint that you specified when you created the + endpoint using the CreateEndpoint API. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Content-Type"`: The MIME type of the input data in the request body. +- `"X-Amzn-SageMaker-Accept"`: The desired MIME type of the inference response from the + model container. +- `"X-Amzn-SageMaker-Custom-Attributes"`: Provides additional information about a request + for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The + information is an opaque value that is forwarded verbatim. 
You could use this value, for + example, to provide an ID that you can use to track a request or to provide other metadata + that a service endpoint was programmed to process. The value must consist of no more than + 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of + the Hypertext Transfer Protocol (HTTP/1.1). The code in your model is responsible for + setting or updating any custom attributes in the response. If your code does not set this + value in the response, an empty value is returned. For example, if a custom attribute + represents the trace ID, your model can prepend the custom attribute with Trace ID: in your + post-processing function. This feature is currently supported in the Amazon Web Services + SDKs but not in the Amazon SageMaker Python SDK. +- `"X-Amzn-SageMaker-Inference-Component"`: If the endpoint hosts one or more inference + components, this parameter specifies the name of inference component to invoke for a + streaming response. +- `"X-Amzn-SageMaker-Inference-Id"`: An identifier that you assign to your request. +- `"X-Amzn-SageMaker-Target-Container-Hostname"`: If the endpoint hosts multiple containers + and is configured to use direct invocation, this parameter specifies the host name of the + container to invoke. +- `"X-Amzn-SageMaker-Target-Variant"`: Specify the production variant to send the inference + request to when invoking an endpoint that is running two or more variants. Note that this + parameter overrides the default behavior for the endpoint, which is to distribute the + invocation traffic based on the variant weights. For information about how to use variant + targeting to perform a/b testing, see Test models in production +""" +function invoke_endpoint_with_response_stream( + Body, EndpointName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker_runtime( + "POST", + "/endpoints/$(EndpointName)/invocations-response-stream", + Dict{String,Any}("Body" => Body); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function invoke_endpoint_with_response_stream( + Body, + EndpointName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker_runtime( + "POST", + "/endpoints/$(EndpointName)/invocations-response-stream", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Body" => Body), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/savingsplans.jl b/src/services/savingsplans.jl index 3023a981e6..3b72178c45 100644 --- a/src/services/savingsplans.jl +++ b/src/services/savingsplans.jl @@ -11,19 +11,20 @@ using AWS.UUIDs Creates a Savings Plan. # Arguments -- `commitment`: The hourly commitment, in USD. This is a value between 0.001 and 1 million. - You cannot specify more than five digits after the decimal point. +- `commitment`: The hourly commitment, in the same currency of the savingsPlanOfferingId. + This is a value between 0.001 and 1 million. You cannot specify more than five digits after + the decimal point. - `savings_plan_offering_id`: The ID of the offering. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientToken"`: Unique, case-sensitive identifier that you provide to ensure the +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. 
-- `"purchaseTime"`: The time at which to purchase the Savings Plan, in UTC format +- `"purchaseTime"`: The purchase time of the Savings Plan in UTC format (YYYY-MM-DDTHH:MM:SSZ). - `"tags"`: One or more tags. - `"upfrontPaymentAmount"`: The up-front payment amount. This is a whole number between 50 - and 99 percent of the total value of the Savings Plan. This parameter is supported only if + and 99 percent of the total value of the Savings Plan. This parameter is only supported if the payment option is Partial Upfront. """ function create_savings_plan( @@ -107,7 +108,7 @@ end describe_savings_plan_rates(savings_plan_id) describe_savings_plan_rates(savings_plan_id, params::Dict{String,<:Any}) -Describes the specified Savings Plans rates. +Describes the rates for the specified Savings Plan. # Arguments - `savings_plan_id`: The ID of the Savings Plan. @@ -160,7 +161,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: The token for the next page of results. - `"savingsPlanArns"`: The Amazon Resource Names (ARN) of the Savings Plans. - `"savingsPlanIds"`: The IDs of the Savings Plans. -- `"states"`: The states. +- `"states"`: The current states of the Savings Plans. """ function describe_savings_plans(; aws_config::AbstractAWSConfig=global_aws_config()) return savingsplans( @@ -186,7 +187,7 @@ end describe_savings_plans_offering_rates() describe_savings_plans_offering_rates(params::Dict{String,<:Any}) -Describes the specified Savings Plans offering rates. +Describes the offering rates for the specified Savings Plans. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -194,8 +195,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"maxResults"`: The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value. - `"nextToken"`: The token for the next page of results. -- `"operations"`: The specific AWS operation for the line item in the billing report. -- `"products"`: The AWS products. +- `"operations"`: The specific Amazon Web Services operation for the line item in the + billing report. +- `"products"`: The Amazon Web Services products. - `"savingsPlanOfferingIds"`: The IDs of the offerings. - `"savingsPlanPaymentOptions"`: The payment options. - `"savingsPlanTypes"`: The plan types. @@ -228,21 +230,22 @@ end describe_savings_plans_offerings() describe_savings_plans_offerings(params::Dict{String,<:Any}) -Describes the specified Savings Plans offerings. +Describes the offerings for the specified Savings Plans. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"currencies"`: The currencies. - `"descriptions"`: The descriptions. -- `"durations"`: The durations, in seconds. +- `"durations"`: The duration, in seconds. - `"filters"`: The filters. - `"maxResults"`: The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value. - `"nextToken"`: The token for the next page of results. - `"offeringIds"`: The IDs of the offerings. -- `"operations"`: The specific AWS operation for the line item in the billing report. +- `"operations"`: The specific Amazon Web Services operation for the line item in the + billing report. - `"paymentOptions"`: The payment options. -- `"planTypes"`: The plan type. +- `"planTypes"`: The plan types. 
- `"productType"`: The product type. - `"serviceCodes"`: The services. - `"usageTypes"`: The usage details of the line item in the billing report. @@ -306,6 +309,55 @@ function list_tags_for_resource( ) end +""" + return_savings_plan(savings_plan_id) + return_savings_plan(savings_plan_id, params::Dict{String,<:Any}) + +Returns the specified Savings Plan. + +# Arguments +- `savings_plan_id`: The ID of the Savings Plan. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. +""" +function return_savings_plan( + savingsPlanId; aws_config::AbstractAWSConfig=global_aws_config() +) + return savingsplans( + "POST", + "/ReturnSavingsPlan", + Dict{String,Any}( + "savingsPlanId" => savingsPlanId, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function return_savings_plan( + savingsPlanId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return savingsplans( + "POST", + "/ReturnSavingsPlan", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "savingsPlanId" => savingsPlanId, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) diff --git a/src/services/scheduler.jl b/src/services/scheduler.jl index 112342dd72..c4265bac0e 100644 --- a/src/services/scheduler.jl +++ b/src/services/scheduler.jl @@ -16,7 +16,7 @@ Creates the specified schedule. - `name`: The name of the schedule that you are creating. - `schedule_expression`: The expression that defines when the schedule runs. The following formats are supported. at expression - at(yyyy-mm-ddThh:mm:ss) rate expression - - rate(unit value) cron expression - cron(fields) You can use at expressions to + rate(value unit) cron expression - cron(fields) You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such @@ -31,6 +31,8 @@ Creates the specified schedule. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ActionAfterCompletion"`: Specifies the action that EventBridge Scheduler applies to the + schedule after the schedule completes invoking the target. - `"ClientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, EventBridge Scheduler uses a randomly generated token for the request to ensure idempotency. @@ -470,7 +472,7 @@ note of all optional parameters for your UpdateSchedule call. - `name`: The name of the schedule that you are updating. - `schedule_expression`: The expression that defines when the schedule runs. The following formats are supported. at expression - at(yyyy-mm-ddThh:mm:ss) rate expression - - rate(unit value) cron expression - cron(fields) You can use at expressions to + rate(value unit) cron expression - cron(fields) You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. 
You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such @@ -486,6 +488,8 @@ note of all optional parameters for your UpdateSchedule call. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ActionAfterCompletion"`: Specifies the action that EventBridge Scheduler applies to the + schedule after the schedule completes invoking the target. - `"ClientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, EventBridge Scheduler uses a randomly generated token for the request to ensure idempotency. diff --git a/src/services/secrets_manager.jl b/src/services/secrets_manager.jl index 718c6b50c4..90c4910c41 100644 --- a/src/services/secrets_manager.jl +++ b/src/services/secrets_manager.jl @@ -4,6 +4,55 @@ using AWS.AWSServices: secrets_manager using AWS.Compat using AWS.UUIDs +""" + batch_get_secret_value() + batch_get_secret_value(params::Dict{String,<:Any}) + +Retrieves the contents of the encrypted fields SecretString or SecretBinary for up to 20 +secrets. To retrieve a single secret, call GetSecretValue. To choose which secrets to +retrieve, you can specify a list of secrets by name or ARN, or you can use filters. If +Secrets Manager encounters errors such as AccessDeniedException while attempting to +retrieve any of the secrets, you can see the errors in Errors in the response. Secrets +Manager generates CloudTrail GetSecretValue log entries for each secret you request when +you call this action. Do not include sensitive information in request parameters because it +might be logged. For more information, see Logging Secrets Manager events with CloudTrail. +Required permissions: secretsmanager:BatchGetSecretValue, and you must have +secretsmanager:GetSecretValue for each secret. If you use filters, you must also have +secretsmanager:ListSecrets. If the secrets are encrypted using customer-managed keys +instead of the Amazon Web Services managed key aws/secretsmanager, then you also need +kms:Decrypt permissions for the keys. For more information, see IAM policy actions for +Secrets Manager and Authentication and access control in Secrets Manager. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: The filters to choose which secrets to retrieve. You must include Filters or + SecretIdList, but not both. +- `"MaxResults"`: The number of results to include in the response. If there are more + results available, in the response, Secrets Manager includes NextToken. To get the next + results, call BatchGetSecretValue again with the value from NextToken. To use this + parameter, you must also use the Filters parameter. +- `"NextToken"`: A token that indicates where the output should continue from, if a + previous call did not show all results. To get the next results, call BatchGetSecretValue + again with this value. +- `"SecretIdList"`: The ARN or names of the secrets to retrieve. You must include Filters + or SecretIdList, but not both. 
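+
+# Example
+An illustrative sketch only (not part of the generated definition); the secret names are
+hypothetical placeholders passed through the optional `params` dictionary:
+
+    batch_get_secret_value(
+        Dict{String,Any}("SecretIdList" => ["prod/db-password", "prod/api-key"])
+    )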
+""" +function batch_get_secret_value(; aws_config::AbstractAWSConfig=global_aws_config()) + return secrets_manager( + "BatchGetSecretValue"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function batch_get_secret_value( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return secrets_manager( + "BatchGetSecretValue", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_rotate_secret(secret_id) cancel_rotate_secret(secret_id, params::Dict{String,<:Any}) @@ -80,10 +129,11 @@ entry when you call this action. Do not include sensitive information in request except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need -secretsmanager:TagResource. For more information, see IAM policy actions for Secrets -Manager and Authentication and access control in Secrets Manager. To encrypt the secret -with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt -permission to the key. +secretsmanager:TagResource. To add replica Regions, you must also have +secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for +Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the +secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and +kms:Decrypt permission to the key. # Arguments - `name`: The name of the new secret. The secret name can contain ASCII letters, numbers, @@ -100,20 +150,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys identifier for the new version. If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this - parameter in the request. If you don't use the SDK and instead generate a raw HTTP request - to the Secrets Manager service endpoint, then you must generate a ClientRequestToken - yourself for the new version and include the value in the request. This value helps ensure - idempotency. Secrets Manager uses this value to prevent the accidental creation of - duplicate versions if there are failures and retries during a rotation. We recommend that - you generate a UUID-type value to ensure uniqueness of your versions within the specified - secret. If the ClientRequestToken value isn't already associated with a version of the - secret then a new version of the secret is created. If a version with this value already - exists and the version SecretString and SecretBinary values are the same as those in the - request, then the request is ignored. If a version with this value already exists and - that version's SecretString and SecretBinary values are different from those in the - request, then the request fails because you cannot modify an existing version. Instead, use - PutSecretValue to create a new version. This value becomes the VersionId of the new - version. + parameter in the request. If you generate a raw HTTP request to the Secrets Manager + service endpoint, then you must generate a ClientRequestToken and include it in the + request. This value helps ensure idempotency. 
Secrets Manager uses this value to prevent + the accidental creation of duplicate versions if there are failures and retries during a + rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your + versions within the specified secret. If the ClientRequestToken value isn't already + associated with a version of the secret then a new version of the secret is created. If + a version with this value already exists and the version SecretString and SecretBinary + values are the same as those in the request, then the request is ignored. If a version + with this value already exists and that version's SecretString and SecretBinary values are + different from those in the request, then the request fails because you cannot modify an + existing version. Instead, use PutSecretValue to create a new version. This value becomes + the VersionId of the new version. - `"Description"`: The description of the secret. - `"ForceOverwriteReplicaSecret"`: Specifies whether to overwrite a secret with the same name in the destination Region. By default, secrets aren't overwritten. @@ -129,13 +178,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SecretBinary"`: The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretString or SecretBinary must have a value, but not both. - This parameter is not available in the Secrets Manager console. + This parameter is not available in the Secrets Manager console. Sensitive: This field + contains sensitive information, so the service does not include it in CloudTrail log + entries. If you create your own log entries, you must also avoid logging the information in + this field. - `"SecretString"`: The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretString or SecretBinary must have a value, but not both. If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a - JSON structure of key/value pairs that a Lambda rotation function can parse. + JSON structure of key/value pairs that a Lambda rotation function can parse. Sensitive: + This field contains sensitive information, so the service does not include it in CloudTrail + log entries. If you create your own log entries, you must also avoid logging the + information in this field. - `"Tags"`: A list of tags to attach to the secret. Each tag is a key and value pair of strings in a JSON text string, for example: [{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"productio @@ -148,16 +203,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys tags that match secrets' tags. For information about how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters. If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to - avoid confusion with the double quotes required in the JSON text. The following - restrictions apply to tags: Maximum number of tags per secret: 50 Maximum key length: - 127 Unicode characters in UTF-8 Maximum value length: 255 Unicode characters in UTF-8 - Tag keys and values are case sensitive. 
Do not use the aws: prefix in your tag names or - values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit - or delete tag names or values with this prefix. Tags with this prefix do not count against - your tags per secret limit. If you use your tagging schema across multiple services and - resources, other services might have restrictions on allowed characters. Generally allowed - characters: letters, spaces, and numbers representable in UTF-8, plus the following special - characters: + - = . _ : / @. + avoid confusion with the double quotes required in the JSON text. For tag quotas and naming + restrictions, see Service quotas for Tagging in the Amazon Web Services General Reference + guide. """ function create_secret(Name; aws_config::AbstractAWSConfig=global_aws_config()) return secrets_manager( @@ -345,10 +393,10 @@ end get_random_password(params::Dict{String,<:Any}) Generates a random password. We recommend that you specify the maximum length and include -every character type that the system you are generating a password for can support. Secrets -Manager generates a CloudTrail log entry when you call this action. Do not include -sensitive information in request parameters because it might be logged. For more -information, see Logging Secrets Manager events with CloudTrail. Required permissions: +every character type that the system you are generating a password for can support. By +default, Secrets Manager uses uppercase and lowercase letters, numbers, and the following +characters in passwords: !\"#%&'()*+,-./:;<=>?@[]^_`{|}~ Secrets Manager +generates a CloudTrail log entry when you call this action. Required permissions: secretsmanager:GetRandomPassword. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. @@ -431,22 +479,24 @@ end get_secret_value(secret_id, params::Dict{String,<:Any}) Retrieves the contents of the encrypted fields SecretString or SecretBinary from the -specified version of a secret, whichever contains content. We recommend that you cache your -secret values by using client-side caching. Caching secrets improves speed and reduces your -costs. For more information, see Cache secrets for your applications. To retrieve the -previous version of a secret, use VersionStage and specify AWSPREVIOUS. To revert to the -previous version of a secret, call UpdateSecretVersionStage. Secrets Manager generates a -CloudTrail log entry when you call this action. Do not include sensitive information in -request parameters because it might be logged. For more information, see Logging Secrets -Manager events with CloudTrail. Required permissions: secretsmanager:GetSecretValue. If -the secret is encrypted using a customer-managed key instead of the Amazon Web Services -managed key aws/secretsmanager, then you also need kms:Decrypt permissions for that key. -For more information, see IAM policy actions for Secrets Manager and Authentication and -access control in Secrets Manager. +specified version of a secret, whichever contains content. To retrieve the values for a +group of secrets, call BatchGetSecretValue. We recommend that you cache your secret values +by using client-side caching. Caching secrets improves speed and reduces your costs. For +more information, see Cache secrets for your applications. To retrieve the previous version +of a secret, use VersionStage and specify AWSPREVIOUS. To revert to the previous version of +a secret, call UpdateSecretVersionStage. 
Secrets Manager generates a CloudTrail log entry +when you call this action. Do not include sensitive information in request parameters +because it might be logged. For more information, see Logging Secrets Manager events with +CloudTrail. Required permissions: secretsmanager:GetSecretValue. If the secret is +encrypted using a customer-managed key instead of the Amazon Web Services managed key +aws/secretsmanager, then you also need kms:Decrypt permissions for that key. For more +information, see IAM policy actions for Secrets Manager and Authentication and access +control in Secrets Manager. # Arguments -- `secret_id`: The ARN or name of the secret to retrieve. For an ARN, we recommend that you - specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN. +- `secret_id`: The ARN or name of the secret to retrieve. To retrieve a secret from another + account, you must use an ARN. For an ARN, we recommend that you specify a complete ARN + rather than a partial ARN. See Finding a secret from a partial ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -546,16 +596,17 @@ end Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use -the Secrets Manager console. ListSecrets is eventually consistent, however it might not -reflect changes from the last five minutes. To get the latest information for a specific -secret, use DescribeSecret. To list the versions of a secret, use ListSecretVersionIds. To -get the secret value from SecretString or SecretBinary, call GetSecretValue. For -information about finding secrets in the console, see Find secrets in Secrets Manager. -Secrets Manager generates a CloudTrail log entry when you call this action. Do not include -sensitive information in request parameters because it might be logged. For more -information, see Logging Secrets Manager events with CloudTrail. Required permissions: -secretsmanager:ListSecrets. For more information, see IAM policy actions for Secrets -Manager and Authentication and access control in Secrets Manager. +the Secrets Manager console. All Secrets Manager operations are eventually consistent. +ListSecrets might not reflect changes from the last five minutes. You can get more recent +information for a specific secret by calling DescribeSecret. To list the versions of a +secret, use ListSecretVersionIds. To retrieve the values for the secrets, call +BatchGetSecretValue or GetSecretValue. For information about finding secrets in the +console, see Find secrets in Secrets Manager. Secrets Manager generates a CloudTrail log +entry when you call this action. Do not include sensitive information in request parameters +because it might be logged. For more information, see Logging Secrets Manager events with +CloudTrail. Required permissions: secretsmanager:ListSecrets. For more information, see +IAM policy actions for Secrets Manager and Authentication and access control in Secrets +Manager. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -607,7 +658,14 @@ actions for Secrets Manager and Authentication and access control in Secrets Man Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"BlockPublicPolicy"`: Specifies whether to block resource-based policies that allow broad access to the secret, for example those that use a wildcard for the principal. By - default, public policies aren't blocked. + default, public policies aren't blocked. Resource policy validation and the + BlockPublicPolicy parameter help protect your resources by preventing public access from + being granted through the resource policies that are directly attached to your secrets. In + addition to using these features, carefully inspect the following policies to confirm that + they do not grant public access: Identity-based policies attached to associated Amazon + Web Services principals (for example, IAM roles) Resource-based policies attached to + associated Amazon Web Services resources (for example, Key Management Service (KMS) keys) + To review permissions to your secrets, see Determine who has permissions to your secrets. """ function put_resource_policy( ResourcePolicy, SecretId; aws_config::AbstractAWSConfig=global_aws_config() @@ -664,10 +722,10 @@ VersionId, and you specify the same secret data, the operation succeeds but does However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request -parameters except SecretBinary or SecretString because it might be logged. For more -information, see Logging Secrets Manager events with CloudTrail. Required permissions: -secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets -Manager and Authentication and access control in Secrets Manager. +parameters except SecretBinary, SecretString, or RotationToken because it might be logged. +For more information, see Logging Secrets Manager events with CloudTrail. Required +permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions +for Secrets Manager and Authentication and access control in Secrets Manager. # Arguments - `secret_id`: The ARN or name of the secret to add a new version to. For an ARN, we @@ -678,13 +736,13 @@ Manager and Authentication and access control in Secrets Manager. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the new version of the secret. If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this - operation, then you can leave this parameter empty because they generate a random UUID for - you. If you don't use the SDK and instead generate a raw HTTP request to the Secrets - Manager service endpoint, then you must generate a ClientRequestToken yourself for new - versions and include that value in the request. This value helps ensure idempotency. + operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID + for you and includes it as the value for this parameter in the request. If you generate a + raw HTTP request to the Secrets Manager service endpoint, then you must generate a + ClientRequestToken and include it in the request. This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if - there are failures and retries during the Lambda rotation function processing. We recommend - that you generate a UUID-type value to ensure uniqueness within the specified secret. 
If + there are failures and retries during a rotation. We recommend that you generate a + UUID-type value to ensure uniqueness of your versions within the specified secret. If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created. If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request @@ -693,14 +751,25 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys from those in the request, then the request fails because you can't modify a secret version. You can only create new versions to store new secret values. This value becomes the VersionId of the new version. +- `"RotationToken"`: A unique identifier that indicates the source of the request. For + cross-account rotation (when you rotate a secret in one account by using a Lambda rotation + function in another account) and the Lambda rotation function assumes an IAM role to call + Secrets Manager, Secrets Manager validates the identity with the rotation token. For more + information, see How rotation works. Sensitive: This field contains sensitive information, + so the service does not include it in CloudTrail log entries. If you create your own log + entries, you must also avoid logging the information in this field. - `"SecretBinary"`: The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. You must include SecretBinary or SecretString, but not both. You can't access this value from the Secrets - Manager console. + Manager console. Sensitive: This field contains sensitive information, so the service does + not include it in CloudTrail log entries. If you create your own log entries, you must also + avoid logging the information in this field. - `"SecretString"`: The text to encrypt and store in the new version of the secret. You must include SecretBinary or SecretString, but not both. We recommend you create the secret - string as JSON key/value pairs, as shown in the example. + string as JSON key/value pairs, as shown in the example. Sensitive: This field contains + sensitive information, so the service does not include it in CloudTrail log entries. If you + create your own log entries, you must also avoid logging the information in this field. - `"VersionStages"`: A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process. If you specify a staging label that's already associated with a different version @@ -798,8 +867,11 @@ Replicates the secret to a new Regions. See Multi-Region secrets. Secrets Manage a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: -secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for -Secrets Manager and Authentication and access control in Secrets Manager. +secretsmanager:ReplicateSecretToRegions. If the primary secret is encrypted with a KMS key +other than aws/secretsmanager, you also need kms:Decrypt permission to the key. 
To encrypt +the replicated secret with a KMS key other than aws/secretsmanager, you need +kms:GenerateDataKey and kms:Encrypt to the key. For more information, see IAM policy +actions for Secrets Manager and Authentication and access control in Secrets Manager. # Arguments - `add_replica_regions`: A list of Regions in which to replicate the secret. @@ -911,18 +983,17 @@ Permissions for rotation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"ClientRequestToken"`: A unique identifier for the new version of the secret that helps - ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of - duplicate versions if there are failures and retries during rotation. This value becomes - the VersionId of the new version. If you use the Amazon Web Services CLI or one of the - Amazon Web Services SDK to call this operation, then you can leave this parameter empty. - The CLI or SDK generates a random UUID for you and includes that in the request for this - parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets - Manager service endpoint, then you must generate a ClientRequestToken yourself for new - versions and include that value in the request. You only need to specify this value if you - implement your own retry logic and you want to ensure that Secrets Manager doesn't attempt - to create a secret version twice. We recommend that you generate a UUID-type value to - ensure uniqueness within the specified secret. +- `"ClientRequestToken"`: A unique identifier for the new version of the secret. You only + need to specify this value if you implement your own retry logic and you want to ensure + that Secrets Manager doesn't attempt to create a secret version twice. If you use the + Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then + you can leave this parameter empty. The CLI or SDK generates a random UUID for you and + includes it as the value for this parameter in the request. If you generate a raw HTTP + request to the Secrets Manager service endpoint, then you must generate a + ClientRequestToken and include it in the request. This value helps ensure idempotency. + Secrets Manager uses this value to prevent the accidental creation of duplicate versions if + there are failures and retries during a rotation. We recommend that you generate a + UUID-type value to ensure uniqueness of your versions within the specified secret. - `"RotateImmediately"`: Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in RotateSecretRequestRotationRules. For secrets that use a Lambda rotation function to @@ -1012,23 +1083,16 @@ end Attaches tags to a secret. Tags consist of a key name and a value. Tags are part of the secret's metadata. They are not associated with specific versions of the secret. This -operation appends tags to the existing list of tags. The following restrictions apply to -tags: Maximum number of tags per secret: 50 Maximum key length: 127 Unicode characters -in UTF-8 Maximum value length: 255 Unicode characters in UTF-8 Tag keys and values are -case sensitive. Do not use the aws: prefix in your tag names or values because Amazon Web -Services reserves it for Amazon Web Services use. You can't edit or delete tag names or -values with this prefix. Tags with this prefix do not count against your tags per secret -limit. 
If you use your tagging schema across multiple services and resources, other -services might have restrictions on allowed characters. Generally allowed characters: -letters, spaces, and numbers representable in UTF-8, plus the following special characters: -+ - = . _ : / @. If you use tags as part of your security strategy, then adding or -removing a tag can change permissions. If successfully completing this operation would -result in you losing your permissions for this secret, then the operation is blocked and -returns an Access Denied error. Secrets Manager generates a CloudTrail log entry when you -call this action. Do not include sensitive information in request parameters because it -might be logged. For more information, see Logging Secrets Manager events with CloudTrail. -Required permissions: secretsmanager:TagResource. For more information, see IAM policy -actions for Secrets Manager and Authentication and access control in Secrets Manager. +operation appends tags to the existing list of tags. For tag quotas and naming +restrictions, see Service quotas for Tagging in the Amazon Web Services General Reference +guide. If you use tags as part of your security strategy, then adding or removing a tag +can change permissions. If successfully completing this operation would result in you +losing your permissions for this secret, then the operation is blocked and returns an +Access Denied error. Secrets Manager generates a CloudTrail log entry when you call this +action. Do not include sensitive information in request parameters because it might be +logged. For more information, see Logging Secrets Manager events with CloudTrail. Required +permissions: secretsmanager:TagResource. For more information, see IAM policy actions for +Secrets Manager and Authentication and access control in Secrets Manager. # Arguments - `secret_id`: The identifier for the secret to attach tags to. You can specify either the @@ -1149,8 +1213,10 @@ except SecretBinary or SecretString because it might be logged. For more informa Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer -managed key, you must also have kms:GenerateDataKey and kms:Decrypt permissions on the key. -For more information, see Secret encryption and decryption. +managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt +permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission +to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new +key. For more information, see Secret encryption and decryption. # Arguments - `secret_id`: The ARN or name of the secret. For an ARN, we recommend that you specify a @@ -1163,33 +1229,42 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for the new version. If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the - request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets - Manager service endpoint, then you must generate a ClientRequestToken yourself for the new - version and include the value in the request. This value becomes the VersionId of the new - version. + request. 
If you generate a raw HTTP request to the Secrets Manager service endpoint, then + you must generate a ClientRequestToken and include it in the request. This value helps + ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of + duplicate versions if there are failures and retries during a rotation. We recommend that + you generate a UUID-type value to ensure uniqueness of your versions within the specified + secret. - `"Description"`: The description of the secret. - `"KmsKeyId"`: The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels - AWSCURRENT, AWSPENDING, or AWSPREVIOUS. For more information about versions and staging - labels, see Concepts: Version. A key alias is always prefixed by alias/, for example - alias/aws/secretsmanager. For more information, see About aliases. If you set this to an - empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. - If this key doesn't already exist in your account, then Secrets Manager creates it for you - automatically. All users and roles in the Amazon Web Services account automatically have - access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time - significant delay in returning the result. You can only use the Amazon Web Services - managed key aws/secretsmanager if you call this operation using credentials from the same - Amazon Web Services account that owns the secret. If the secret is in a different account, - then you must use a customer managed key and provide the ARN of that KMS key in this field. - The user making the call must have permissions to both the secret and the KMS key in their - respective accounts. + AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new + key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more + information about versions and staging labels, see Concepts: Version. A key alias is always + prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About + aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services + managed key aws/secretsmanager. If this key doesn't already exist in your account, then + Secrets Manager creates it for you automatically. All users and roles in the Amazon Web + Services account automatically have access to use aws/secretsmanager. Creating + aws/secretsmanager can result in a one-time significant delay in returning the result. + You can only use the Amazon Web Services managed key aws/secretsmanager if you call this + operation using credentials from the same Amazon Web Services account that owns the secret. + If the secret is in a different account, then you must use a customer managed key and + provide the ARN of that KMS key in this field. The user making the call must have + permissions to both the secret and the KMS key in their respective accounts. - `"SecretBinary"`: The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. - You can't access this parameter in the Secrets Manager console. + You can't access this parameter in the Secrets Manager console. 
Sensitive: This field + contains sensitive information, so the service does not include it in CloudTrail log + entries. If you create your own log entries, you must also avoid logging the information in + this field. - `"SecretString"`: The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either - SecretBinary or SecretString must have a value, but not both. + SecretBinary or SecretString must have a value, but not both. Sensitive: This field + contains sensitive information, so the service does not include it in CloudTrail log + entries. If you create your own log entries, you must also avoid logging the information in + this field. """ function update_secret(SecretId; aws_config::AbstractAWSConfig=global_aws_config()) return secrets_manager( @@ -1313,7 +1388,8 @@ Manager and Authentication and access control in Secrets Manager. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"SecretId"`: This field is reserved for internal use. +- `"SecretId"`: The ARN or name of the secret with the resource-based policy you want to + validate. """ function validate_resource_policy( ResourcePolicy; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/securityhub.jl b/src/services/securityhub.jl index 804614c910..8ea1cbcda9 100644 --- a/src/services/securityhub.jl +++ b/src/services/securityhub.jl @@ -283,6 +283,58 @@ function batch_get_automation_rules( ) end +""" + batch_get_configuration_policy_associations(configuration_policy_association_identifiers) + batch_get_configuration_policy_associations(configuration_policy_association_identifiers, params::Dict{String,<:Any}) + + Returns associations between an Security Hub configuration and a batch of target accounts, +organizational units, or the root. Only the Security Hub delegated administrator can invoke +this operation from the home Region. A configuration can refer to a configuration policy or +to a self-managed configuration. + +# Arguments +- `configuration_policy_association_identifiers`: Specifies one or more target account + IDs, organizational unit (OU) IDs, or the root ID to retrieve associations for. + +""" +function batch_get_configuration_policy_associations( + ConfigurationPolicyAssociationIdentifiers; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "POST", + "/configurationPolicyAssociation/batchget", + Dict{String,Any}( + "ConfigurationPolicyAssociationIdentifiers" => + ConfigurationPolicyAssociationIdentifiers, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_configuration_policy_associations( + ConfigurationPolicyAssociationIdentifiers, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "POST", + "/configurationPolicyAssociation/batchget", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationPolicyAssociationIdentifiers" => + ConfigurationPolicyAssociationIdentifiers, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_get_security_controls(security_control_ids) batch_get_security_controls(security_control_ids, params::Dict{String,<:Any}) @@ -653,8 +705,9 @@ end - `actions`: One or more actions to update finding fields if a finding matches the conditions specified in Criteria. 
- `criteria`: A set of ASFF finding field attributes and corresponding expected values - that Security Hub uses to filter findings. If a finding matches the conditions specified in - this parameter, Security Hub applies the rule action to the finding. + that Security Hub uses to filter findings. If a rule is enabled and a finding matches the + conditions specified in this parameter, Security Hub applies the rule action to the + finding. - `description`: A description of the rule. - `rule_name`: The name of the rule. - `rule_order`: An integer ranging from 1 to 1000 that represents the order in which the @@ -665,15 +718,14 @@ end Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IsTerminal"`: Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria - for multiple rules, and each rule has different actions. If the value of this field is set - to true for a rule, Security Hub applies the rule action to a finding that matches the rule - criteria and won't evaluate other rules for the finding. The default value of this field is - false. + for multiple rules, and each rule has different actions. If a rule is terminal, Security + Hub applies the rule action to a finding that matches the rule criteria and doesn't + evaluate other rules for the finding. By default, a rule isn't terminal. - `"RuleStatus"`: Whether the rule is active after it is created. If this parameter is - equal to Enabled, Security Hub will apply the rule to findings and finding updates after - the rule is created. To change the value of this parameter after creating a rule, use - BatchUpdateAutomationRules. -- `"Tags"`: User-defined tags that help you label the purpose of a rule. + equal to ENABLED, Security Hub starts applying the rule to findings and finding updates + after the rule is created. To change the value of this parameter after creating a rule, use + BatchUpdateAutomationRules . +- `"Tags"`: User-defined tags associated with an automation rule. """ function create_automation_rule( Actions, @@ -727,6 +779,65 @@ function create_automation_rule( ) end +""" + create_configuration_policy(configuration_policy, name) + create_configuration_policy(configuration_policy, name, params::Dict{String,<:Any}) + + Creates a configuration policy with the defined configuration. Only the Security Hub +delegated administrator can invoke this operation from the home Region. + +# Arguments +- `configuration_policy`: An object that defines how Security Hub is configured. It + includes whether Security Hub is enabled or disabled, a list of enabled security standards, + a list of enabled or disabled security controls, and a list of custom parameter values for + specified controls. If you provide a list of security controls that are enabled in the + configuration policy, Security Hub disables all other controls (including newly released + controls). If you provide a list of security controls that are disabled in the + configuration policy, Security Hub enables all other controls (including newly released + controls). +- `name`: The name of the configuration policy. Alphanumeric characters and the following + ASCII characters are permitted: -, ., !, *, /. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: The description of the configuration policy. +- `"Tags"`: User-defined tags associated with a configuration policy. 
For more + information, see Tagging Security Hub resources in the Security Hub user guide. +""" +function create_configuration_policy( + ConfigurationPolicy, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "POST", + "/configurationPolicy/create", + Dict{String,Any}("ConfigurationPolicy" => ConfigurationPolicy, "Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_configuration_policy( + ConfigurationPolicy, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "POST", + "/configurationPolicy/create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationPolicy" => ConfigurationPolicy, "Name" => Name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_finding_aggregator(region_linking_mode) create_finding_aggregator(region_linking_mode, params::Dict{String,<:Any}) @@ -977,6 +1088,44 @@ function delete_action_target( ) end +""" + delete_configuration_policy(identifier) + delete_configuration_policy(identifier, params::Dict{String,<:Any}) + + Deletes a configuration policy. Only the Security Hub delegated administrator can invoke +this operation from the home Region. For the deletion to succeed, you must first +disassociate a configuration policy from target accounts, organizational units, or the root +by invoking the StartConfigurationPolicyDisassociation operation. + +# Arguments +- `identifier`: The Amazon Resource Name (ARN) or universally unique identifier (UUID) of + the configuration policy. + +""" +function delete_configuration_policy( + Identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "DELETE", + "/configurationPolicy/$(Identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configuration_policy( + Identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "DELETE", + "/configurationPolicy/$(Identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_finding_aggregator(finding_aggregator_arn) delete_finding_aggregator(finding_aggregator_arn, params::Dict{String,<:Any}) @@ -1091,9 +1240,9 @@ end delete_members(account_ids) delete_members(account_ids, params::Dict{String,<:Any}) -Deletes the specified member accounts from Security Hub. Can be used to delete member -accounts that belong to an organization as well as member accounts that were invited -manually. +Deletes the specified member accounts from Security Hub. You can invoke this API only to +delete accounts that became members through invitation. You can't invoke this API to delete +accounts that belong to an Organizations organization. # Arguments - `account_ids`: The list of account IDs for the member accounts to delete. @@ -1185,8 +1334,8 @@ end describe_organization_configuration() describe_organization_configuration(params::Dict{String,<:Any}) -Returns information about the Organizations configuration for Security Hub. Can only be -called from a Security Hub administrator account. +Returns information about the way your organization is configured in Security Hub. Only the +Security Hub administrator account can invoke this operation. 
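+
+# Example
+An illustrative sketch only (not part of the generated definition); it assumes the call is
+made with credentials for the Security Hub administrator account:
+
+    describe_organization_configuration()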
""" function describe_organization_configuration(; @@ -1394,14 +1543,14 @@ end disable_security_hub() disable_security_hub(params::Dict{String,<:Any}) -Disables Security Hub in your account only in the current Region. To disable Security Hub -in all Regions, you must submit one request per Region where you have enabled Security Hub. -When you disable Security Hub for an administrator account, it doesn't disable Security Hub -for any associated member accounts. When you disable Security Hub, your existing findings -and insights and any Security Hub configuration settings are deleted after 90 days and -cannot be recovered. Any standards that were enabled are disabled, and your administrator -and member account associations are removed. If you want to save your existing findings, -you must export them before you disable Security Hub. +Disables Security Hub in your account only in the current Amazon Web Services Region. To +disable Security Hub in all Regions, you must submit one request per Region where you have +enabled Security Hub. You can't disable Security Hub in an account that is currently the +Security Hub administrator. When you disable Security Hub, your existing findings and +insights and any Security Hub configuration settings are deleted after 90 days and cannot +be recovered. Any standards that were enabled are disabled, and your administrator and +member account associations are removed. If you want to save your existing findings, you +must export them before you disable Security Hub. """ function disable_security_hub(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1678,6 +1827,78 @@ function get_administrator_account( ) end +""" + get_configuration_policy(identifier) + get_configuration_policy(identifier, params::Dict{String,<:Any}) + + Provides information about a configuration policy. Only the Security Hub delegated +administrator can invoke this operation from the home Region. + +# Arguments +- `identifier`: The Amazon Resource Name (ARN) or universally unique identifier (UUID) of + the configuration policy. + +""" +function get_configuration_policy( + Identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "GET", + "/configurationPolicy/get/$(Identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configuration_policy( + Identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "GET", + "/configurationPolicy/get/$(Identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configuration_policy_association(target) + get_configuration_policy_association(target, params::Dict{String,<:Any}) + + Returns the association between a configuration and a target account, organizational unit, +or the root. The configuration can be a configuration policy or self-managed behavior. Only +the Security Hub delegated administrator can invoke this operation from the home Region. + +# Arguments +- `target`: The target account ID, organizational unit ID, or the root ID to retrieve the + association for. 
+ +""" +function get_configuration_policy_association( + Target; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "POST", + "/configurationPolicyAssociation/get", + Dict{String,Any}("Target" => Target); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configuration_policy_association( + Target, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "POST", + "/configurationPolicyAssociation/get", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Target" => Target), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_enabled_standards() get_enabled_standards(params::Dict{String,<:Any}) @@ -1759,35 +1980,46 @@ changes made to any fields in the Amazon Web Services Security Finding Format (A # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"EndTime"`: An ISO 8601-formatted timestamp that indicates the end time of the - requested finding history. A correctly formatted example is 2020-05-21T20:16:34.724Z. The - value cannot contain spaces, and date and time should be separated by T. For more - information, see RFC 3339 section 5.6, Internet Date/Time Format. If you provide values for - both StartTime and EndTime, Security Hub returns finding history for the specified time - period. If you provide a value for StartTime but not for EndTime, Security Hub returns - finding history from the StartTime to the time at which the API is called. If you provide a - value for EndTime but not for StartTime, Security Hub returns finding history from the - CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor - EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding - to the time at which the API is called. In all of these scenarios, the response is limited - to 100 results, and the maximum time period is limited to 90 days. + requested finding history. If you provide values for both StartTime and EndTime, Security + Hub returns finding history for the specified time period. If you provide a value for + StartTime but not for EndTime, Security Hub returns finding history from the StartTime to + the time at which the API is called. If you provide a value for EndTime but not for + StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding + to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding + history from the CreatedAt timestamp of the finding to the time at which the API is called. + In all of these scenarios, the response is limited to 100 results, and the maximum time + period is limited to 90 days. This field accepts only the specified formats. Timestamps can + end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds + is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid + timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, + 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, + 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, + 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, + 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, + 2024-01-04T15:25:10.123456789+17:59) - `"MaxResults"`: The maximum number of results to be returned. 
If you don’t provide it, Security Hub returns up to 100 results of finding history. - `"NextToken"`: A token for pagination purposes. Provide NULL as the initial value. In subsequent requests, provide the token included in the response to get up to an additional 100 results of finding history. If you don’t provide NextToken, Security Hub returns up to 100 results of finding history for each request. -- `"StartTime"`: An ISO 8601-formatted timestamp that indicates the start time of the - requested finding history. A correctly formatted example is 2020-05-21T20:16:34.724Z. The - value cannot contain spaces, and date and time should be separated by T. For more - information, see RFC 3339 section 5.6, Internet Date/Time Format. If you provide values for - both StartTime and EndTime, Security Hub returns finding history for the specified time - period. If you provide a value for StartTime but not for EndTime, Security Hub returns - finding history from the StartTime to the time at which the API is called. If you provide a - value for EndTime but not for StartTime, Security Hub returns finding history from the - CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor - EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding - to the time at which the API is called. In all of these scenarios, the response is limited - to 100 results, and the maximum time period is limited to 90 days. +- `"StartTime"`: A timestamp that indicates the start time of the requested finding + history. If you provide values for both StartTime and EndTime, Security Hub returns finding + history for the specified time period. If you provide a value for StartTime but not for + EndTime, Security Hub returns finding history from the StartTime to the time at which the + API is called. If you provide a value for EndTime but not for StartTime, Security Hub + returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you + provide neither StartTime nor EndTime, Security Hub returns finding history from the + CreatedAt timestamp of the finding to the time at which the API is called. In all of these + scenarios, the response is limited to 100 results, and the maximum time period is limited + to 90 days. This field accepts only the specified formats. Timestamps can end with Z or + (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to + a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats + with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) + YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) + YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) + YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) + YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) """ function get_finding_history( FindingIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -2010,6 +2242,47 @@ function get_members( ) end +""" + get_security_control_definition(security_control_id) + get_security_control_definition(security_control_id, params::Dict{String,<:Any}) + + Retrieves the definition of a security control. The definition includes the control title, +description, Region availability, parameter definitions, and other details. + +# Arguments +- `security_control_id`: The ID of the security control to retrieve the definition for. 
+ This field doesn’t accept an Amazon Resource Name (ARN). + +""" +function get_security_control_definition( + SecurityControlId; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "GET", + "/securityControl/definition", + Dict{String,Any}("SecurityControlId" => SecurityControlId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_security_control_definition( + SecurityControlId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "GET", + "/securityControl/definition", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("SecurityControlId" => SecurityControlId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ invite_members(account_ids) invite_members(account_ids, params::Dict{String,<:Any}) @@ -2086,6 +2359,96 @@ function list_automation_rules( ) end +""" + list_configuration_policies() + list_configuration_policies(params::Dict{String,<:Any}) + + Lists the configuration policies that the Security Hub delegated administrator has created +for your organization. Only the delegated administrator can invoke this operation from the +home Region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results that's returned by + ListConfigurationPolicies in each page of the response. When this parameter is used, + ListConfigurationPolicies returns the specified number of results in a single page and a + NextToken response element. You can see the remaining results of the initial request by + sending another ListConfigurationPolicies request with the returned NextToken value. A + valid range for MaxResults is between 1 and 100. +- `"NextToken"`: The NextToken value that's returned from a previous paginated + ListConfigurationPolicies request where MaxResults was used but the results exceeded the + value of that parameter. Pagination continues from the MaxResults was used but the results + exceeded the value of that parameter. Pagination continues from the end of the previous + response that returned the NextToken value. This value is null when there are no more + results to return. +""" +function list_configuration_policies(; aws_config::AbstractAWSConfig=global_aws_config()) + return securityhub( + "GET", + "/configurationPolicy/list"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_configuration_policies( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "GET", + "/configurationPolicy/list", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_configuration_policy_associations() + list_configuration_policy_associations(params::Dict{String,<:Any}) + + Provides information about the associations for your configuration policies and +self-managed behavior. Only the Security Hub delegated administrator can invoke this +operation from the home Region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Options for filtering the ListConfigurationPolicyAssociations response. You + can filter by the Amazon Resource Name (ARN) or universally unique identifier (UUID) of a + configuration, AssociationType, or AssociationStatus. 
+- `"MaxResults"`: The maximum number of results that's returned by + ListConfigurationPolicies in each page of the response. When this parameter is used, + ListConfigurationPolicyAssociations returns the specified number of results in a single + page and a NextToken response element. You can see the remaining results of the initial + request by sending another ListConfigurationPolicyAssociations request with the returned + NextToken value. A valid range for MaxResults is between 1 and 100. +- `"NextToken"`: The NextToken value that's returned from a previous paginated + ListConfigurationPolicyAssociations request where MaxResults was used but the results + exceeded the value of that parameter. Pagination continues from the end of the previous + response that returned the NextToken value. This value is null when there are no more + results to return. +""" +function list_configuration_policy_associations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "POST", + "/configurationPolicyAssociation/list"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_configuration_policy_associations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "POST", + "/configurationPolicyAssociation/list", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_enabled_products_for_import() list_enabled_products_for_import(params::Dict{String,<:Any}) @@ -2384,6 +2747,115 @@ function list_tags_for_resource( ) end +""" + start_configuration_policy_association(configuration_policy_identifier, target) + start_configuration_policy_association(configuration_policy_identifier, target, params::Dict{String,<:Any}) + + Associates a target account, organizational unit, or the root with a specified +configuration. The target can be associated with a configuration policy or self-managed +behavior. Only the Security Hub delegated administrator can invoke this operation from the +home Region. + +# Arguments +- `configuration_policy_identifier`: The Amazon Resource Name (ARN) of a configuration + policy, the universally unique identifier (UUID) of a configuration policy, or a value of + SELF_MANAGED_SECURITY_HUB for a self-managed configuration. +- `target`: The identifier of the target account, organizational unit, or the root to + associate with the specified configuration. 
+ +""" +function start_configuration_policy_association( + ConfigurationPolicyIdentifier, Target; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "POST", + "/configurationPolicyAssociation/associate", + Dict{String,Any}( + "ConfigurationPolicyIdentifier" => ConfigurationPolicyIdentifier, + "Target" => Target, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_configuration_policy_association( + ConfigurationPolicyIdentifier, + Target, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "POST", + "/configurationPolicyAssociation/associate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationPolicyIdentifier" => ConfigurationPolicyIdentifier, + "Target" => Target, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_configuration_policy_disassociation(configuration_policy_identifier) + start_configuration_policy_disassociation(configuration_policy_identifier, params::Dict{String,<:Any}) + + Disassociates a target account, organizational unit, or the root from a specified +configuration. When you disassociate a configuration from its target, the target inherits +the configuration of the closest parent. If there’s no configuration to inherit, the +target retains its settings but becomes a self-managed account. A target can be +disassociated from a configuration policy or self-managed behavior. Only the Security Hub +delegated administrator can invoke this operation from the home Region. + +# Arguments +- `configuration_policy_identifier`: The Amazon Resource Name (ARN) of a configuration + policy, the universally unique identifier (UUID) of a configuration policy, or a value of + SELF_MANAGED_SECURITY_HUB for a self-managed configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Target"`: The identifier of the target account, organizational unit, or the root to + disassociate from the specified configuration. +""" +function start_configuration_policy_disassociation( + ConfigurationPolicyIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "POST", + "/configurationPolicyAssociation/disassociate", + Dict{String,Any}("ConfigurationPolicyIdentifier" => ConfigurationPolicyIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_configuration_policy_disassociation( + ConfigurationPolicyIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "POST", + "/configurationPolicyAssociation/disassociate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ConfigurationPolicyIdentifier" => ConfigurationPolicyIdentifier + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -2497,6 +2969,58 @@ function update_action_target( ) end +""" + update_configuration_policy(identifier) + update_configuration_policy(identifier, params::Dict{String,<:Any}) + + Updates a configuration policy. Only the Security Hub delegated administrator can invoke +this operation from the home Region. + +# Arguments +- `identifier`: The Amazon Resource Name (ARN) or universally unique identifier (UUID) of + the configuration policy. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ConfigurationPolicy"`: An object that defines how Security Hub is configured. It + includes whether Security Hub is enabled or disabled, a list of enabled security standards, + a list of enabled or disabled security controls, and a list of custom parameter values for + specified controls. If you provide a list of security controls that are enabled in the + configuration policy, Security Hub disables all other controls (including newly released + controls). If you provide a list of security controls that are disabled in the + configuration policy, Security Hub enables all other controls (including newly released + controls). When updating a configuration policy, provide a complete list of standards that + you want to enable and a complete list of controls that you want to enable or disable. The + updated configuration replaces the current configuration. +- `"Description"`: The description of the configuration policy. +- `"Name"`: The name of the configuration policy. Alphanumeric characters and the + following ASCII characters are permitted: -, ., !, *, /. +- `"UpdatedReason"`: The reason for updating the configuration policy. +""" +function update_configuration_policy( + Identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "PATCH", + "/configurationPolicy/$(Identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configuration_policy( + Identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "PATCH", + "/configurationPolicy/$(Identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_finding_aggregator(finding_aggregator_arn, region_linking_mode) update_finding_aggregator(finding_aggregator_arn, region_linking_mode, params::Dict{String,<:Any}) @@ -2574,9 +3098,12 @@ end update_findings(filters) update_findings(filters, params::Dict{String,<:Any}) - UpdateFindings is deprecated. Instead of UpdateFindings, use BatchUpdateFindings. Updates -the Note and RecordState of the Security Hub-aggregated findings that the filter attributes -specify. Any member account that can view the finding also sees the update to the finding. + UpdateFindings is a deprecated operation. Instead of UpdateFindings, use the +BatchUpdateFindings operation. Updates the Note and RecordState of the Security +Hub-aggregated findings that the filter attributes specify. Any member account that can +view the finding also sees the update to the finding. Finding updates made with +UpdateFindings might not be persisted if the same finding is later updated by the finding +provider through the BatchImportFindings operation. # Arguments - `filters`: A collection of attributes that specify which findings you want to update. @@ -2648,21 +3175,31 @@ end update_organization_configuration(auto_enable) update_organization_configuration(auto_enable, params::Dict{String,<:Any}) -Used to update the configuration related to Organizations. Can only be called from a -Security Hub administrator account. +Updates the configuration of your organization in Security Hub. Only the Security Hub +administrator account can invoke this operation. # Arguments -- `auto_enable`: Whether to automatically enable Security Hub for new accounts in the - organization. By default, this is false, and new accounts are not added automatically. 
To - automatically enable Security Hub for new accounts, set this to true. +- `auto_enable`: Whether to automatically enable Security Hub in new member accounts when + they join the organization. If set to true, then Security Hub is automatically enabled in + new accounts. If set to false, then Security Hub isn't enabled in new accounts + automatically. The default value is false. If the ConfigurationType of your organization is + set to CENTRAL, then this field is set to false and can't be changed in the home Region and + linked Regions. However, in that case, the delegated administrator can create a + configuration policy in which Security Hub is enabled and associate the policy with new + organization accounts. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoEnableStandards"`: Whether to automatically enable Security Hub default standards - for new member accounts in the organization. By default, this parameter is equal to - DEFAULT, and new member accounts are automatically enabled with default Security Hub - standards. To opt out of enabling default standards for new member accounts, set this - parameter equal to NONE. + in new member accounts when they join the organization. The default value of this parameter + is equal to DEFAULT. If equal to DEFAULT, then Security Hub default standards are + automatically enabled for new member accounts. If equal to NONE, then default standards are + not automatically enabled for new member accounts. If the ConfigurationType of your + organization is set to CENTRAL, then this field is set to NONE and can't be changed in the + home Region and linked Regions. However, in that case, the delegated administrator can + create a configuration policy in which specific security standards are enabled and + associate the policy with new organization accounts. +- `"OrganizationConfiguration"`: """ function update_organization_configuration( AutoEnable; aws_config::AbstractAWSConfig=global_aws_config() @@ -2691,6 +3228,58 @@ function update_organization_configuration( ) end +""" + update_security_control(parameters, security_control_id) + update_security_control(parameters, security_control_id, params::Dict{String,<:Any}) + + Updates the properties of a security control. + +# Arguments +- `parameters`: An object that specifies which security control parameters to update. +- `security_control_id`: The Amazon Resource Name (ARN) or ID of the control to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LastUpdateReason"`: The most recent reason for updating the properties of the security + control. This field accepts alphanumeric characters in addition to white spaces, dashes, + and underscores. 
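+
+# Example
+
+A minimal sketch; the control ID, parameter name, and nested value shape are
+illustrative assumptions rather than values taken from the service definition:
+
+    update_security_control(
+        Dict("daysToExpiration" => Dict("ValueType" => "CUSTOM", "Value" => Dict("Integer" => 30))),
+        "ACM.1",
+        Dict("LastUpdateReason" => "Align with internal certificate rotation policy"),
+    )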
+""" +function update_security_control( + Parameters, SecurityControlId; aws_config::AbstractAWSConfig=global_aws_config() +) + return securityhub( + "PATCH", + "/securityControl/update", + Dict{String,Any}( + "Parameters" => Parameters, "SecurityControlId" => SecurityControlId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_security_control( + Parameters, + SecurityControlId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securityhub( + "PATCH", + "/securityControl/update", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "Parameters" => Parameters, "SecurityControlId" => SecurityControlId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_security_hub_configuration() update_security_hub_configuration(params::Dict{String,<:Any}) diff --git a/src/services/securitylake.jl b/src/services/securitylake.jl index 6912d4beb9..cd0d265de1 100644 --- a/src/services/securitylake.jl +++ b/src/services/securitylake.jl @@ -12,7 +12,7 @@ Adds a natively supported Amazon Web Service as an Amazon Security Lake source. source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web -Service as a source, Security Lake starts collecting logs and events from it, You can use +Service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source. @@ -43,8 +43,8 @@ function create_aws_log_source( end """ - create_custom_log_source(source_name) - create_custom_log_source(source_name, params::Dict{String,<:Any}) + create_custom_log_source(configuration, source_name) + create_custom_log_source(configuration, source_name, params::Dict{String,<:Any}) Adds a third-party custom source in Amazon Security Lake, from the Amazon Web Services Region where you want to create a custom source. Security Lake can collect logs and events @@ -55,12 +55,12 @@ from the custom source. In addition, this operation also creates an associated G and an Glue crawler. # Arguments +- `configuration`: The configuration for the third-party custom source. - `source_name`: Specify the name for a third-party custom source. This must be a Regionally unique value. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"configuration"`: The configuration for the third-party custom source. - `"eventClasses"`: The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY FILE_ACTIVITY KERNEL_ACTIVITY @@ -75,17 +75,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys log collection to a specific version of custom data source. 
""" function create_custom_log_source( - sourceName; aws_config::AbstractAWSConfig=global_aws_config() + configuration, sourceName; aws_config::AbstractAWSConfig=global_aws_config() ) return securitylake( "POST", "/v1/datalake/logsources/custom", - Dict{String,Any}("sourceName" => sourceName); + Dict{String,Any}("configuration" => configuration, "sourceName" => sourceName); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_custom_log_source( + configuration, sourceName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -94,7 +95,13 @@ function create_custom_log_source( "POST", "/v1/datalake/logsources/custom", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("sourceName" => sourceName), params) + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, "sourceName" => sourceName + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -107,17 +114,17 @@ end Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before -enabling log collection in Regions. By default, the CreateDataLake Security Lake in all -Regions. To specify particular Regions, configure these Regions using the configurations -parameter. If you have already enabled Security Lake in a Region when you call this -command, the command will update the Region if you provide new configuration parameters. If -you have not already enabled Security Lake in the Region when you call this API, it will -set up the data lake in the Region with the specified configurations. When you enable -Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This -includes ingesting security data from sources, storing data, and making data accessible to -subscribers. Security Lake also enables all the existing settings and resources that it -stores or maintains for your Amazon Web Services account in the current Region, including -security log and event data. For more information, see the Amazon Security Lake User Guide. +enabling log collection in Regions. To specify particular Regions, configure these Regions +using the configurations parameter. If you have already enabled Security Lake in a Region +when you call this command, the command will update the Region if you provide new +configuration parameters. If you have not already enabled Security Lake in the Region when +you call this API, it will set up the data lake in the Region with the specified +configurations. When you enable Security Lake, it starts ingesting security data after the +CreateAwsLogSource call. This includes ingesting security data from sources, storing data, +and making data accessible to subscribers. Security Lake also enables all the existing +settings and resources that it stores or maintains for your Amazon Web Services account in +the current Region, including security log and event data. For more information, see the +Amazon Security Lake User Guide. # Arguments - `configurations`: Specify the Region or Regions that will contribute data to the rollup @@ -126,6 +133,11 @@ security log and event data. For more information, see the Amazon Security Lake the Glue table. This table contains partitions generated by the ingestion and normalization of Amazon Web Services log sources and custom sources. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"tags"`: An array of objects, one for each tag to associate with the data lake + configuration. For each tag, you must specify both a tag key and a tag value. A tag value + cannot be null, but it can be an empty string. """ function create_data_lake( configurations, @@ -225,44 +237,35 @@ function create_data_lake_exception_subscription( end """ - create_data_lake_organization_configuration(auto_enable_new_account) - create_data_lake_organization_configuration(auto_enable_new_account, params::Dict{String,<:Any}) + create_data_lake_organization_configuration() + create_data_lake_organization_configuration(params::Dict{String,<:Any}) Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization. -# Arguments -- `auto_enable_new_account`: Enable Security Lake with the specified configuration - settings, to begin collecting security data for new accounts in your organization. - +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoEnableNewAccount"`: Enable Security Lake with the specified configuration settings, + to begin collecting security data for new accounts in your organization. """ -function create_data_lake_organization_configuration( - autoEnableNewAccount; aws_config::AbstractAWSConfig=global_aws_config() +function create_data_lake_organization_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() ) return securitylake( "POST", - "/v1/datalake/organization/configuration", - Dict{String,Any}("autoEnableNewAccount" => autoEnableNewAccount); + "/v1/datalake/organization/configuration"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_data_lake_organization_configuration( - autoEnableNewAccount, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return securitylake( "POST", "/v1/datalake/organization/configuration", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("autoEnableNewAccount" => autoEnableNewAccount), - params, - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -279,13 +282,16 @@ Region. # Arguments - `sources`: The supported Amazon Web Services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services. -- `subscriber_identity`: The AWS identity used to access your data. +- `subscriber_identity`: The Amazon Web Services identity used to access your data. - `subscriber_name`: The name of your Security Lake subscriber account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"accessTypes"`: The Amazon S3 or Lake Formation access type. - `"subscriberDescription"`: The description for your subscriber account in Security Lake. +- `"tags"`: An array of objects, one for each tag to associate with the subscriber. For + each tag, you must specify both a tag key and a tag value. A tag value cannot be null, but + it can be an empty string. 
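+
+# Example
+
+A minimal sketch; the log source entry, subscriber identity, and subscriber name use
+placeholder values, and the nested `Dict` shapes are illustrative assumptions:
+
+    create_subscriber(
+        [Dict("awsLogSource" => Dict("sourceName" => "CLOUD_TRAIL_MGMT", "sourceVersion" => "2.0"))],
+        Dict("principal" => "111122223333", "externalId" => "example-external-id"),
+        "example-subscriber",
+    )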
""" function create_subscriber( sources, @@ -518,46 +524,37 @@ function delete_data_lake_exception_subscription( end """ - delete_data_lake_organization_configuration(auto_enable_new_account) - delete_data_lake_organization_configuration(auto_enable_new_account, params::Dict{String,<:Any}) - -Removes automatic the enablement of configuration settings for new member accounts (but -retains the settings for the delegated administrator) from Amazon Security Lake. You must -run this API using the credentials of the delegated administrator. When you run this API, -new member accounts that are added after the organization enables Security Lake won't -contribute to the data lake. + delete_data_lake_organization_configuration() + delete_data_lake_organization_configuration(params::Dict{String,<:Any}) -# Arguments -- `auto_enable_new_account`: Removes the automatic enablement of configuration settings for - new member accounts in Security Lake. +Turns off automatic enablement of Amazon Security Lake for member accounts that are added +to an organization in Organizations. Only the delegated Security Lake administrator for an +organization can perform this operation. If the delegated Security Lake administrator +performs this operation, new member accounts won't automatically contribute data to the +data lake. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"autoEnableNewAccount"`: Turns off automatic enablement of Security Lake for member + accounts that are added to an organization. """ -function delete_data_lake_organization_configuration( - autoEnableNewAccount; aws_config::AbstractAWSConfig=global_aws_config() +function delete_data_lake_organization_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() ) return securitylake( "POST", - "/v1/datalake/organization/configuration/delete", - Dict{String,Any}("autoEnableNewAccount" => autoEnableNewAccount); + "/v1/datalake/organization/configuration/delete"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function delete_data_lake_organization_configuration( - autoEnableNewAccount, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return securitylake( "POST", "/v1/datalake/organization/configuration/delete", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("autoEnableNewAccount" => autoEnableNewAccount), - params, - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -812,7 +809,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. -- `"regions"`: List the Amazon Web Services Regions from which exceptions are retrieved. +- `"regions"`: The Amazon Web Services Regions from which exceptions are retrieved. """ function list_data_lake_exceptions(; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( @@ -839,12 +836,12 @@ end list_data_lakes(params::Dict{String,<:Any}) Retrieves the Amazon Security Lake configuration object for the specified Amazon Web -Services account ID. You can use the ListDataLakes API to know whether Security Lake is -enabled for any region. +Services Regions. 
You can use this operation to determine whether Security Lake is enabled +for a Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"regions"`: The list of regions where Security Lake is enabled. +- `"regions"`: The list of Regions where Security Lake is enabled. """ function list_data_lakes(; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( @@ -876,7 +873,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"maxResults"`: The maximum number of accounts for which the log sources are displayed. - `"nextToken"`: If nextToken is returned, there are more results available. You can repeat the call using the returned token to retrieve the next page. -- `"regions"`: The list of regions for which log sources are displayed. +- `"regions"`: The list of Regions for which log sources are displayed. - `"sources"`: The list of sources for which log sources are displayed. """ function list_log_sources(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -930,6 +927,43 @@ function list_subscribers( ) end +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Retrieves the tags (keys and values) that are associated with an Amazon Security Lake +resource: a subscriber, or the data lake configuration for your Amazon Web Services account +in a particular Amazon Web Services Region. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Security Lake resource for + which you want to retrieve the tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return securitylake( + "GET", + "/v1/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securitylake( + "GET", + "/v1/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ register_data_lake_delegated_administrator(account_id) register_data_lake_delegated_administrator(account_id, params::Dict{String,<:Any}) @@ -970,6 +1004,93 @@ function register_data_lake_delegated_administrator( ) end +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds or updates one or more tags that are associated with an Amazon Security Lake resource: +a subscriber, or the data lake configuration for your Amazon Web Services account in a +particular Amazon Web Services Region. A tag is a label that you can define and associate +with Amazon Web Services resources. Each tag consists of a required tag key and an +associated tag value. A tag key is a general label that acts as a category for a more +specific tag value. A tag value acts as a descriptor for a tag key. Tags can help you +identify, categorize, and manage resources in different ways, such as by owner, +environment, or other criteria. For more information, see Tagging Amazon Security Lake +resources in the Amazon Security Lake User Guide. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Security Lake resource to + add or update the tags for. +- `tags`: An array of objects, one for each tag (key and value) to associate with the + Amazon Security Lake resource. For each tag, you must specify both a tag key and a tag + value. 
A tag value cannot be null, but it can be an empty string. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return securitylake( + "POST", + "/v1/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securitylake( + "POST", + "/v1/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes one or more tags (keys and values) from an Amazon Security Lake resource: a +subscriber, or the data lake configuration for your Amazon Web Services account in a +particular Amazon Web Services Region. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Security Lake resource to + remove one or more tags from. +- `tag_keys`: A list of one or more tag keys. For each value in the list, specify the tag + key for a tag to remove from the Amazon Security Lake resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return securitylake( + "DELETE", + "/v1/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securitylake( + "DELETE", + "/v1/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_data_lake(configurations) update_data_lake(configurations, params::Dict{String,<:Any}) @@ -981,6 +1102,11 @@ to consolidate data from multiple Amazon Web Services Regions. - `configurations`: Specify the Region or Regions that will contribute data to the rollup region. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"metaStoreManagerRoleArn"`: The Amazon Resource Name (ARN) used to create and update the + Glue table. This table contains partitions generated by the ingestion and normalization of + Amazon Web Services log sources and custom sources. """ function update_data_lake(configurations; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( diff --git a/src/services/service_catalog.jl b/src/services/service_catalog.jl index ff11d74d31..e88c352c39 100644 --- a/src/services/service_catalog.jl +++ b/src/services/service_catalog.jl @@ -97,7 +97,7 @@ Associates the specified principal ARN with the specified portfolio. If you shar portfolio with principal name sharing enabled, the PrincipalARN association is included in the share. The PortfolioID, PrincipalARN, and PrincipalType parameters are required. You can associate a maximum of 10 Principals with a portfolio using PrincipalType as -IAM_PATTERN When you associate a principal with portfolio, a potential privilege +IAM_PATTERN. When you associate a principal with portfolio, a potential privilege escalation path may occur when that portfolio is then shared with other accounts. 
For a user in a recipient account who is not an Service Catalog Admin, but still has the ability to create Principals (Users/Groups/Roles), that user could create a role that matches a @@ -244,6 +244,9 @@ Associates a self-service action with a provisioning artifact. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AcceptLanguage"`: The language code. jp - Japanese zh - Chinese +- `"IdempotencyToken"`: A unique identifier that you provide to ensure idempotency. If + multiple requests from the same Amazon Web Services account use the same idempotency token, + the same response is returned for each repeated request. """ function associate_service_action_with_provisioning_artifact( ProductId, @@ -257,6 +260,7 @@ function associate_service_action_with_provisioning_artifact( "ProductId" => ProductId, "ProvisioningArtifactId" => ProvisioningArtifactId, "ServiceActionId" => ServiceActionId, + "IdempotencyToken" => string(uuid4()), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -278,6 +282,7 @@ function associate_service_action_with_provisioning_artifact( "ProductId" => ProductId, "ProvisioningArtifactId" => ProvisioningArtifactId, "ServiceActionId" => ServiceActionId, + "IdempotencyToken" => string(uuid4()), ), params, ), @@ -667,13 +672,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that are associated to the OrganizationNode. The output returns a PortfolioShareToken, which enables the administrator to monitor the status of the PortfolioShare creation process. -- `"SharePrincipals"`: Enables or disables Principal sharing when creating the portfolio - share. If this flag is not provided, principal sharing is disabled. When you enable - Principal Name Sharing for a portfolio share, the share recipient account end users with a - principal that matches any of the associated IAM patterns can provision products from the - portfolio. Once shared, the share recipient can view associations of PrincipalType: - IAM_PATTERN on their portfolio. You can create the principals in the recipient account - before or after creating the share. +- `"SharePrincipals"`: This parameter is only supported for portfolios with an + OrganizationalNode Type of ORGANIZATION or ORGANIZATIONAL_UNIT. Enables or disables + Principal sharing when creating the portfolio share. If you do not provide this flag, + principal sharing is disabled. When you enable Principal Name Sharing for a portfolio + share, the share recipient account end users with a principal that matches any of the + associated IAM patterns can provision products from the portfolio. Once shared, the share + recipient can view associations of PrincipalType: IAM_PATTERN on their portfolio. You can + create the principals in the recipient account before or after creating the share. - `"ShareTagOptions"`: Enables or disables TagOptions sharing when creating the portfolio share. If this flag is not provided, TagOptions sharing is disabled. """ @@ -1286,11 +1292,14 @@ Deletes a self-service action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AcceptLanguage"`: The language code. jp - Japanese zh - Chinese +- `"IdempotencyToken"`: A unique identifier that you provide to ensure idempotency. If + multiple requests from the same Amazon Web Services account use the same idempotency token, + the same response is returned for each repeated request. 
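+
+# Example
+
+A minimal sketch; the self-service action ID is a placeholder. An idempotency token is
+generated automatically for the request:
+
+    delete_service_action("act-examplea1b2c3d4e")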
""" function delete_service_action(Id; aws_config::AbstractAWSConfig=global_aws_config()) return service_catalog( "DeleteServiceAction", - Dict{String,Any}("Id" => Id); + Dict{String,Any}("Id" => Id, "IdempotencyToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1300,7 +1309,13 @@ function delete_service_action( ) return service_catalog( "DeleteServiceAction", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Id" => Id), params)); + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Id" => Id, "IdempotencyToken" => string(uuid4())), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2139,6 +2154,9 @@ artifact. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AcceptLanguage"`: The language code. jp - Japanese zh - Chinese +- `"IdempotencyToken"`: A unique identifier that you provide to ensure idempotency. If + multiple requests from the same Amazon Web Services account use the same idempotency token, + the same response is returned for each repeated request. """ function disassociate_service_action_from_provisioning_artifact( ProductId, @@ -2152,6 +2170,7 @@ function disassociate_service_action_from_provisioning_artifact( "ProductId" => ProductId, "ProvisioningArtifactId" => ProvisioningArtifactId, "ServiceActionId" => ServiceActionId, + "IdempotencyToken" => string(uuid4()), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2173,6 +2192,7 @@ function disassociate_service_action_from_provisioning_artifact( "ProductId" => ProductId, "ProvisioningArtifactId" => ProvisioningArtifactId, "ServiceActionId" => ServiceActionId, + "IdempotencyToken" => string(uuid4()), ), params, ), @@ -2447,15 +2467,17 @@ end associated to an Service Catalog product and provisioning artifact. Once imported, all supported governance actions are supported on the provisioned product. Resource import only supports CloudFormation stack ARNs. CloudFormation StackSets, and non-root nested -stacks are not supported. The CloudFormation stack must have one of the following +stacks, are not supported. The CloudFormation stack must have one of the following statuses to be imported: CREATE_COMPLETE, UPDATE_COMPLETE, UPDATE_ROLLBACK_COMPLETE, IMPORT_COMPLETE, and IMPORT_ROLLBACK_COMPLETE. Import of the resource requires that the CloudFormation stack template matches the associated Service Catalog product provisioning -artifact. When you import an existing CloudFormation stack into a portfolio, constraints -that are associated with the product aren't applied during the import process. The -constraints are applied after you call UpdateProvisionedProduct for the provisioned -product. The user or role that performs this operation must have the -cloudformation:GetTemplate and cloudformation:DescribeStacks IAM policy permissions. +artifact. When you import an existing CloudFormation stack into a portfolio, Service +Catalog does not apply the product's associated constraints during the import process. +Service Catalog applies the constraints after you call UpdateProvisionedProduct for the +provisioned product. The user or role that performs this operation must have the +cloudformation:GetTemplate and cloudformation:DescribeStacks IAM policy permissions. You +can only import one provisioned product at a time. The product's CloudFormation stack must +have the IMPORT_COMPLETE status before you import another. 
# Arguments - `idempotency_token`: A unique identifier that you provide to ensure idempotency. If @@ -3664,7 +3686,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AccessLevelFilter"`: The access level to use to obtain results. The default is User. - `"Filters"`: The search filters. When the key is SearchQuery, the searchable fields are arn, createdTime, id, lastRecordId, idempotencyToken, name, physicalId, productId, - provisioningArtifact, type, status, tags, userArn, userArnSession, + provisioningArtifactId, type, status, tags, userArn, userArnSession, lastProvisioningRecordId, lastSuccessfulProvisioningRecordId, productName, and provisioningArtifactName. Example: \"SearchQuery\":[\"status:AVAILABLE\"] - `"PageSize"`: The maximum number of items to return with this call. @@ -3844,7 +3866,7 @@ Updates the specified portfolio share. You can use this API to enable or disable sharing or Principal sharing for an existing portfolio share. The portfolio share cannot be updated if the CreatePortfolioShare operation is IN_PROGRESS, as the share is not available to recipient entities. In this case, you must wait for the portfolio share to be -COMPLETED. You must provide the accountId or organization node in the input, but not both. +completed. You must provide the accountId or organization node in the input, but not both. If the portfolio is shared to both an external account and an organization node, and both shares need to be updated, you must invoke UpdatePortfolioShare separately for each share type. This API cannot be used for removing the portfolio share. You must use diff --git a/src/services/service_catalog_appregistry.jl b/src/services/service_catalog_appregistry.jl index dcbefd206f..8267f5dbc0 100644 --- a/src/services/service_catalog_appregistry.jl +++ b/src/services/service_catalog_appregistry.jl @@ -48,13 +48,24 @@ end associate_resource(application, resource, resource_type, params::Dict{String,<:Any}) Associates a resource with an application. The resource can be specified by its ARN or -name. The application can be specified by ARN, ID, or name. +name. The application can be specified by ARN, ID, or name. Minimum permissions You +must have the following permissions to associate a resource using the OPTIONS parameter set +to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have +these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess +policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry +Administrator Guide. resource-groups:AssociateResource cloudformation:UpdateStack + cloudformation:DescribeStacks In addition, you must have the tagging permission +defined by the Amazon Web Services service that creates the resource. For more information, +see TagResources in the Resource Groups Tagging API Reference. # Arguments - `application`: The name, ID, or ARN of the application. - `resource`: The name or ID of the resource of which the application will be associated. - `resource_type`: The type of resource of which the application will be associated. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"options"`: Determines whether an application tag is applied or skipped. 
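+
+# Example
+
+A minimal sketch; the application name, stack name, and resource type are placeholders,
+and the `options` value assumes the APPLY_APPLICATION_TAG behavior described above is
+requested as a list entry passed through the trailing `params` dictionary:
+
+    associate_resource(
+        "example-application",
+        "example-stack",
+        "CFN_STACK",
+        Dict("options" => ["APPLY_APPLICATION_TAG"]),
+    )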
""" function associate_resource( application, resource, resourceType; aws_config::AbstractAWSConfig=global_aws_config() @@ -308,8 +319,17 @@ end disassociate_resource(application, resource, resource_type) disassociate_resource(application, resource, resource_type, params::Dict{String,<:Any}) -Disassociates a resource from application. Both the resource and the application can be -specified either by ID or name. + Disassociates a resource from application. Both the resource and the application can be +specified either by ID or name. Minimum permissions You must have the following +permissions to remove a resource that's been associated with an application using the +APPLY_APPLICATION_TAG option for AssociateResource. tag:GetResources +tag:UntagResources You must also have the following permissions if you don't use the +AWSServiceCatalogAppRegistryFullAccess policy. For more information, see +AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. +resource-groups:DisassociateResource cloudformation:UpdateStack +cloudformation:DescribeStacks In addition, you must have the tagging permission +defined by the Amazon Web Services service that creates the resource. For more information, +see UntagResources in the Resource Groups Tagging API Reference. # Arguments - `application`: The name or ID of the application. @@ -390,6 +410,14 @@ Gets the resource associated with the application. - `resource`: The name or ID of the resource associated with the application. - `resource_type`: The type of resource associated with the application. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. If the parameter is omitted, it + defaults to 25. The value is optional. +- `"nextToken"`: A unique pagination token for each page of results. Make the call again + with the returned token to retrieve the next page of results. +- `"resourceTagStatus"`: States whether an application tag is applied, not applied, in the + process of being applied, or skipped. """ function get_associated_resource( application, resource, resourceType; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/service_quotas.jl b/src/services/service_quotas.jl index 1a3af68e81..a5790b8194 100644 --- a/src/services/service_quotas.jl +++ b/src/services/service_quotas.jl @@ -8,10 +8,10 @@ using AWS.UUIDs associate_service_quota_template() associate_service_quota_template(params::Dict{String,<:Any}) -Associates your quota request template with your organization. When a new account is -created in your organization, the quota increase requests in the template are automatically -applied to the account. You can add a quota increase request for any adjustable quota to -your template. +Associates your quota request template with your organization. When a new Amazon Web +Services account is created in your organization, the quota increase requests in the +template are automatically applied to the account. You can add a quota increase request for +any adjustable quota to your template. """ function associate_service_quota_template(; @@ -41,9 +41,12 @@ end Deletes the quota increase request for the specified quota from your quota request template. # Arguments -- `aws_region`: The AWS Region. -- `quota_code`: The quota identifier. -- `service_code`: The service identifier. +- `aws_region`: Specifies the Amazon Web Services Region for which the request was made. 
+- `quota_code`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. """ function delete_service_quota_increase_request_from_template( @@ -88,8 +91,8 @@ end disassociate_service_quota_template(params::Dict{String,<:Any}) Disables your quota request template. After a template is disabled, the quota increase -requests in the template are not applied to new accounts in your organization. Disabling a -quota request template does not apply its quota increase requests. +requests in the template are not applied to new Amazon Web Services accounts in your +organization. Disabling a quota request template does not apply its quota increase requests. """ function disassociate_service_quota_template(; @@ -147,8 +150,11 @@ Retrieves the default value for the specified quota. The default value does not quota increases. # Arguments -- `quota_code`: The quota identifier. -- `service_code`: The service identifier. +- `quota_code`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. """ function get_awsdefault_service_quota( @@ -188,7 +194,7 @@ end Retrieves information about the specified quota increase request. # Arguments -- `request_id`: The ID of the quota increase request. +- `request_id`: Specifies the ID of the quota increase request. """ function get_requested_service_quota_change( @@ -225,9 +231,17 @@ default values are available. If the applied quota value is not available for a quota is not retrieved. # Arguments -- `quota_code`: The quota identifier. -- `service_code`: The service identifier. +- `quota_code`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ContextId"`: Specifies the Amazon Web Services account or resource to which the quota + applies. The value in this field depends on the context scope associated with the specified + service quota. """ function get_service_quota( QuotaCode, ServiceCode; aws_config::AbstractAWSConfig=global_aws_config() @@ -267,9 +281,12 @@ Retrieves information about the specified quota increase request in your quota r template. # Arguments -- `aws_region`: The AWS Region. -- `quota_code`: The quota identifier. -- `service_code`: The service identifier. +- `aws_region`: Specifies the Amazon Web Services Region for which you made the request. +- `quota_code`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. +- `service_code`: Specifies the service identifier. 
To find the service code value for an + Amazon Web Services service, use the ListServices operation. """ function get_service_quota_increase_request_from_template( @@ -313,17 +330,27 @@ end list_awsdefault_service_quotas(service_code) list_awsdefault_service_quotas(service_code, params::Dict{String,<:Any}) -Lists the default values for the quotas for the specified AWS service. A default value does -not reflect any quota increases. +Lists the default values for the quotas for the specified Amazon Web Service. A default +value does not reflect any quota increases. # Arguments -- `service_code`: The service identifier. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve - the remaining results, if any, make another call with the token returned from this call. -- `"NextToken"`: The token for the next page of results. +- `"MaxResults"`: Specifies the maximum number of results that you want included on each + page of the response. If you do not include this parameter, it defaults to a value + appropriate to the operation. If additional items exist beyond those included in the + current response, the NextToken response element is present and has a value (is not null). + Include that value as the NextToken request parameter in the next call to the operation to + get the next part of the results. An API operation can return fewer results than the + maximum even when there are more results available. You should check NextToken after every + operation to ensure that you receive all of the results. +- `"NextToken"`: Specifies a value for receiving additional results after you receive a + NextToken response in a previous request. A NextToken response indicates that more output + is available. Set this parameter to the value of the previous call's NextToken response to + indicate where the output should continue from. """ function list_awsdefault_service_quotas( ServiceCode; aws_config::AbstractAWSConfig=global_aws_config() @@ -354,15 +381,28 @@ end list_requested_service_quota_change_history() list_requested_service_quota_change_history(params::Dict{String,<:Any}) -Retrieves the quota increase requests for the specified service. +Retrieves the quota increase requests for the specified Amazon Web Service. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve - the remaining results, if any, make another call with the token returned from this call. -- `"NextToken"`: The token for the next page of results. -- `"ServiceCode"`: The service identifier. -- `"Status"`: The status of the quota increase request. +- `"MaxResults"`: Specifies the maximum number of results that you want included on each + page of the response. If you do not include this parameter, it defaults to a value + appropriate to the operation. If additional items exist beyond those included in the + current response, the NextToken response element is present and has a value (is not null). + Include that value as the NextToken request parameter in the next call to the operation to + get the next part of the results. 
An API operation can return fewer results than the + maximum even when there are more results available. You should check NextToken after every + operation to ensure that you receive all of the results. +- `"NextToken"`: Specifies a value for receiving additional results after you receive a + NextToken response in a previous request. A NextToken response indicates that more output + is available. Set this parameter to the value of the previous call's NextToken response to + indicate where the output should continue from. +- `"QuotaRequestedAtLevel"`: Specifies at which level within the Amazon Web Services + account the quota request applies to. +- `"ServiceCode"`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. +- `"Status"`: Specifies that you want to filter the results to only the requests with the + matching status. """ function list_requested_service_quota_change_history(; aws_config::AbstractAWSConfig=global_aws_config() @@ -391,15 +431,30 @@ end Retrieves the quota increase requests for the specified quota. # Arguments -- `quota_code`: The quota identifier. -- `service_code`: The service identifier. +- `quota_code`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve - the remaining results, if any, make another call with the token returned from this call. -- `"NextToken"`: The token for the next page of results. -- `"Status"`: The status value of the quota increase request. +- `"MaxResults"`: Specifies the maximum number of results that you want included on each + page of the response. If you do not include this parameter, it defaults to a value + appropriate to the operation. If additional items exist beyond those included in the + current response, the NextToken response element is present and has a value (is not null). + Include that value as the NextToken request parameter in the next call to the operation to + get the next part of the results. An API operation can return fewer results than the + maximum even when there are more results available. You should check NextToken after every + operation to ensure that you receive all of the results. +- `"NextToken"`: Specifies a value for receiving additional results after you receive a + NextToken response in a previous request. A NextToken response indicates that more output + is available. Set this parameter to the value of the previous call's NextToken response to + indicate where the output should continue from. +- `"QuotaRequestedAtLevel"`: Specifies at which level within the Amazon Web Services + account the quota request applies to. +- `"Status"`: Specifies that you want to filter the results to only the requests with the + matching status. """ function list_requested_service_quota_change_history_by_quota( QuotaCode, ServiceCode; aws_config::AbstractAWSConfig=global_aws_config() @@ -439,11 +494,21 @@ Lists the quota increase requests in the specified quota request template. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AwsRegion"`: The AWS Region. -- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve - the remaining results, if any, make another call with the token returned from this call. -- `"NextToken"`: The token for the next page of results. -- `"ServiceCode"`: The service identifier. +- `"AwsRegion"`: Specifies the Amazon Web Services Region for which you made the request. +- `"MaxResults"`: Specifies the maximum number of results that you want included on each + page of the response. If you do not include this parameter, it defaults to a value + appropriate to the operation. If additional items exist beyond those included in the + current response, the NextToken response element is present and has a value (is not null). + Include that value as the NextToken request parameter in the next call to the operation to + get the next part of the results. An API operation can return fewer results than the + maximum even when there are more results available. You should check NextToken after every + operation to ensure that you receive all of the results. +- `"NextToken"`: Specifies a value for receiving additional results after you receive a + NextToken response in a previous request. A NextToken response indicates that more output + is available. Set this parameter to the value of the previous call's NextToken response to + indicate where the output should continue from. +- `"ServiceCode"`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. """ function list_service_quota_increase_requests_in_template(; aws_config::AbstractAWSConfig=global_aws_config() @@ -469,18 +534,33 @@ end list_service_quotas(service_code) list_service_quotas(service_code, params::Dict{String,<:Any}) -Lists the applied quota values for the specified AWS service. For some quotas, only the -default values are available. If the applied quota value is not available for a quota, the -quota is not retrieved. +Lists the applied quota values for the specified Amazon Web Service. For some quotas, only +the default values are available. If the applied quota value is not available for a quota, +the quota is not retrieved. # Arguments -- `service_code`: The service identifier. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve - the remaining results, if any, make another call with the token returned from this call. -- `"NextToken"`: The token for the next page of results. +- `"MaxResults"`: Specifies the maximum number of results that you want included on each + page of the response. If you do not include this parameter, it defaults to a value + appropriate to the operation. If additional items exist beyond those included in the + current response, the NextToken response element is present and has a value (is not null). + Include that value as the NextToken request parameter in the next call to the operation to + get the next part of the results. An API operation can return fewer results than the + maximum even when there are more results available. 
You should check NextToken after every + operation to ensure that you receive all of the results. +- `"NextToken"`: Specifies a value for receiving additional results after you receive a + NextToken response in a previous request. A NextToken response indicates that more output + is available. Set this parameter to the value of the previous call's NextToken response to + indicate where the output should continue from. +- `"QuotaAppliedAtLevel"`: Specifies at which level of granularity that the quota value is + applied. +- `"QuotaCode"`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. """ function list_service_quotas(ServiceCode; aws_config::AbstractAWSConfig=global_aws_config()) return service_quotas( @@ -509,13 +589,22 @@ end list_services() list_services(params::Dict{String,<:Any}) -Lists the names and codes for the services integrated with Service Quotas. +Lists the names and codes for the Amazon Web Services integrated with Service Quotas. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve - the remaining results, if any, make another call with the token returned from this call. -- `"NextToken"`: The token for the next page of results. +- `"MaxResults"`: Specifies the maximum number of results that you want included on each + page of the response. If you do not include this parameter, it defaults to a value + appropriate to the operation. If additional items exist beyond those included in the + current response, the NextToken response element is present and has a value (is not null). + Include that value as the NextToken request parameter in the next call to the operation to + get the next part of the results. An API operation can return fewer results than the + maximum even when there are more results available. You should check NextToken after every + operation to ensure that you receive all of the results. +- `"NextToken"`: Specifies a value for receiving additional results after you receive a + NextToken response in a previous request. A NextToken response indicates that more output + is available. Set this parameter to the value of the previous call's NextToken response to + indicate where the output should continue from. """ function list_services(; aws_config::AbstractAWSConfig=global_aws_config()) return service_quotas( @@ -539,8 +628,8 @@ Returns a list of the tags assigned to the specified applied quota. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) for the applied quota for which you want to list tags. You can get this information by using the Service Quotas console, or by - listing the quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas - AWS API operation. + listing the quotas using the list-service-quotas CLI command or the ListServiceQuotas + Amazon Web Services API operation. """ function list_tags_for_resource( @@ -575,10 +664,13 @@ end Adds a quota increase request to your quota request template. # Arguments -- `aws_region`: The AWS Region. -- `desired_value`: The new, increased value for the quota. -- `quota_code`: The quota identifier. -- `service_code`: The service identifier. +- `aws_region`: Specifies the Amazon Web Services Region to which the template applies. +- `desired_value`: Specifies the new, increased value for the quota. 
+- `quota_code`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. """ function put_service_quota_increase_request_into_template( @@ -634,10 +726,18 @@ end Submits a quota increase request for the specified quota. # Arguments -- `desired_value`: The new, increased value for the quota. -- `quota_code`: The quota identifier. -- `service_code`: The service identifier. +- `desired_value`: Specifies the new, increased value for the quota. +- `quota_code`: Specifies the quota identifier. To find the quota code for a specific + quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the + output for the quota you want. +- `service_code`: Specifies the service identifier. To find the service code value for an + Amazon Web Services service, use the ListServices operation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ContextId"`: Specifies the Amazon Web Services account or resource to which the quota + applies. The value in this field depends on the context scope associated with the specified + service quota. """ function request_service_quota_increase( DesiredValue, QuotaCode, ServiceCode; aws_config::AbstractAWSConfig=global_aws_config() @@ -688,7 +788,7 @@ quota. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) for the applied quota. You can get this information by using the Service Quotas console, or by listing the quotas using the - list-service-quotas AWS CLI command or the ListServiceQuotas AWS API operation. + list-service-quotas CLI command or the ListServiceQuotas Amazon Web Services API operation. - `tags`: The tags that you want to add to the resource. """ @@ -729,8 +829,8 @@ Removes tags from the specified applied quota. You can specify one or more tags # Arguments - `resource_arn`: The Amazon Resource Name (ARN) for the applied quota that you want to untag. You can get this information by using the Service Quotas console, or by listing the - quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas AWS API - operation. + quotas using the list-service-quotas CLI command or the ListServiceQuotas Amazon Web + Services API operation. - `tag_keys`: The keys of the tags that you want to remove from the resource. """ diff --git a/src/services/servicediscovery.jl b/src/services/servicediscovery.jl index 90612967da..1dfc6d12ca 100644 --- a/src/services/servicediscovery.jl +++ b/src/services/servicediscovery.jl @@ -355,12 +355,16 @@ end discover_instances(namespace_name, service_name, params::Dict{String,<:Any}) Discovers registered instances for a specified namespace and service. You can use -DiscoverInstances to discover instances for any type of namespace. For public and private -DNS namespaces, you can also use DNS queries to discover instances. +DiscoverInstances to discover instances for any type of namespace. DiscoverInstances +returns a randomized list of instances allowing customers to distribute traffic evenly +across instances. For public and private DNS namespaces, you can also use DNS queries to +discover instances. # Arguments - `namespace_name`: The HttpName name of the namespace. It's found in the HttpProperties - member of the Properties member of the namespace. 
+ member of the Properties member of the namespace. In most cases, Name and HttpName match. + However, if you reuse Name for namespace creation, a generated hash is added to HttpName to + distinguish the two. - `service_name`: The name of the service that you specified when you registered the instance. @@ -416,6 +420,51 @@ function discover_instances( ) end +""" + discover_instances_revision(namespace_name, service_name) + discover_instances_revision(namespace_name, service_name, params::Dict{String,<:Any}) + +Discovers the increasing revision associated with an instance. + +# Arguments +- `namespace_name`: The HttpName name of the namespace. It's found in the HttpProperties + member of the Properties member of the namespace. +- `service_name`: The name of the service that you specified when you registered the + instance. + +""" +function discover_instances_revision( + NamespaceName, ServiceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return servicediscovery( + "DiscoverInstancesRevision", + Dict{String,Any}("NamespaceName" => NamespaceName, "ServiceName" => ServiceName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function discover_instances_revision( + NamespaceName, + ServiceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return servicediscovery( + "DiscoverInstancesRevision", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "NamespaceName" => NamespaceName, "ServiceName" => ServiceName + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_instance(instance_id, service_id) get_instance(instance_id, service_id, params::Dict{String,<:Any}) @@ -542,8 +591,8 @@ end get_operation(operation_id, params::Dict{String,<:Any}) Gets information about any operation that returns an operation ID in the response, such as -a CreateService request. To get a list of operations that match specified criteria, see -ListOperations. +a CreateHttpNamespace request. To get a list of operations that match specified criteria, +see ListOperations. # Arguments - `operation_id`: The ID of the operation that you want to get more information about. @@ -721,7 +770,7 @@ end list_services(params::Dict{String,<:Any}) Lists summary information for all the services that are associated with one or more -specified namespaces. +namespaces. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -812,9 +861,9 @@ Guide. - `attributes`: A string map that contains the following information for the service that you specify in ServiceId: The attributes that apply to the records that are defined in the service. For each attribute, the applicable value. Do not include sensitive - information in the attributes if the namespace is discoverable by public DNS queries. - Supported attribute keys include the following: AWS_ALIAS_DNS_NAME If you want Cloud Map - to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing + information in the attributes if the namespace is discoverable by public DNS queries. The + following are the supported attribute keys. AWS_ALIAS_DNS_NAME If you want Cloud Map to + create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that's associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference. 
Note the following: The configuration for the service that's @@ -822,38 +871,39 @@ Guide. the service that's specified by ServiceId, the value of RoutingPolicy must be WEIGHTED. If the service that's specified by ServiceId includes HealthCheckConfig settings, Cloud Map will create the Route 53 health check, but it doesn't associate the health check with the - alias record. Auto naming currently doesn't support creating alias records that route + alias record. Cloud Map currently doesn't support creating alias records that route traffic to Amazon Web Services resources other than Elastic Load Balancing load balancers. If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the - AWS_INSTANCE attributes. AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 - instance ID for the instance. If the AWS_EC2_INSTANCE_ID attribute is specified, then the - only other attribute that can be specified is AWS_INIT_HEALTH_STATUS. When the - AWS_EC2_INSTANCE_ID attribute is specified, then the AWS_INSTANCE_IPV4 attribute will be - filled out with the primary private IPv4 address. AWS_INIT_HEALTH_STATUS If the service - configuration includes HealthCheckCustomConfig, you can optionally use - AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or - UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is - HEALTHY. AWS_INSTANCE_CNAME If the service configuration includes a CNAME record, the - domain name that you want Route 53 to return in response to DNS queries (for example, - example.com). This value is required if the service specified by ServiceId includes - settings for an CNAME record. AWS_INSTANCE_IPV4 If the service configuration includes an - A record, the IPv4 address that you want Route 53 to return in response to DNS queries - (for example, 192.0.2.44). This value is required if the service specified by ServiceId - includes settings for an A record. If the service includes settings for an SRV record, you - must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_IPV6 - If the service configuration includes an AAAA record, the IPv6 address that you want - Route 53 to return in response to DNS queries (for example, - 2001:0db8:85a3:0000:0000:abcd:0001:2345). This value is required if the service specified - by ServiceId includes settings for an AAAA record. If the service includes settings for an - SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. - AWS_INSTANCE_PORT If the service includes an SRV record, the value that you want Route 53 - to return for the port. If the service includes HealthCheckConfig, the port on the endpoint - that you want Route 53 to send requests to. This value is required if you specified - settings for an SRV record or a Route 53 health check when you created the service. - Custom attributes You can add up to 30 custom attributes. For each key-value pair, the - maximum length of the attribute name is 255 characters, and the maximum length of the - attribute value is 1,024 characters. The total size of all provided attributes (sum of all - keys and values) must not exceed 5,000 characters. + AWS_INSTANCE attributes. The AWS_ALIAS_DNS_NAME is not supported in the GovCloud (US) + Regions. AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 instance ID for the + instance. If the AWS_EC2_INSTANCE_ID attribute is specified, then the only other attribute + that can be specified is AWS_INIT_HEALTH_STATUS. 
When the AWS_EC2_INSTANCE_ID attribute is + specified, then the AWS_INSTANCE_IPV4 attribute will be filled out with the primary private + IPv4 address. AWS_INIT_HEALTH_STATUS If the service configuration includes + HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the + initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a + value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY. AWS_INSTANCE_CNAME If + the service configuration includes a CNAME record, the domain name that you want Route 53 + to return in response to DNS queries (for example, example.com). This value is required if + the service specified by ServiceId includes settings for an CNAME record. + AWS_INSTANCE_IPV4 If the service configuration includes an A record, the IPv4 address that + you want Route 53 to return in response to DNS queries (for example, 192.0.2.44). This + value is required if the service specified by ServiceId includes settings for an A record. + If the service includes settings for an SRV record, you must specify a value for + AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_IPV6 If the service + configuration includes an AAAA record, the IPv6 address that you want Route 53 to return + in response to DNS queries (for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345). This + value is required if the service specified by ServiceId includes settings for an AAAA + record. If the service includes settings for an SRV record, you must specify a value for + AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_PORT If the service includes + an SRV record, the value that you want Route 53 to return for the port. If the service + includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send + requests to. This value is required if you specified settings for an SRV record or a + Route 53 health check when you created the service. Custom attributes You can add up to + 30 custom attributes. For each key-value pair, the maximum length of the attribute name is + 255 characters, and the maximum length of the attribute value is 1,024 characters. The + total size of all provided attributes (sum of all keys and values) must not exceed 5,000 + characters. - `instance_id`: An identifier that you want to associate with the instance. Note the following: If the service that's specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV diff --git a/src/services/ses.jl b/src/services/ses.jl index b7c047645a..597039f3df 100644 --- a/src/services/ses.jl +++ b/src/services/ses.jl @@ -15,9 +15,9 @@ can execute this operation no more than once per second. # Arguments - `original_rule_set_name`: The name of the rule set to clone. -- `rule_set_name`: The name of the rule set to create. The name must: This value can only - contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). Start - and end with a letter or number. Contain less than 64 characters. +- `rule_set_name`: The name of the rule set to create. The name must meet the following + requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or + dashes (-). Start and end with a letter or number. Contain 64 characters or fewer. """ function clone_receipt_rule_set( @@ -101,16 +101,16 @@ end Creates a configuration set event destination. When you create or update an event destination, you must provide one, and only one, destination. 
The destination can be CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). -An event destination is the AWS service to which Amazon SES publishes the email sending -events associated with a configuration set. For information about using configuration sets, -see the Amazon SES Developer Guide. You can execute this operation no more than once per -second. +An event destination is the Amazon Web Services service to which Amazon SES publishes the +email sending events associated with a configuration set. For information about using +configuration sets, see the Amazon SES Developer Guide. You can execute this operation no +more than once per second. # Arguments - `configuration_set_name`: The name of the configuration set that the event destination should be associated with. -- `event_destination`: An object that describes the AWS service that email sending event - information will be published to. +- `event_destination`: An object that describes the Amazon Web Services service to which + email sending event information is published. """ function create_configuration_set_event_destination( @@ -320,12 +320,12 @@ Developer Guide. You can execute this operation no more than once per second. # Arguments - `rule`: A data structure that contains the specified rule's name, actions, recipients, domains, enabled status, scan status, and TLS policy. -- `rule_set_name`: The name of the rule set that the receipt rule will be added to. +- `rule_set_name`: The name of the rule set where the receipt rule is added. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"After"`: The name of an existing rule after which the new rule will be placed. If this - parameter is null, the new rule will be inserted at the beginning of the rule list. +- `"After"`: The name of an existing rule after which the new rule is placed. If this + parameter is null, the new rule is inserted at the beginning of the rule list. """ function create_receipt_rule( Rule, RuleSetName; aws_config::AbstractAWSConfig=global_aws_config() @@ -365,9 +365,9 @@ Creates an empty receipt rule set. For information about setting up receipt rule the Amazon SES Developer Guide. You can execute this operation no more than once per second. # Arguments -- `rule_set_name`: The name of the rule set to create. The name must: This value can only - contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). Start - and end with a letter or number. Contain less than 64 characters. +- `rule_set_name`: The name of the rule set to create. The name must meet the following + requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or + dashes (-). Start and end with a letter or number. Contain 64 characters or fewer. """ function create_receipt_rule_set( @@ -400,12 +400,12 @@ end create_template(template, params::Dict{String,<:Any}) Creates an email template. Email templates enable you to send personalized email to one or -more destinations in a single API operation. For more information, see the Amazon SES -Developer Guide. You can execute this operation no more than once per second. +more destinations in a single operation. For more information, see the Amazon SES Developer +Guide. You can execute this operation no more than once per second. # Arguments -- `template`: The content of the email, composed of a subject line, an HTML part, and a - text-only part. 
+- `template`: The content of the email, composed of a subject line and either an HTML part + or a text-only part. """ function create_template(Template; aws_config::AbstractAWSConfig=global_aws_config()) @@ -533,13 +533,12 @@ Deletes an association between a configuration set and a custom domain for open event tracking. By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES -Developer Guide. Deleting this kind of association will result in emails sent using the +Developer Guide. Deleting this kind of association results in emails sent using the specified configuration set to capture open and click events using the standard, Amazon SES-operated domains. # Arguments -- `configuration_set_name`: The name of the configuration set from which you want to delete - the tracking options. +- `configuration_set_name`: The name of the configuration set. """ function delete_configuration_set_tracking_options( @@ -580,8 +579,7 @@ verification email templates, see Using Custom Verification Email Templates in t SES Developer Guide. You can execute this operation no more than once per second. # Arguments -- `template_name`: The name of the custom verification email template that you want to - delete. +- `template_name`: The name of the custom verification email template to delete. """ function delete_custom_verification_email_template( @@ -617,7 +615,8 @@ Deletes the specified identity (an email address or a domain) from the list of v identities. You can execute this operation no more than once per second. # Arguments -- `identity`: The identity to be removed from the list of identities for the AWS Account. +- `identity`: The identity to be removed from the list of identities for the Amazon Web + Services account. """ function delete_identity(Identity; aws_config::AbstractAWSConfig=global_aws_config()) @@ -648,19 +647,18 @@ end delete_identity_policy(identity, policy_name, params::Dict{String,<:Any}) Deletes the specified sending authorization policy for the given identity (an email address -or a domain). This API returns successfully even if a policy with the specified name does -not exist. This API is for the identity owner only. If you have not verified the identity, -this API will return an error. Sending authorization is a feature that enables an identity -owner to authorize other senders to use its identities. For information about using sending -authorization, see the Amazon SES Developer Guide. You can execute this operation no more -than once per second. +or a domain). This operation returns successfully even if a policy with the specified name +does not exist. This operation is for the identity owner only. If you have not verified +the identity, it returns an error. Sending authorization is a feature that enables an +identity owner to authorize other senders to use its identities. For information about +using sending authorization, see the Amazon SES Developer Guide. You can execute this +operation no more than once per second. # Arguments -- `identity`: The identity that is associated with the policy that you want to delete. You - can specify the identity by using its name or by using its Amazon Resource Name (ARN). - Examples: user@example.com, example.com, - arn:aws:ses:us-east-1:123456789012:identity/example.com. To successfully call this API, you - must own the identity. 
+- `identity`: The identity that is associated with the policy to delete. You can specify + the identity by using its name or by using its Amazon Resource Name (ARN). Examples: + user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. To + successfully call this operation, you must own the identity. - `policy_name`: The name of the policy to be deleted. """ @@ -1037,7 +1035,7 @@ end get_account_sending_enabled() get_account_sending_enabled(params::Dict{String,<:Any}) -Returns the email sending status of the Amazon SES account for the current region. You can +Returns the email sending status of the Amazon SES account for the current Region. You can execute this operation no more than once per second. """ @@ -1067,8 +1065,7 @@ Templates in the Amazon SES Developer Guide. You can execute this operation no m once per second. # Arguments -- `template_name`: The name of the custom verification email template that you want to - retrieve. +- `template_name`: The name of the custom verification email template to retrieve. """ function get_custom_verification_email_template( @@ -1227,17 +1224,17 @@ end Returns the requested sending authorization policies for the given identity (an email address or a domain). The policies are returned as a map of policy names to policy -contents. You can retrieve a maximum of 20 policies at a time. This API is for the -identity owner only. If you have not verified the identity, this API will return an error. -Sending authorization is a feature that enables an identity owner to authorize other -senders to use its identities. For information about using sending authorization, see the -Amazon SES Developer Guide. You can execute this operation no more than once per second. +contents. You can retrieve a maximum of 20 policies at a time. This operation is for the +identity owner only. If you have not verified the identity, it returns an error. Sending +authorization is a feature that enables an identity owner to authorize other senders to use +its identities. For information about using sending authorization, see the Amazon SES +Developer Guide. You can execute this operation no more than once per second. # Arguments -- `identity`: The identity for which the policies will be retrieved. You can specify an +- `identity`: The identity for which the policies are retrieved. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. To - successfully call this API, you must own the identity. + successfully call this operation, you must own the identity. - `policy_names`: A list of the names of policies to be retrieved. You can retrieve a maximum of 20 policies at a time. If you do not know the names of the policies that are attached to the identity, you can use ListIdentityPolicies. @@ -1283,15 +1280,15 @@ verification status of an email address is \"Pending\" until the email address o the link within the verification email that Amazon SES sent to that address. If the email address owner clicks the link within 24 hours, the verification status of the email address changes to \"Success\". If the link is not clicked within 24 hours, the verification status -changes to \"Failed.\" In that case, if you still want to verify the email address, you -must restart the verification process from the beginning. 
For domain identities, the -domain's verification status is \"Pending\" as Amazon SES searches for the required TXT -record in the DNS settings of the domain. When Amazon SES detects the record, the domain's -verification status changes to \"Success\". If Amazon SES is unable to detect the record -within 72 hours, the domain's verification status changes to \"Failed.\" In that case, if -you still want to verify the domain, you must restart the verification process from the -beginning. This operation is throttled at one request per second and can only get -verification attributes for up to 100 identities at a time. +changes to \"Failed.\" In that case, to verify the email address, you must restart the +verification process from the beginning. For domain identities, the domain's verification +status is \"Pending\" as Amazon SES searches for the required TXT record in the DNS +settings of the domain. When Amazon SES detects the record, the domain's verification +status changes to \"Success\". If Amazon SES is unable to detect the record within 72 +hours, the domain's verification status changes to \"Failed.\" In that case, to verify the +domain, you must restart the verification process from the beginning. This operation is +throttled at one request per second and can only get verification attributes for up to 100 +identities at a time. # Arguments - `identities`: A list of identities. @@ -1345,10 +1342,10 @@ end get_send_statistics() get_send_statistics(params::Dict{String,<:Any}) -Provides sending statistics for the current AWS Region. The result is a list of data -points, representing the last two weeks of sending activity. Each data point in the list -contains statistics for a 15-minute period of time. You can execute this operation no more -than once per second. +Provides sending statistics for the current Amazon Web Services Region. The result is a +list of data points, representing the last two weeks of sending activity. Each data point +in the list contains statistics for a 15-minute period of time. You can execute this +operation no more than once per second. """ function get_send_statistics(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1370,7 +1367,7 @@ Displays the template object (which includes the Subject line, HTML part and tex the template you specify. You can execute this operation no more than once per second. # Arguments -- `template_name`: The name of the template you want to retrieve. +- `template_name`: The name of the template to retrieve. """ function get_template(TemplateName; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1401,13 +1398,13 @@ end list_configuration_sets(params::Dict{String,<:Any}) Provides a list of the configuration sets associated with your Amazon SES account in the -current AWS Region. For information about using configuration sets, see Monitoring Your -Amazon SES Sending Activity in the Amazon SES Developer Guide. You can execute this -operation no more than once per second. This operation will return up to 1,000 +current Amazon Web Services Region. For information about using configuration sets, see +Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide. You can +execute this operation no more than once per second. This operation returns up to 1,000 configuration sets each time it is run. If your Amazon SES account has more than 1,000 -configuration sets, this operation will also return a NextToken element. 
You can then -execute the ListConfigurationSets operation again, passing the NextToken parameter and the -value of the NextToken element to retrieve additional results. +configuration sets, this operation also returns NextToken. You can then execute the +ListConfigurationSets operation again, passing the NextToken parameter and the value of the +NextToken element to retrieve additional results. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1435,17 +1432,16 @@ end list_custom_verification_email_templates() list_custom_verification_email_templates(params::Dict{String,<:Any}) -Lists the existing custom verification email templates for your account in the current AWS -Region. For more information about custom verification email templates, see Using Custom -Verification Email Templates in the Amazon SES Developer Guide. You can execute this -operation no more than once per second. +Lists the existing custom verification email templates for your account in the current +Amazon Web Services Region. For more information about custom verification email templates, +see Using Custom Verification Email Templates in the Amazon SES Developer Guide. You can +execute this operation no more than once per second. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of custom verification email templates to return. This value must be at least 1 and less than or equal to 50. If you do not specify a value, or if - you specify a value less than 1 or greater than 50, the operation will return up to 50 - results. + you specify a value less than 1 or greater than 50, the operation returns up to 50 results. - `"NextToken"`: An array that contains the name and creation time stamp for each template in your Amazon SES account. """ @@ -1473,14 +1469,21 @@ end list_identities() list_identities(params::Dict{String,<:Any}) -Returns a list containing all of the identities (email addresses and domains) for your AWS -account in the current AWS Region, regardless of verification status. You can execute this -operation no more than once per second. +Returns a list containing all of the identities (email addresses and domains) for your +Amazon Web Services account in the current Amazon Web Services Region, regardless of +verification status. You can execute this operation no more than once per second. It's +recommended that for successive pagination calls of this API, you continue to use the +same parameter/value pairs as used in the original call, e.g., if you used +IdentityType=Domain in the original call and received a NextToken in the response, you +should continue providing the IdentityType=Domain parameter for further NextToken calls; +however, if you didn't provide the IdentityType parameter in the original call, then +continue to not provide it for successive pagination calls. Using this protocol will ensure +consistent results. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IdentityType"`: The type of the identities to list. Possible values are - \"EmailAddress\" and \"Domain\". If this parameter is omitted, then all identities will be + \"EmailAddress\" and \"Domain\". If this parameter is omitted, then all identities are listed. - `"MaxItems"`: The maximum number of identities per page. Possible values are 1-1000 inclusive. 
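To illustrate the pagination guidance added above for ListIdentities, a minimal sketch (not part of the generated file): the `@service` module name is inferred from this file's name, and the response-field access is schematic, since the exact shape of the parsed response is not shown in this patch.

```julia
using AWS
using AWS: @service
@service SES

# Sketch of the recommended pagination protocol: once IdentityType=Domain is used on
# the first call, keep the same filter on every follow-up call and only add NextToken.
function list_all_domain_identities()
    pages = Any[]
    params = Dict{String,Any}("IdentityType" => "Domain", "MaxItems" => 1000)
    while true
        resp = SES.list_identities(params)   # same parameter/value pairs on every call
        push!(pages, resp)
        # NOTE: the key names below are schematic; adjust to the parsed response shape.
        result = get(resp, "ListIdentitiesResult", Dict{String,Any}())
        token = get(result, "NextToken", nothing)
        token === nothing && return pages
        params["NextToken"] = token          # only the pagination token changes
    end
end
```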
@@ -1502,19 +1505,19 @@ end list_identity_policies(identity, params::Dict{String,<:Any}) Returns a list of sending authorization policies that are attached to the given identity -(an email address or a domain). This API returns only a list. If you want the actual policy -content, you can use GetIdentityPolicies. This API is for the identity owner only. If you -have not verified the identity, this API will return an error. Sending authorization is a +(an email address or a domain). This operation returns only a list. To get the actual +policy content, use GetIdentityPolicies. This operation is for the identity owner only. If +you have not verified the identity, it returns an error. Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide. You can execute this operation no more than once per second. # Arguments -- `identity`: The identity that is associated with the policy for which the policies will - be listed. You can specify an identity by using its name or by using its Amazon Resource - Name (ARN). Examples: user@example.com, example.com, - arn:aws:ses:us-east-1:123456789012:identity/example.com. To successfully call this API, you - must own the identity. +- `identity`: The identity that is associated with the policy for which the policies are + listed. You can specify an identity by using its name or by using its Amazon Resource Name + (ARN). Examples: user@example.com, example.com, + arn:aws:ses:us-east-1:123456789012:identity/example.com. To successfully call this + operation, you must own the identity. """ function list_identity_policies(Identity; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1544,9 +1547,9 @@ end list_receipt_filters() list_receipt_filters(params::Dict{String,<:Any}) -Lists the IP address filters associated with your AWS account in the current AWS Region. -For information about managing IP address filters, see the Amazon SES Developer Guide. You -can execute this operation no more than once per second. +Lists the IP address filters associated with your Amazon Web Services account in the +current Amazon Web Services Region. For information about managing IP address filters, see +the Amazon SES Developer Guide. You can execute this operation no more than once per second. """ function list_receipt_filters(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1564,11 +1567,12 @@ end list_receipt_rule_sets() list_receipt_rule_sets(params::Dict{String,<:Any}) -Lists the receipt rule sets that exist under your AWS account in the current AWS Region. If -there are additional receipt rule sets to be retrieved, you will receive a NextToken that -you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries. -For information about managing receipt rule sets, see the Amazon SES Developer Guide. You -can execute this operation no more than once per second. +Lists the receipt rule sets that exist under your Amazon Web Services account in the +current Amazon Web Services Region. If there are additional receipt rule sets to be +retrieved, you receive a NextToken that you can provide to the next call to +ListReceiptRuleSets to retrieve the additional entries. For information about managing +receipt rule sets, see the Amazon SES Developer Guide. You can execute this operation no +more than once per second. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1595,14 +1599,14 @@ end list_templates() list_templates(params::Dict{String,<:Any}) -Lists the email templates present in your Amazon SES account in the current AWS Region. You -can execute this operation no more than once per second. +Lists the email templates present in your Amazon SES account in the current Amazon Web +Services Region. You can execute this operation no more than once per second. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxItems"`: The maximum number of templates to return. This value must be at least 1 - and less than or equal to 10. If you do not specify a value, or if you specify a value less - than 1 or greater than 10, the operation will return up to 10 results. + and less than or equal to 100. If more than 100 items are requested, the page size will + automatically set to 100. If you do not specify a value, 10 is the default page size. - `"NextToken"`: A token returned from a previous call to ListTemplates to indicate the position in the list of email templates. """ @@ -1648,8 +1652,7 @@ end Adds or updates the delivery options for a configuration set. # Arguments -- `configuration_set_name`: The name of the configuration set that you want to specify the - delivery options for. +- `configuration_set_name`: The name of the configuration set. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1690,17 +1693,17 @@ end put_identity_policy(identity, policy, policy_name, params::Dict{String,<:Any}) Adds or updates a sending authorization policy for the specified identity (an email address -or a domain). This API is for the identity owner only. If you have not verified the -identity, this API will return an error. Sending authorization is a feature that enables -an identity owner to authorize other senders to use its identities. For information about -using sending authorization, see the Amazon SES Developer Guide. You can execute this -operation no more than once per second. +or a domain). This operation is for the identity owner only. If you have not verified the +identity, it returns an error. Sending authorization is a feature that enables an identity +owner to authorize other senders to use its identities. For information about using sending +authorization, see the Amazon SES Developer Guide. You can execute this operation no more +than once per second. # Arguments -- `identity`: The identity that the policy will apply to. You can specify an identity by +- `identity`: The identity to which that the policy applies. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. To successfully call - this API, you must own the identity. + this operation, you must own the identity. - `policy`: The text of the policy in JSON format. The policy cannot exceed 4 KB. For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide. @@ -1748,14 +1751,13 @@ end reorder_receipt_rule_set(rule_names, rule_set_name, params::Dict{String,<:Any}) Reorders the receipt rules within a receipt rule set. All of the rules in the rule set -must be represented in this request. That is, this API will return an error if the reorder -request doesn't explicitly position all of the rules. For information about managing -receipt rule sets, see the Amazon SES Developer Guide. 
You can execute this operation no -more than once per second. +must be represented in this request. That is, it is an error if the reorder request doesn't +explicitly position all of the rules. For information about managing receipt rule sets, +see the Amazon SES Developer Guide. You can execute this operation no more than once per +second. # Arguments -- `rule_names`: A list of the specified receipt rule set's receipt rules in the order that - you want to put them. +- `rule_names`: The specified receipt rule set's receipt rules, in order. - `rule_set_name`: The name of the receipt rule set to reorder. """ @@ -1794,10 +1796,10 @@ end send_bounce(bounce_sender, bounced_recipient_info_list, original_message_id, params::Dict{String,<:Any}) Generates and sends a bounce message to the sender of an email you received through Amazon -SES. You can only use this API on an email up to 24 hours after you receive it. You cannot -use this API to send generic bounces for mail that was not received by Amazon SES. For -information about receiving email through Amazon SES, see the Amazon SES Developer Guide. -You can execute this operation no more than once per second. +SES. You can only use this operation on an email up to 24 hours after you receive it. You +cannot use this operation to send generic bounces for mail that was not received by Amazon +SES. For information about receiving email through Amazon SES, see the Amazon SES +Developer Guide. You can execute this operation no more than once per second. # Arguments - `bounce_sender`: The address to use in the \"From\" header of the bounce message. This @@ -1814,8 +1816,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to use the address in the \"From\" header of the bounce. For more information about sending authorization, see the Amazon SES Developer Guide. - `"Explanation"`: Human-readable text for the bounce message to explain the failure. If - not specified, the text will be auto-generated based on the bounced recipient information. -- `"MessageDsn"`: Message-related DSN fields. If not specified, Amazon SES will choose the + not specified, the text is auto-generated based on the bounced recipient information. +- `"MessageDsn"`: Message-related DSN fields. If not specified, Amazon SES chooses the values. """ function send_bounce( @@ -1865,28 +1867,27 @@ end send_bulk_templated_email(destinations, source, template, params::Dict{String,<:Any}) Composes an email message to multiple destinations. The message body is created using an -email template. In order to send email using the SendBulkTemplatedEmail operation, your -call to the API must meet the following requirements: The call must refer to an existing -email template. You can create email templates using the CreateTemplate operation. The -message must be sent from a verified email address or domain. If your account is still in -the Amazon SES sandbox, you may only send to verified addresses or domains, or to email -addresses associated with the Amazon SES Mailbox Simulator. For more information, see -Verifying Email Addresses and Domains in the Amazon SES Developer Guide. The maximum -message size is 10 MB. Each Destination parameter must include at least one recipient -email address. The recipient address can be a To: address, a CC: address, or a BCC: -address. 
If a recipient email address is invalid (that is, it is not in the format -UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if -the message contains other recipients that are valid. The message may not include more -than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email -message to a larger audience, you can divide your recipient list into groups of 50 or -fewer, and then call the SendBulkTemplatedEmail operation several times to send the message -to each group. The number of destinations you can contact in a single call to the API may -be limited by your account's maximum sending rate. +email template. To send email using this operation, your call must meet the following +requirements: The call must refer to an existing email template. You can create email +templates using CreateTemplate. The message must be sent from a verified email address or +domain. If your account is still in the Amazon SES sandbox, you may send only to verified +addresses or domains, or to email addresses associated with the Amazon SES Mailbox +Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon +SES Developer Guide. The maximum message size is 10 MB. Each Destination parameter +must include at least one recipient email address. The recipient address can be a To: +address, a CC: address, or a BCC: address. If a recipient email address is invalid (that +is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message +is rejected, even if the message contains other recipients that are valid. The message +may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need +to send an email message to a larger audience, you can divide your recipient list into +groups of 50 or fewer, and then call the SendBulkTemplatedEmail operation several times to +send the message to each group. The number of destinations you can contact in a single +call can be limited by your account's maximum sending rate. # Arguments - `destinations`: One or more Destination objects. All of the recipients in a Destination - will receive the same version of the email. You can specify up to 50 Destination objects - within a Destinations array. + receive the same version of the email. You can specify up to 50 Destination objects within + a Destinations array. - `source`: The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide. If you @@ -1894,13 +1895,13 @@ be limited by your account's maximum sending rate. authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide. Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the - local part of a source email address (the part of the email address that precedes the @ - sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part - after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as - described in RFC3492. The sender name (also known as the friendly name) may contain - non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as - described in RFC 2047. 
MIME encoded-word syntax uses the following form: - =?charset?encoding?encoded-text?=. + email address string must be 7-bit ASCII. If you want to send to or from email addresses + that contain Unicode characters in the domain part of an address, you must encode the + domain using Punycode. Punycode is not permitted in the local part of the email address + (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode + characters in the \"friendly from\" name, you must encode the \"friendly from\" name using + MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For + more information about Punycode, see RFC 3492. - `template`: The template to use when sending this email. # Optional Parameters @@ -1915,13 +1916,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template. - `"ReplyToAddresses"`: The reply-to email address(es) for the message. If the recipient - replies to the message, each reply-to address will receive the reply. -- `"ReturnPath"`: The email address that bounces and complaints will be forwarded to when + replies to the message, each reply-to address receives the reply. +- `"ReturnPath"`: The email address that bounces and complaints are forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then - an error message will be returned from the recipient's ISP; this message will then be - forwarded to the email address specified by the ReturnPath parameter. The ReturnPath - parameter is never overwritten. This email address must be either individually verified - with Amazon SES, or from a domain that has been verified with Amazon SES. + an error message is returned from the recipient's ISP; this message is forwarded to the + email address specified by the ReturnPath parameter. The ReturnPath parameter is never + overwritten. This email address must be either individually verified with Amazon SES, or + from a domain that has been verified with Amazon SES. - `"ReturnPathArn"`: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter. For example, if the owner @@ -1982,11 +1983,12 @@ end send_custom_verification_email(email_address, template_name, params::Dict{String,<:Any}) Adds an email address to the list of identities for your Amazon SES account in the current -AWS Region and attempts to verify it. As a result of executing this operation, a customized -verification email is sent to the specified address. To use this operation, you must first -create a custom verification email template. For more information about creating and using -custom verification email templates, see Using Custom Verification Email Templates in the -Amazon SES Developer Guide. You can execute this operation no more than once per second. +Amazon Web Services Region and attempts to verify it. As a result of executing this +operation, a customized verification email is sent to the specified address. To use this +operation, you must first create a custom verification email template. For more information +about creating and using custom verification email templates, see Using Custom Verification +Email Templates in the Amazon SES Developer Guide. You can execute this operation no more +than once per second. 
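+
+As a sketch, assuming a custom verification email template named \"my_template\" already
+exists in your account (both values below are placeholders):
+
+    send_custom_verification_email(\"user@example.com\", \"my_template\")
+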
# Arguments - `email_address`: The email address to verify. @@ -2034,26 +2036,25 @@ end send_email(destination, message, source) send_email(destination, message, source, params::Dict{String,<:Any}) -Composes an email message and immediately queues it for sending. In order to send email -using the SendEmail operation, your message must meet the following requirements: The -message must be sent from a verified email address or domain. If you attempt to send email -using a non-verified address or domain, the operation will result in an \"Email address not -verified\" error. If your account is still in the Amazon SES sandbox, you may only send -to verified addresses or domains, or to email addresses associated with the Amazon SES -Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the -Amazon SES Developer Guide. The maximum message size is 10 MB. The message must -include at least one recipient email address. The recipient address can be a To: address, a -CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not -in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be -rejected, even if the message contains other recipients that are valid. The message may -not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to -send an email message to a larger audience, you can divide your recipient list into groups -of 50 or fewer, and then call the SendEmail operation several times to send the message to -each group. For every message that you send, the total number of recipients (including -each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of -emails you can send in a 24-hour period (your sending quota). For more information about -sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES -Developer Guide. +Composes an email message and immediately queues it for sending. To send email using this +operation, your message must meet the following requirements: The message must be sent +from a verified email address or domain. If you attempt to send email using a non-verified +address or domain, the operation results in an \"Email address not verified\" error. If +your account is still in the Amazon SES sandbox, you may only send to verified addresses or +domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more +information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide. + The maximum message size is 10 MB. The message must include at least one recipient email +address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a +recipient email address is invalid (that is, it is not in the format +UserName@[SubDomain.]Domain.TopLevelDomain), the entire message is rejected, even if the +message contains other recipients that are valid. The message may not include more than +50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to +a larger audience, you can divide your recipient list into groups of 50 or fewer, and then +call the SendEmail operation several times to send the message to each group. For every +message that you send, the total number of recipients (including each recipient in the To:, +CC: and BCC: fields) is counted against the maximum number of emails you can send in a +24-hour period (your sending quota). 
For more information about sending quotas in Amazon +SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide. # Arguments - `destination`: The destination for this email, composed of To:, CC:, and BCC: fields. @@ -2065,26 +2066,26 @@ Developer Guide. authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide. Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the - local part of a source email address (the part of the email address that precedes the @ - sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part - after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as - described in RFC3492. The sender name (also known as the friendly name) may contain - non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as - described in RFC 2047. MIME encoded-word syntax uses the following form: - =?charset?encoding?encoded-text?=. + email address string must be 7-bit ASCII. If you want to send to or from email addresses + that contain Unicode characters in the domain part of an address, you must encode the + domain using Punycode. Punycode is not permitted in the local part of the email address + (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode + characters in the \"friendly from\" name, you must encode the \"friendly from\" name using + MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For + more information about Punycode, see RFC 3492. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ConfigurationSetName"`: The name of the configuration set to use when you send an email using SendEmail. - `"ReplyToAddresses"`: The reply-to email address(es) for the message. If the recipient - replies to the message, each reply-to address will receive the reply. -- `"ReturnPath"`: The email address that bounces and complaints will be forwarded to when + replies to the message, each reply-to address receives the reply. +- `"ReturnPath"`: The email address that bounces and complaints are forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then - an error message will be returned from the recipient's ISP; this message will then be - forwarded to the email address specified by the ReturnPath parameter. The ReturnPath - parameter is never overwritten. This email address must be either individually verified - with Amazon SES, or from a domain that has been verified with Amazon SES. + an error message is returned from the recipient's ISP; this message is forwarded to the + email address specified by the ReturnPath parameter. The ReturnPath parameter is never + overwritten. This email address must be either individually verified with Amazon SES, or + from a domain that has been verified with Amazon SES. - `"ReturnPathArn"`: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter. For example, if the owner @@ -2145,9 +2146,9 @@ end send_raw_email(raw_message, params::Dict{String,<:Any}) Composes an email message and immediately queues it for sending. 
This operation is more -flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can +flexible than the SendEmail operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for -example, when you want to send a multipart MIME email (such a message that contains both a +example, when you need to send a multipart MIME email (such a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments. The SendRawEmail operation has the following requirements: You can only send email from verified email addresses or domains. If you try to send email from an address @@ -2169,22 +2170,22 @@ modify the contents of your message (for example, if you use open and click trac content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide. Additionally, keep the following considerations in mind when using the SendRawEmail operation: Although you can customize the message headers when using the -SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date -headers; if you passed these headers when creating the message, they will be overwritten by -the values that Amazon SES provides. If you are using sending authorization to send on -behalf of another user, SendRawEmail enables you to specify the cross-account identity for -the email's Source, From, and Return-Path parameters in one of two ways: you can pass -optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include -the following X-headers in the header of your raw email: X-SES-SOURCE-ARN -X-SES-FROM-ARN X-SES-RETURN-PATH-ARN Don't include these X-headers in the DKIM -signature. Amazon SES removes these before it sends the email. If you only specify the -SourceIdentityArn parameter, Amazon SES sets the From and Return-Path addresses to the same -identity that you specified. For more information about sending authorization, see the -Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide. For every -message that you send, the total number of recipients (including each recipient in the To:, -CC: and BCC: fields) is counted against the maximum number of emails you can send in a -24-hour period (your sending quota). For more information about sending quotas in Amazon -SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide. +SendRawEmail operation, Amazon SES automatically applies its own Message-ID and Date +headers; if you passed these headers when creating the message, they are overwritten by the +values that Amazon SES provides. If you are using sending authorization to send on behalf +of another user, SendRawEmail enables you to specify the cross-account identity for the +email's Source, From, and Return-Path parameters in one of two ways: you can pass optional +parameters SourceArn, FromArn, and/or ReturnPathArn, or you can include the following +X-headers in the header of your raw email: X-SES-SOURCE-ARN X-SES-FROM-ARN +X-SES-RETURN-PATH-ARN Don't include these X-headers in the DKIM signature. Amazon SES +removes these before it sends the email. If you only specify the SourceIdentityArn +parameter, Amazon SES sets the From and Return-Path addresses to the same identity that you +specified. 
For more information about sending authorization, see the Using Sending +Authorization with Amazon SES in the Amazon SES Developer Guide. For every message that +you send, the total number of recipients (including each recipient in the To:, CC: and BCC: +fields) is counted against the maximum number of emails you can send in a 24-hour period +(your sending quota). For more information about sending quotas in Amazon SES, see Managing +Your Amazon SES Sending Limits in the Amazon SES Developer Guide. # Arguments - `raw_message`: The raw email message itself. The message has to meet the following @@ -2226,16 +2227,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Source"`: The identity's email address. If you do not provide a value for this parameter, you must specify a \"From\" address in the raw text of the message. (You can also specify both.) Amazon SES does not support the SMTPUTF8 extension, as described - inRFC6531. For this reason, the local part of a source email address (the part of the email - address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain - part of an address (the part after the @ sign) contains non-ASCII characters, they must be - encoded using Punycode, as described in RFC3492. The sender name (also known as the - friendly name) may contain non-ASCII characters. These characters must be encoded using - MIME encoded-word syntax, as described in RFC 2047. MIME encoded-word syntax uses the - following form: =?charset?encoding?encoded-text?=. If you specify the Source parameter and - have feedback forwarding enabled, then bounces and complaints will be sent to this email - address. This takes precedence over any Return-Path header that you might include in the - raw text of the message. + inRFC6531. For this reason, the email address string must be 7-bit ASCII. If you want to + send to or from email addresses that contain Unicode characters in the domain part of an + address, you must encode the domain using Punycode. Punycode is not permitted in the local + part of the email address (the part before the @ sign) nor in the \"friendly from\" name. + If you want to use Unicode characters in the \"friendly from\" name, you must encode the + \"friendly from\" name using MIME encoded-word syntax, as described in Sending raw email + using the Amazon SES API. For more information about Punycode, see RFC 3492. If you + specify the Source parameter and have feedback forwarding enabled, then bounces and + complaints are sent to this email address. This takes precedence over any Return-Path + header that you might include in the raw text of the message. - `"SourceArn"`: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter. For example, if the owner of @@ -2278,30 +2279,29 @@ end send_templated_email(destination, source, template, template_data) send_templated_email(destination, source, template, template_data, params::Dict{String,<:Any}) -Composes an email message using an email template and immediately queues it for sending. In -order to send email using the SendTemplatedEmail operation, your call to the API must meet -the following requirements: The call must refer to an existing email template. You can -create email templates using the CreateTemplate operation. The message must be sent from -a verified email address or domain. 
If your account is still in the Amazon SES sandbox, -you may only send to verified addresses or domains, or to email addresses associated with -the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and -Domains in the Amazon SES Developer Guide. The maximum message size is 10 MB. Calls to -the SendTemplatedEmail operation may only include one Destination parameter. A destination -is a set of recipients who will receive the same version of the email. The Destination -parameter can include up to 50 recipients, across the To:, CC: and BCC: fields. The -Destination parameter must include at least one recipient email address. The recipient -address can be a To: address, a CC: address, or a BCC: address. If a recipient email -address is invalid (that is, it is not in the format -UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if -the message contains other recipients that are valid. If your call to the -SendTemplatedEmail operation includes all of the required parameters, Amazon SES accepts it -and returns a Message ID. However, if Amazon SES can't render the email because the -template contains errors, it doesn't send the email. Additionally, because it already -accepted the message, Amazon SES doesn't return a message stating that it was unable to -send the email. For these reasons, we highly recommend that you set up Amazon SES to send -you notifications when Rendering Failure events occur. For more information, see Sending -Personalized Email Using the Amazon SES API in the Amazon Simple Email Service Developer -Guide. +Composes an email message using an email template and immediately queues it for sending. To +send email using this operation, your call must meet the following requirements: The call +must refer to an existing email template. You can create email templates using the +CreateTemplate operation. The message must be sent from a verified email address or +domain. If your account is still in the Amazon SES sandbox, you may only send to verified +addresses or domains, or to email addresses associated with the Amazon SES Mailbox +Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon +SES Developer Guide. The maximum message size is 10 MB. Calls to the +SendTemplatedEmail operation may only include one Destination parameter. A destination is a +set of recipients that receives the same version of the email. The Destination parameter +can include up to 50 recipients, across the To:, CC: and BCC: fields. The Destination +parameter must include at least one recipient email address. The recipient address can be a +To: address, a CC: address, or a BCC: address. If a recipient email address is invalid +(that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire +message is rejected, even if the message contains other recipients that are valid. If +your call to the SendTemplatedEmail operation includes all of the required parameters, +Amazon SES accepts it and returns a Message ID. However, if Amazon SES can't render the +email because the template contains errors, it doesn't send the email. Additionally, +because it already accepted the message, Amazon SES doesn't return a message stating that +it was unable to send the email. For these reasons, we highly recommend that you set up +Amazon SES to send you notifications when Rendering Failure events occur. 
For more +information, see Sending Personalized Email Using the Amazon SES API in the Amazon Simple +Email Service Developer Guide. # Arguments - `destination`: The destination for this email, composed of To:, CC:, and BCC: fields. A @@ -2312,14 +2312,14 @@ Guide. are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide. Amazon SES - does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the - local part of a source email address (the part of the email address that precedes the @ - sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part - after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as - described in RFC3492. The sender name (also known as the friendly name) may contain - non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as - described inRFC 2047. MIME encoded-word syntax uses the following form: - =?charset?encoding?encoded-text?=. + does not support the SMTPUTF8 extension, as described in RFC6531. for this reason, The + email address string must be 7-bit ASCII. If you want to send to or from email addresses + that contain Unicode characters in the domain part of an address, you must encode the + domain using Punycode. Punycode is not permitted in the local part of the email address + (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode + characters in the \"friendly from\" name, you must encode the \"friendly from\" name using + MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For + more information about Punycode, see RFC 3492. - `template`: The template to use when sending this email. - `template_data`: A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to @@ -2330,13 +2330,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ConfigurationSetName"`: The name of the configuration set to use when you send an email using SendTemplatedEmail. - `"ReplyToAddresses"`: The reply-to email address(es) for the message. If the recipient - replies to the message, each reply-to address will receive the reply. -- `"ReturnPath"`: The email address that bounces and complaints will be forwarded to when + replies to the message, each reply-to address receives the reply. +- `"ReturnPath"`: The email address that bounces and complaints are forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then - an error message will be returned from the recipient's ISP; this message will then be - forwarded to the email address specified by the ReturnPath parameter. The ReturnPath - parameter is never overwritten. This email address must be either individually verified - with Amazon SES, or from a domain that has been verified with Amazon SES. + an error message is returned from the recipient's ISP; this message is forwarded to the + email address specified by the ReturnPath parameter. The ReturnPath parameter is never + overwritten. This email address must be either individually verified with Amazon SES, or + from a domain that has been verified with Amazon SES. 
- `"ReturnPathArn"`: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter. For example, if the owner @@ -2409,9 +2409,9 @@ end set_active_receipt_rule_set(params::Dict{String,<:Any}) Sets the specified receipt rule set as the active receipt rule set. To disable your -email-receiving through Amazon SES completely, you can call this API with RuleSetName set -to null. For information about managing receipt rule sets, see the Amazon SES Developer -Guide. You can execute this operation no more than once per second. +email-receiving through Amazon SES completely, you can call this operation with RuleSetName +set to null. For information about managing receipt rule sets, see the Amazon SES +Developer Guide. You can execute this operation no more than once per second. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2498,12 +2498,12 @@ operation no more than once per second. For more information about using notific Amazon SES, see the Amazon SES Developer Guide. # Arguments -- `forwarding_enabled`: Sets whether Amazon SES will forward bounce and complaint - notifications as email. true specifies that Amazon SES will forward bounce and complaint - notifications as email, in addition to any Amazon SNS topic publishing otherwise specified. - false specifies that Amazon SES will publish bounce and complaint notifications only - through Amazon SNS. This value can only be set to false when Amazon SNS topics are set for - both Bounce and Complaint notification types. +- `forwarding_enabled`: Sets whether Amazon SES forwards bounce and complaint notifications + as email. true specifies that Amazon SES forwards bounce and complaint notifications as + email, in addition to any Amazon SNS topic publishing otherwise specified. false specifies + that Amazon SES publishes bounce and complaint notifications only through Amazon SNS. This + value can only be set to false when Amazon SNS topics are set for both Bounce and Complaint + notification types. - `identity`: The identity for which to set bounce and complaint notification forwarding. Examples: user@example.com, example.com. @@ -2552,8 +2552,8 @@ information about using notifications with Amazon SES, see the Amazon SES Develo # Arguments - `enabled`: Sets whether Amazon SES includes the original email headers in Amazon SNS notifications of the specified notification type. A value of true specifies that Amazon SES - will include headers in notifications, and a value of false specifies that Amazon SES will - not include headers in notifications. This value can only be set when NotificationType is + includes headers in notifications, and a value of false specifies that Amazon SES does not + include headers in notifications. This value can only be set when NotificationType is already set to use a particular Amazon SNS topic. - `identity`: The identity for which to enable or disable headers in notifications. Examples: user@example.com, example.com. @@ -2606,29 +2606,27 @@ end Enables or disables the custom MAIL FROM domain setup for a verified identity (an email address or a domain). To send emails using the specified MAIL FROM domain, you must add an -MX record to your MAIL FROM domain's DNS settings. If you want your emails to pass Sender +MX record to your MAIL FROM domain's DNS settings. 
To ensure that your emails pass Sender Policy Framework (SPF) checks, you must also add or update an SPF record. For more information, see the Amazon SES Developer Guide. You can execute this operation no more than once per second. # Arguments -- `identity`: The verified identity for which you want to enable or disable the specified - custom MAIL FROM domain. +- `identity`: The verified identity. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BehaviorOnMXFailure"`: The action that you want Amazon SES to take if it cannot - successfully read the required MX record when you send an email. If you choose - UseDefaultValue, Amazon SES will use amazonses.com (or a subdomain of that) as the MAIL - FROM domain. If you choose RejectMessage, Amazon SES will return a - MailFromDomainNotVerified error and not send the email. The action specified in - BehaviorOnMXFailure is taken when the custom MAIL FROM domain setup is in the Pending, - Failed, and TemporaryFailure states. -- `"MailFromDomain"`: The custom MAIL FROM domain that you want the verified identity to - use. The MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not be used - in a \"From\" address if the MAIL FROM domain is the destination of email feedback - forwarding (for more information, see the Amazon SES Developer Guide), and 3) not be used - to receive emails. A value of null disables the custom MAIL FROM setting for the identity. +- `"BehaviorOnMXFailure"`: The action for Amazon SES to take if it cannot successfully read + the required MX record when you send an email. If you choose UseDefaultValue, Amazon SES + uses amazonses.com (or a subdomain of that) as the MAIL FROM domain. If you choose + RejectMessage, Amazon SES returns a MailFromDomainNotVerified error and not send the email. + The action specified in BehaviorOnMXFailure is taken when the custom MAIL FROM domain setup + is in the Pending, Failed, and TemporaryFailure states. +- `"MailFromDomain"`: The custom MAIL FROM domain for the verified identity to use. The + MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not be used in a + \"From\" address if the MAIL FROM domain is the destination of email feedback forwarding + (for more information, see the Amazon SES Developer Guide), and 3) not be used to receive + emails. A value of null disables the custom MAIL FROM setting for the identity. """ function set_identity_mail_from_domain( Identity; aws_config::AbstractAWSConfig=global_aws_config() @@ -2668,13 +2666,13 @@ topic that you specify. You can execute this operation no more than once per sec more information about feedback notification, see the Amazon SES Developer Guide. # Arguments -- `identity`: The identity (email address or domain) that you want to set the Amazon SNS - topic for. You can only specify a verified identity for this parameter. You can specify - an identity by using its name or by using its Amazon Resource Name (ARN). The following - examples are all valid identities: sender@example.com, example.com, +- `identity`: The identity (email address or domain) for the Amazon SNS topic. You can + only specify a verified identity for this parameter. You can specify an identity by using + its name or by using its Amazon Resource Name (ARN). The following examples are all valid + identities: sender@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. 
-- `notification_type`: The type of notifications that will be published to the specified - Amazon SNS topic. +- `notification_type`: The type of notifications that are published to the specified Amazon + SNS topic. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2772,7 +2770,7 @@ of replacement data. You can execute this operation no more than once per second - `template_data`: A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template. -- `template_name`: The name of the template that you want to render. +- `template_name`: The name of the template to render. """ function test_render_template( @@ -2811,16 +2809,17 @@ end update_account_sending_enabled() update_account_sending_enabled(params::Dict{String,<:Any}) -Enables or disables email sending across your entire Amazon SES account in the current AWS -Region. You can use this operation in conjunction with Amazon CloudWatch alarms to -temporarily pause email sending across your Amazon SES account in a given AWS Region when -reputation metrics (such as your bounce or complaint rates) reach certain thresholds. You -can execute this operation no more than once per second. +Enables or disables email sending across your entire Amazon SES account in the current +Amazon Web Services Region. You can use this operation in conjunction with Amazon +CloudWatch alarms to temporarily pause email sending across your Amazon SES account in a +given Amazon Web Services Region when reputation metrics (such as your bounce or complaint +rates) reach certain thresholds. You can execute this operation no more than once per +second. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Enabled"`: Describes whether email sending is enabled or disabled for your Amazon SES - account in the current AWS Region. + account in the current Amazon Web Services Region. """ function update_account_sending_enabled(; aws_config::AbstractAWSConfig=global_aws_config()) return ses( @@ -2855,9 +2854,8 @@ You can execute this operation no more than once per second. # Arguments - `configuration_set_name`: The name of the configuration set that contains the event - destination that you want to update. -- `event_destination`: The event destination object that you want to apply to the specified - configuration set. + destination. +- `event_destination`: The event destination object. """ function update_configuration_set_event_destination( @@ -2903,14 +2901,14 @@ end update_configuration_set_reputation_metrics_enabled(configuration_set_name, enabled, params::Dict{String,<:Any}) Enables or disables the publishing of reputation metrics for emails sent using a specific -configuration set in a given AWS Region. Reputation metrics include bounce and complaint -rates. These metrics are published to Amazon CloudWatch. By using CloudWatch, you can -create alarms when bounce or complaint rates exceed certain thresholds. You can execute -this operation no more than once per second. +configuration set in a given Amazon Web Services Region. Reputation metrics include bounce +and complaint rates. These metrics are published to Amazon CloudWatch. By using CloudWatch, +you can create alarms when bounce or complaint rates exceed certain thresholds. You can +execute this operation no more than once per second. 
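+
+As a sketch, to start publishing reputation metrics for a configuration set (the name below
+is a placeholder):
+
+    update_configuration_set_reputation_metrics_enabled(\"my-config-set\", true)
+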
# Arguments -- `configuration_set_name`: The name of the configuration set that you want to update. -- `enabled`: Describes whether or not Amazon SES will publish reputation metrics for the +- `configuration_set_name`: The name of the configuration set to update. +- `enabled`: Describes whether or not Amazon SES publishes reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch. """ @@ -2953,13 +2951,13 @@ end update_configuration_set_sending_enabled(configuration_set_name, enabled, params::Dict{String,<:Any}) Enables or disables email sending for messages sent using a specific configuration set in a -given AWS Region. You can use this operation in conjunction with Amazon CloudWatch alarms -to temporarily pause email sending for a configuration set when the reputation metrics for -that configuration set (such as your bounce on complaint rate) exceed certain thresholds. -You can execute this operation no more than once per second. +given Amazon Web Services Region. You can use this operation in conjunction with Amazon +CloudWatch alarms to temporarily pause email sending for a configuration set when the +reputation metrics for that configuration set (such as your bounce on complaint rate) +exceed certain thresholds. You can execute this operation no more than once per second. # Arguments -- `configuration_set_name`: The name of the configuration set that you want to update. +- `configuration_set_name`: The name of the configuration set to update. - `enabled`: Describes whether email sending is enabled or disabled for the configuration set. @@ -3009,8 +3007,7 @@ handle these events. For information about using custom domains, see the Amazon Developer Guide. # Arguments -- `configuration_set_name`: The name of the configuration set for which you want to update - the custom tracking domain. +- `configuration_set_name`: The name of the configuration set. - `tracking_options`: """ @@ -3059,8 +3056,7 @@ verification email templates, see Using Custom Verification Email Templates in t SES Developer Guide. You can execute this operation no more than once per second. # Arguments -- `template_name`: The name of the custom verification email template that you want to - update. +- `template_name`: The name of the custom verification email template to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3147,8 +3143,8 @@ end update_template(template, params::Dict{String,<:Any}) Updates an email template. Email templates enable you to send personalized email to one or -more destinations in a single API operation. For more information, see the Amazon SES -Developer Guide. You can execute this operation no more than once per second. +more destinations in a single operation. For more information, see the Amazon SES Developer +Guide. You can execute this operation no more than once per second. # Arguments - `template`: @@ -3225,10 +3221,10 @@ end verify_domain_identity(domain) verify_domain_identity(domain, params::Dict{String,<:Any}) -Adds a domain to the list of identities for your Amazon SES account in the current AWS -Region and attempts to verify it. For more information about verifying domains, see -Verifying Email Addresses and Domains in the Amazon SES Developer Guide. You can execute -this operation no more than once per second. +Adds a domain to the list of identities for your Amazon SES account in the current Amazon +Web Services Region and attempts to verify it. 
For more information about verifying +domains, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide. You +can execute this operation no more than once per second. # Arguments - `domain`: The domain to be verified. @@ -3293,9 +3289,9 @@ end verify_email_identity(email_address, params::Dict{String,<:Any}) Adds an email address to the list of identities for your Amazon SES account in the current -AWS region and attempts to verify it. As a result of executing this operation, a -verification email is sent to the specified address. You can execute this operation no more -than once per second. +Amazon Web Services Region and attempts to verify it. As a result of executing this +operation, a verification email is sent to the specified address. You can execute this +operation no more than once per second. # Arguments - `email_address`: The email address to be verified. diff --git a/src/services/sesv2.jl b/src/services/sesv2.jl index 188d9c5303..8ca722ef37 100644 --- a/src/services/sesv2.jl +++ b/src/services/sesv2.jl @@ -37,6 +37,36 @@ function batch_get_metric_data( ) end +""" + cancel_export_job(job_id) + cancel_export_job(job_id, params::Dict{String,<:Any}) + +Cancels an export job. + +# Arguments +- `job_id`: The export job ID. + +""" +function cancel_export_job(JobId; aws_config::AbstractAWSConfig=global_aws_config()) + return sesv2( + "PUT", + "/v2/email/export-jobs/$(JobId)/cancel"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_export_job( + JobId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sesv2( + "PUT", + "/v2/email/export-jobs/$(JobId)/cancel", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_configuration_set(configuration_set_name) create_configuration_set(configuration_set_name, params::Dict{String,<:Any}) @@ -105,10 +135,9 @@ end Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about -these events to. For example, you can send event data to Amazon SNS to receive -notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data -Firehose to stream data to Amazon S3 for long-term storage. A single configuration set can -include more than one event destination. +these events to. For example, you can send event data to Amazon EventBridge and associate a +rule to send the event to the specified target. A single configuration set can include more +than one event destination. # Arguments - `configuration_set_name`: The name of the configuration set . @@ -598,6 +627,55 @@ function create_email_template( ) end +""" + create_export_job(export_data_source, export_destination) + create_export_job(export_data_source, export_destination, params::Dict{String,<:Any}) + +Creates an export job for a data source and destination. You can execute this operation no +more than once per second. + +# Arguments +- `export_data_source`: The data source for the export job. +- `export_destination`: The destination for the export job. 
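+
+# Example
+A minimal sketch of starting an export to an S3 bucket you own. The nested field names in
+the two payloads are assumptions about the shapes the service expects, and the dates and
+bucket URL are placeholders; consult the Amazon SES API v2 Reference for the exact schema.
+
+    data_source = Dict(
+        \"MessageInsightsDataSource\" => Dict(
+            \"StartDate\" => \"2024-06-01T00:00:00Z\",
+            \"EndDate\" => \"2024-06-07T00:00:00Z\",
+        ),
+    )
+    destination = Dict(
+        \"DataFormat\" => \"CSV\", \"S3Url\" => \"s3://example-bucket/ses-exports/\"
+    )
+    create_export_job(data_source, destination)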
+ +""" +function create_export_job( + ExportDataSource, ExportDestination; aws_config::AbstractAWSConfig=global_aws_config() +) + return sesv2( + "POST", + "/v2/email/export-jobs", + Dict{String,Any}( + "ExportDataSource" => ExportDataSource, "ExportDestination" => ExportDestination + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_export_job( + ExportDataSource, + ExportDestination, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sesv2( + "POST", + "/v2/email/export-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ExportDataSource" => ExportDataSource, + "ExportDestination" => ExportDestination, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_import_job(import_data_source, import_destination) create_import_job(import_data_source, import_destination, params::Dict{String,<:Any}) @@ -690,9 +768,8 @@ end Delete an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about -these events to. For example, you can send event data to Amazon SNS to receive -notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data -Firehose to stream data to Amazon S3 for long-term storage. +these events to. For example, you can send event data to Amazon EventBridge and associate a +rule to send the event to the specified target. # Arguments - `configuration_set_name`: The name of the configuration set that contains the event @@ -1127,9 +1204,8 @@ end Retrieve a list of event destinations that are associated with a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, -you can send event data to Amazon SNS to receive notifications when you receive bounces or -complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for -long-term storage. +you can send event data to Amazon EventBridge and associate a rule to send the event to the +specified target. # Arguments - `configuration_set_name`: The name of the configuration set that contains the event @@ -1634,6 +1710,36 @@ function get_email_template( ) end +""" + get_export_job(job_id) + get_export_job(job_id, params::Dict{String,<:Any}) + +Provides information about an export job. + +# Arguments +- `job_id`: The export job ID. + +""" +function get_export_job(JobId; aws_config::AbstractAWSConfig=global_aws_config()) + return sesv2( + "GET", + "/v2/email/export-jobs/$(JobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_export_job( + JobId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sesv2( + "GET", + "/v2/email/export-jobs/$(JobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_import_job(job_id) get_import_job(job_id, params::Dict{String,<:Any}) @@ -1664,6 +1770,41 @@ function get_import_job( ) end +""" + get_message_insights(message_id) + get_message_insights(message_id, params::Dict{String,<:Any}) + +Provides information about a specific message, including the from address, the subject, the +recipient address, email tags, as well as events associated with the message. You can +execute this operation no more than once per second. 
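+
+For example, assuming `message_id` holds the MessageId string returned by an earlier call
+that sent the email:
+
+    insights = get_message_insights(message_id)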
+ +# Arguments +- `message_id`: A MessageId is a unique identifier for a message, and is returned when + sending emails through Amazon SES. + +""" +function get_message_insights(MessageId; aws_config::AbstractAWSConfig=global_aws_config()) + return sesv2( + "GET", + "/v2/email/insights/$(MessageId)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_message_insights( + MessageId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sesv2( + "GET", + "/v2/email/insights/$(MessageId)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_suppressed_destination(email_address) get_suppressed_destination(email_address, params::Dict{String,<:Any}) @@ -1797,8 +1938,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys """ function list_contacts(ContactListName; aws_config::AbstractAWSConfig=global_aws_config()) return sesv2( - "GET", - "/v2/email/contact-lists/$(ContactListName)/contacts"; + "POST", + "/v2/email/contact-lists/$(ContactListName)/contacts/list"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1809,8 +1950,8 @@ function list_contacts( aws_config::AbstractAWSConfig=global_aws_config(), ) return sesv2( - "GET", - "/v2/email/contact-lists/$(ContactListName)/contacts", + "POST", + "/v2/email/contact-lists/$(ContactListName)/contacts/list", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2043,7 +2184,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"PageSize"`: The number of results to show in a single call to ListEmailTemplates. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results. The - value you specify has to be at least 1, and can be no more than 10. + value you specify has to be at least 1, and can be no more than 100. """ function list_email_templates(; aws_config::AbstractAWSConfig=global_aws_config()) return sesv2( @@ -2062,6 +2203,44 @@ function list_email_templates( ) end +""" + list_export_jobs() + list_export_jobs(params::Dict{String,<:Any}) + +Lists all of the export jobs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExportSourceType"`: A value used to list export jobs that have a certain + ExportSourceType. +- `"JobStatus"`: A value used to list export jobs that have a certain JobStatus. +- `"NextToken"`: The pagination token returned from a previous call to ListExportJobs to + indicate the position in the list of export jobs. +- `"PageSize"`: Maximum number of export jobs to return at once. Use this parameter to + paginate results. If additional export jobs exist beyond the specified limit, the NextToken + element is sent in the response. Use the NextToken value in subsequent calls to + ListExportJobs to retrieve additional export jobs. 
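+
+# Example
+A sketch of requesting one page of export jobs with a status filter; the keys come from the
+list above, and the \"PROCESSING\" status value is illustrative.
+
+    page = list_export_jobs(Dict(\"JobStatus\" => \"PROCESSING\", \"PageSize\" => 25))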
+""" +function list_export_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return sesv2( + "POST", + "/v2/email/list-export-jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_export_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sesv2( + "POST", + "/v2/email/list-export-jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_import_jobs() list_import_jobs(params::Dict{String,<:Any}) @@ -2082,8 +2261,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys """ function list_import_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) return sesv2( - "GET", - "/v2/email/import-jobs"; + "POST", + "/v2/email/import-jobs/list"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2092,8 +2271,8 @@ function list_import_jobs( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return sesv2( - "GET", - "/v2/email/import-jobs", + "POST", + "/v2/email/import-jobs/list", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2275,11 +2454,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ProductionAccessEnabled"`: Indicates whether or not your account should have production access in the current Amazon Web Services Region. If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified - identities. Additionally, the maximum number of emails you can send in a 24-hour period - (your sending quota) is 200, and the maximum number of emails you can send per second (your - maximum sending rate) is 1. If the value is true, then your account has production access. - When your account has production access, you can send email to any address. The sending - quota and maximum sending rate for your account vary based on your specific use case. + identities. If the value is true, then your account has production access. When your + account has production access, you can send email to any address. The sending quota and + maximum sending rate for your account vary based on your specific use case. """ function put_account_details( MailType, @@ -3284,7 +3461,7 @@ replaces the tags with values that you specify. # Arguments - `content`: An object that contains the body of the message. You can send either a Simple - message Raw message or a template Message. + message, Raw message, or a Templated message. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3490,8 +3667,7 @@ end Update the configuration of an event destination for a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event -data to Amazon SNS to receive notifications when you receive bounces or complaints, or you -can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage. +data to Amazon EventBridge and associate a rule to send the event to the specified target. # Arguments - `configuration_set_name`: The name of the configuration set that contains the event @@ -3538,8 +3714,9 @@ end update_contact(contact_list_name, email_address) update_contact(contact_list_name, email_address, params::Dict{String,<:Any}) -Updates a contact's preferences for a list. 
It is not necessary to specify all existing -topic preferences in the TopicPreferences object, just the ones that need updating. +Updates a contact's preferences for a list. You must specify all existing topic +preferences in the TopicPreferences object, not just the ones that need updating; +otherwise, all your existing preferences will be removed. # Arguments - `contact_list_name`: The name of the contact list. diff --git a/src/services/sfn.jl b/src/services/sfn.jl index 9a5f6e5287..dbd8f7bdbf 100644 --- a/src/services/sfn.jl +++ b/src/services/sfn.jl @@ -62,14 +62,16 @@ Creates a state machine. A state machine consists of a collection of states that work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the -Step Functions User Guide. This operation is eventually consistent. The results are best -effort and may not reflect very recent updates and changes. CreateStateMachine is an -idempotent API. Subsequent requests won’t create a duplicate resource if it was already -created. CreateStateMachine's idempotency check is based on the state machine name, -definition, type, LoggingConfiguration and TracingConfiguration. If a following request has -a different roleArn or tags, Step Functions will ignore these differences and treat it as -an idempotent request of the previous. In this case, roleArn and tags will not be updated, -even if they are different. +Step Functions User Guide. If you set the publish parameter of this API action to true, it +publishes version 1 as the first revision of the state machine. This operation is +eventually consistent. The results are best effort and may not reflect very recent updates +and changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create +a duplicate resource if it was already created. CreateStateMachine's idempotency check is +based on the state machine name, definition, type, LoggingConfiguration, and +TracingConfiguration. The check is also based on the publish and versionDescription +parameters. If a following request has a different roleArn or tags, Step Functions will +ignore these differences and treat it as an idempotent request of the previous. In this +case, roleArn and tags will not be updated, even if they are different. # Arguments - `definition`: The Amazon States Language definition of the state machine. See Amazon @@ -85,6 +87,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"loggingConfiguration"`: Defines what execution history events are logged and where they are logged. By default, the level is set to OFF. For more information see Log Levels in the Step Functions User Guide. +- `"publish"`: Set to true to publish the first version of the state machine during + creation. The default is false. - `"tags"`: Tags to be added when creating a state machine. An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide, and Controlling Access Using IAM Tags. Tags may only contain @@ -92,6 +96,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tracingConfiguration"`: Selects whether X-Ray tracing is enabled. - `"type"`: Determines whether a Standard or Express state machine is created. The default is STANDARD. 
You cannot update the type of a state machine once it has been created. +- `"versionDescription"`: Sets description about the state machine version. You can only + set the description if the publish parameter is set to true. Otherwise, if you set + versionDescription, but publish to false, this API action throws ValidationException. """ function create_state_machine( definition, name, roleArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -126,6 +133,72 @@ function create_state_machine( ) end +""" + create_state_machine_alias(name, routing_configuration) + create_state_machine_alias(name, routing_configuration, params::Dict{String,<:Any}) + +Creates an alias for a state machine that points to one or two versions of the same state +machine. You can set your application to call StartExecution with an alias and update the +version the alias uses without changing the client's code. You can also map an alias to +split StartExecution requests between two versions of a state machine. To do this, add a +second RoutingConfig object in the routingConfiguration parameter. You must also specify +the percentage of execution run requests each version should receive in both RoutingConfig +objects. Step Functions randomly chooses which version runs a given execution based on the +percentage you specify. To create an alias that points to a single version, specify a +single RoutingConfig object with a weight set to 100. You can create up to 100 aliases for +each state machine. You must delete unused aliases using the DeleteStateMachineAlias API +action. CreateStateMachineAlias is an idempotent API. Step Functions bases the idempotency +check on the stateMachineArn, description, name, and routingConfiguration parameters. +Requests that contain the same values for these parameters return a successful idempotent +response without creating a duplicate resource. Related operations: +DescribeStateMachineAlias ListStateMachineAliases UpdateStateMachineAlias +DeleteStateMachineAlias + +# Arguments +- `name`: The name of the state machine alias. To avoid conflict with version ARNs, don't + use an integer in the name of the alias. +- `routing_configuration`: The routing configuration of a state machine alias. The routing + configuration shifts execution traffic between two state machine versions. + routingConfiguration contains an array of RoutingConfig objects that specify up to two + state machine versions. Step Functions then randomly choses which version to run an + execution with based on the weight assigned to each RoutingConfig. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description for the state machine alias. 
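# Example
A minimal usage sketch, assuming placeholder ARNs and the `stateMachineVersionArn`/`weight`
field names from the Step Functions RoutingConfig shape, that splits traffic 80/20 between
two versions:

    # Hypothetical version ARN prefix; substitute your own account, Region, and state machine.
    version_prefix = "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine"
    routing = [
        Dict("stateMachineVersionArn" => "$version_prefix:1", "weight" => 80),
        Dict("stateMachineVersionArn" => "$version_prefix:2", "weight" => 20),
    ]
    create_state_machine_alias("PROD", routing, Dict("description" => "Production alias"))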
+""" +function create_state_machine_alias( + name, routingConfiguration; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "CreateStateMachineAlias", + Dict{String,Any}("name" => name, "routingConfiguration" => routingConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_state_machine_alias( + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "CreateStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "routingConfiguration" => routingConfiguration + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_activity(activity_arn) delete_activity(activity_arn, params::Dict{String,<:Any}) @@ -163,16 +236,22 @@ end delete_state_machine(state_machine_arn) delete_state_machine(state_machine_arn, params::Dict{String,<:Any}) -Deletes a state machine. This is an asynchronous operation: It sets the state machine's -status to DELETING and begins the deletion process. If the given state machine Amazon -Resource Name (ARN) is a qualified state machine ARN, it will fail with -ValidationException. A qualified state machine ARN refers to a Distributed Map state -defined within a state machine. For example, the qualified state machine ARN -arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers -to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. For EXPRESS state machines, the deletion will happen eventually (usually -less than a minute). Running executions may emit logs after DeleteStateMachine API is -called. +Deletes a state machine. This is an asynchronous operation. It sets the state machine's +status to DELETING and begins the deletion process. A state machine is deleted only when +all its executions are completed. On the next state transition, the state machine's +executions are terminated. A qualified state machine ARN can either refer to a Distributed +Map state defined within a state machine, a version ARN, or an alias ARN. The following are +some examples of qualified and unqualified state machine ARNs: The following qualified +state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state +machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following unqualified state machine ARN refers to a +state machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine This API action also +deletes all versions and aliases associated with a state machine. For EXPRESS state +machines, the deletion happens eventually (usually in less than a minute). Running +executions may emit logs after DeleteStateMachine API is called. # Arguments - `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to delete. @@ -205,6 +284,96 @@ function delete_state_machine( ) end +""" + delete_state_machine_alias(state_machine_alias_arn) + delete_state_machine_alias(state_machine_alias_arn, params::Dict{String,<:Any}) + +Deletes a state machine alias. After you delete a state machine alias, you can't use it to +start executions. 
When you delete a state machine alias, Step Functions doesn't delete the +state machine versions that alias references. Related operations: +CreateStateMachineAlias DescribeStateMachineAlias ListStateMachineAliases +UpdateStateMachineAlias + +# Arguments +- `state_machine_alias_arn`: The Amazon Resource Name (ARN) of the state machine alias to + delete. + +""" +function delete_state_machine_alias( + stateMachineAliasArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "DeleteStateMachineAlias", + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_state_machine_alias( + stateMachineAliasArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "DeleteStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_state_machine_version(state_machine_version_arn) + delete_state_machine_version(state_machine_version_arn, params::Dict{String,<:Any}) + +Deletes a state machine version. After you delete a version, you can't call StartExecution +using that version's ARN or use the version with a state machine alias. Deleting a state +machine version won't terminate its in-progress executions. You can't delete a state +machine version currently referenced by one or more aliases. Before you delete a version, +you must either delete the aliases or update them to point to another state machine +version. Related operations: PublishStateMachineVersion ListStateMachineVersions + + +# Arguments +- `state_machine_version_arn`: The Amazon Resource Name (ARN) of the state machine version + to delete. + +""" +function delete_state_machine_version( + stateMachineVersionArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "DeleteStateMachineVersion", + Dict{String,Any}("stateMachineVersionArn" => stateMachineVersionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_state_machine_version( + stateMachineVersionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "DeleteStateMachineVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineVersionArn" => stateMachineVersionArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_activity(activity_arn) describe_activity(activity_arn, params::Dict{String,<:Any}) @@ -243,12 +412,15 @@ end describe_execution(execution_arn) describe_execution(execution_arn, params::Dict{String,<:Any}) -Provides all information about a state machine execution, such as the state machine -associated with the execution, the execution input and output, and relevant execution -metadata. Use this API action to return the Map Run ARN if the execution was dispatched by -a Map Run. This operation is eventually consistent. The results are best effort and may -not reflect very recent updates and changes. This API action is not supported by EXPRESS -state machine executions unless they were dispatched by a Map Run. +Provides information about a state machine execution, such as the state machine associated +with the execution, the execution input and output, and relevant execution metadata. 
If +you've redriven an execution, you can use this API action to return information about the +redrives of that execution. In addition, you can use this API action to return the Map Run +Amazon Resource Name (ARN) if the execution was dispatched by a Map Run. If you specify a +version or alias ARN when you call the StartExecution API action, DescribeExecution returns +that ARN. This operation is eventually consistent. The results are best effort and may not +reflect very recent updates and changes. Executions of an EXPRESS state machine aren't +supported by DescribeExecution unless a Map Run dispatched them. # Arguments - `execution_arn`: The Amazon Resource Name (ARN) of the execution to describe. @@ -281,8 +453,9 @@ end describe_map_run(map_run_arn) describe_map_run(map_run_arn, params::Dict{String,<:Any}) -Provides information about a Map Run's configuration, progress, and results. For more -information, see Examining Map Run in the Step Functions Developer Guide. +Provides information about a Map Run's configuration, progress, and results. If you've +redriven a Map Run, this API action also returns information about the redrives of that Map +Run. For more information, see Examining Map Run in the Step Functions Developer Guide. # Arguments - `map_run_arn`: The Amazon Resource Name (ARN) that identifies a Map Run. @@ -316,17 +489,30 @@ end describe_state_machine(state_machine_arn, params::Dict{String,<:Any}) Provides information about a state machine's definition, its IAM role Amazon Resource Name -(ARN), and configuration. If the state machine ARN is a qualified state machine ARN, the -response returned includes the Map state's label. A qualified state machine ARN refers to a -Distributed Map state defined within a state machine. For example, the qualified state -machine ARN -arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers -to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. This operation is eventually consistent. The results are best effort and -may not reflect very recent updates and changes. +(ARN), and configuration. A qualified state machine ARN can either refer to a Distributed +Map state defined within a state machine, a version ARN, or an alias ARN. The following are +some examples of qualified and unqualified state machine ARNs: The following qualified +state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state +machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following qualified state machine ARN refers to an +alias named PROD. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne:PROD> If you provide a qualified state machine ARN that refers to a version ARN or +an alias ARN, the request starts execution for that version or alias. The following +unqualified state machine ARN refers to a state machine named myStateMachine. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne> This API action returns the details for a state machine version if the +stateMachineArn you specify is a state machine version ARN. This operation is eventually +consistent. The results are best effort and may not reflect very recent updates and +changes. 
# Arguments -- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to describe. +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine for which you + want the information. If you specify a state machine version ARN, this API returns details + about that version. The version ARN is a combination of state machine ARN and the version + number separated by a colon (:). For example, stateMachineARN:1. """ function describe_state_machine( @@ -356,16 +542,57 @@ function describe_state_machine( ) end +""" + describe_state_machine_alias(state_machine_alias_arn) + describe_state_machine_alias(state_machine_alias_arn, params::Dict{String,<:Any}) + +Returns details about a state machine alias. Related operations: +CreateStateMachineAlias ListStateMachineAliases UpdateStateMachineAlias +DeleteStateMachineAlias + +# Arguments +- `state_machine_alias_arn`: The Amazon Resource Name (ARN) of the state machine alias. + +""" +function describe_state_machine_alias( + stateMachineAliasArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "DescribeStateMachineAlias", + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_state_machine_alias( + stateMachineAliasArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "DescribeStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_state_machine_for_execution(execution_arn) describe_state_machine_for_execution(execution_arn, params::Dict{String,<:Any}) Provides information about a state machine's definition, its execution role ARN, and -configuration. If an execution was dispatched by a Map Run, the Map Run is returned in the -response. Additionally, the state machine returned will be the state machine associated -with the Map Run. This operation is eventually consistent. The results are best effort and -may not reflect very recent updates and changes. This API action is not supported by -EXPRESS state machines. +configuration. If a Map Run dispatched the execution, this action returns the Map Run +Amazon Resource Name (ARN) in the response. The state machine returned is the state machine +associated with the Map Run. This operation is eventually consistent. The results are best +effort and may not reflect very recent updates and changes. This API action is not +supported by EXPRESS state machines. # Arguments - `execution_arn`: The Amazon Resource Name (ARN) of the execution you want state machine @@ -540,14 +767,16 @@ end Lists all executions of a state machine or a Map Run. You can list all executions related to a state machine by specifying a state machine Amazon Resource Name (ARN), or those -related to a Map Run by specifying a Map Run ARN. Results are sorted by time, with the most -recent execution first. If nextToken is returned, there are more results available. The -value of nextToken is a unique pagination token for each page. Make the call again using -the returned token to retrieve the next page. Keep all other arguments unchanged. Each -pagination token expires after 24 hours. Using an expired pagination token will return an -HTTP 400 InvalidToken error. This operation is eventually consistent. 
The results are best -effort and may not reflect very recent updates and changes. This API action is not -supported by EXPRESS state machines. +related to a Map Run by specifying a Map Run ARN. Using this API action, you can also list +all redriven executions. You can also provide a state machine alias ARN or version ARN to +list the executions associated with a specific alias or version. Results are sorted by +time, with the most recent execution first. If nextToken is returned, there are more +results available. The value of nextToken is a unique pagination token for each page. Make +the call again using the returned token to retrieve the next page. Keep all other arguments +unchanged. Each pagination token expires after 24 hours. Using an expired pagination token +will return an HTTP 400 InvalidToken error. This operation is eventually consistent. The +results are best effort and may not reflect very recent updates and changes. This API +action is not supported by EXPRESS state machines. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -565,8 +794,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. +- `"redriveFilter"`: Sets a filter to list executions based on whether or not they have + been redriven. For a Distributed Map, redriveFilter sets a filter to list child workflow + executions based on whether or not they have been redriven. If you do not provide a + redriveFilter, Step Functions returns a list of both redriven and non-redriven executions. + If you provide a state machine ARN in redriveFilter, the API returns a validation exception. - `"stateMachineArn"`: The Amazon Resource Name (ARN) of the state machine whose executions - is listed. You can specify either a mapRunArn or a stateMachineArn, but not both. + is listed. You can specify either a mapRunArn or a stateMachineArn, but not both. You can + also return a list of executions associated with a specific alias or version, by specifying + an alias ARN or a version ARN in the stateMachineArn parameter. - `"statusFilter"`: If specified, only list the executions whose current execution status matches the given filter. """ @@ -628,6 +864,118 @@ function list_map_runs( ) end +""" + list_state_machine_aliases(state_machine_arn) + list_state_machine_aliases(state_machine_arn, params::Dict{String,<:Any}) + +Lists aliases for a specified state machine ARN. Results are sorted by time, with the most +recently created aliases listed first. To list aliases that reference a state machine +version, you can specify the version ARN in the stateMachineArn parameter. If nextToken is +returned, there are more results available. The value of nextToken is a unique pagination +token for each page. Make the call again using the returned token to retrieve the next +page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. +Using an expired pagination token will return an HTTP 400 InvalidToken error. Related +operations: CreateStateMachineAlias DescribeStateMachineAlias +UpdateStateMachineAlias DeleteStateMachineAlias + +# Arguments +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine for which you + want to list aliases. 
If you specify a state machine version ARN, this API returns a list + of aliases for that version. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. The default is 100 and the maximum allowed + page size is 1000. A value of 0 uses the default. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_state_machine_aliases( + stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "ListStateMachineAliases", + Dict{String,Any}("stateMachineArn" => stateMachineArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_state_machine_aliases( + stateMachineArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "ListStateMachineAliases", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("stateMachineArn" => stateMachineArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_state_machine_versions(state_machine_arn) + list_state_machine_versions(state_machine_arn, params::Dict{String,<:Any}) + +Lists versions for the specified state machine Amazon Resource Name (ARN). The results are +sorted in descending order of the version creation time. If nextToken is returned, there +are more results available. The value of nextToken is a unique pagination token for each +page. Make the call again using the returned token to retrieve the next page. Keep all +other arguments unchanged. Each pagination token expires after 24 hours. Using an expired +pagination token will return an HTTP 400 InvalidToken error. Related operations: +PublishStateMachineVersion DeleteStateMachineVersion + +# Arguments +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. The default is 100 and the maximum allowed + page size is 1000. A value of 0 uses the default. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. 
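# Example
A minimal usage sketch with a placeholder state machine ARN, returning at most the ten most
recently created versions:

    list_state_machine_versions(
        "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine",
        Dict("maxResults" => 10),  # page size; pass "nextToken" to fetch further pages
    )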
+""" +function list_state_machine_versions( + stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "ListStateMachineVersions", + Dict{String,Any}("stateMachineArn" => stateMachineArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_state_machine_versions( + stateMachineArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "ListStateMachineVersions", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("stateMachineArn" => stateMachineArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_state_machines() list_state_machines(params::Dict{String,<:Any}) @@ -699,12 +1047,140 @@ function list_tags_for_resource( ) end +""" + publish_state_machine_version(state_machine_arn) + publish_state_machine_version(state_machine_arn, params::Dict{String,<:Any}) + +Creates a version from the current revision of a state machine. Use versions to create +immutable snapshots of your state machine. You can start executions from versions either +directly or with an alias. To create an alias, use CreateStateMachineAlias. You can publish +up to 1000 versions for each state machine. You must manually delete unused versions using +the DeleteStateMachineVersion API action. PublishStateMachineVersion is an idempotent API. +It doesn't create a duplicate state machine version if it already exists for the current +revision. Step Functions bases PublishStateMachineVersion's idempotency check on the +stateMachineArn, name, and revisionId parameters. Requests with the same parameters return +a successful idempotent response. If you don't specify a revisionId, Step Functions checks +for a previously published version of the state machine's current revision. Related +operations: DeleteStateMachineVersion ListStateMachineVersions + +# Arguments +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: An optional description of the state machine version. +- `"revisionId"`: Only publish the state machine version if the current state machine's + revision ID matches the specified ID. Use this option to avoid publishing a version if the + state machine changed since you last updated it. If the specified revision ID doesn't match + the state machine's current revision ID, the API returns ConflictException. To specify an + initial revision ID for a state machine with no revision ID assigned, specify the string + INITIAL for the revisionId parameter. For example, you can specify a revisionID of INITIAL + when you create a state machine using the CreateStateMachine API action. 
+""" +function publish_state_machine_version( + stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "PublishStateMachineVersion", + Dict{String,Any}("stateMachineArn" => stateMachineArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function publish_state_machine_version( + stateMachineArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "PublishStateMachineVersion", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("stateMachineArn" => stateMachineArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + redrive_execution(execution_arn) + redrive_execution(execution_arn, params::Dict{String,<:Any}) + +Restarts unsuccessful executions of Standard workflows that didn't complete successfully in +the last 14 days. These include failed, aborted, or timed out executions. When you redrive +an execution, it continues the failed execution from the unsuccessful step and uses the +same input. Step Functions preserves the results and execution history of the successful +steps, and doesn't rerun these steps when you redrive an execution. Redriven executions use +the same state machine definition and execution ARN as the original execution attempt. For +workflows that include an Inline Map or Parallel state, RedriveExecution API action +reschedules and redrives only the iterations and branches that failed or aborted. To +redrive a workflow that includes a Distributed Map state whose Map Run failed, you must +redrive the parent workflow. The parent workflow redrives all the unsuccessful states, +including a failed Map Run. If a Map Run was not started in the original execution attempt, +the redriven parent workflow starts the Map Run. This API action is not supported by +EXPRESS state machines. However, you can restart the unsuccessful executions of Express +child workflows in a Distributed Map by redriving its Map Run. When you redrive a Map Run, +the Express child workflows are rerun using the StartExecution API action. For more +information, see Redriving Map Runs. You can redrive executions if your original execution +meets the following conditions: The execution status isn't SUCCEEDED. Your workflow +execution has not exceeded the redrivable period of 14 days. Redrivable period refers to +the time during which you can redrive a given execution. This period starts from the day a +state machine completes its execution. The workflow execution has not exceeded the +maximum open time of one year. For more information about state machine quotas, see Quotas +related to state machine executions. The execution event history count is less than +24,999. Redriven executions append their event history to the existing event history. Make +sure your workflow execution contains less than 24,999 events to accommodate the +ExecutionRedriven history event and at least one other history event. + +# Arguments +- `execution_arn`: The Amazon Resource Name (ARN) of the execution to be redriven. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. 
The API will return idempotent responses for the last 10 client tokens used to + successfully redrive the execution. These client tokens are valid for up to 15 minutes + after they are first used. +""" +function redrive_execution(executionArn; aws_config::AbstractAWSConfig=global_aws_config()) + return sfn( + "RedriveExecution", + Dict{String,Any}("executionArn" => executionArn, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function redrive_execution( + executionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "RedriveExecution", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "executionArn" => executionArn, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ send_task_failure(task_token) send_task_failure(task_token, params::Dict{String,<:Any}) -Used by activity workers and task states using the callback pattern to report that the task -identified by the taskToken failed. +Used by activity workers, Task states using the callback pattern, and optionally Task +states using the job run pattern to report that the task identified by the taskToken failed. # Arguments - `task_token`: The token that represents this task. Task tokens are generated by Step @@ -743,16 +1219,17 @@ end send_task_heartbeat(task_token) send_task_heartbeat(task_token, params::Dict{String,<:Any}) -Used by activity workers and task states using the callback pattern to report to Step -Functions that the task represented by the specified taskToken is still making progress. -This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state -machine's Amazon States Language definition (HeartbeatSeconds). This action does not in -itself create an event in the execution history. However, if the task times out, the -execution history contains an ActivityTimedOut entry for activities, or a TaskTimedOut -entry for for tasks using the job run or callback pattern. The Timeout of a task, defined -in the state machine's Amazon States Language definition, is its maximum allowed duration, -regardless of the number of SendTaskHeartbeat requests received. Use HeartbeatSeconds to -configure the timeout interval for heartbeats. +Used by activity workers and Task states using the callback pattern, and optionally Task +states using the job run pattern to report to Step Functions that the task represented by +the specified taskToken is still making progress. This action resets the Heartbeat clock. +The Heartbeat threshold is specified in the state machine's Amazon States Language +definition (HeartbeatSeconds). This action does not in itself create an event in the +execution history. However, if the task times out, the execution history contains an +ActivityTimedOut entry for activities, or a TaskTimedOut entry for tasks using the job run +or callback pattern. The Timeout of a task, defined in the state machine's Amazon States +Language definition, is its maximum allowed duration, regardless of the number of +SendTaskHeartbeat requests received. Use HeartbeatSeconds to configure the timeout interval +for heartbeats. # Arguments - `task_token`: The token that represents this task. 
Task tokens are generated by Step @@ -787,8 +1264,9 @@ end send_task_success(output, task_token) send_task_success(output, task_token, params::Dict{String,<:Any}) -Used by activity workers and task states using the callback pattern to report that the task -identified by the taskToken completed successfully. +Used by activity workers, Task states using the callback pattern, and optionally Task +states using the job run pattern to report that the task identified by the taskToken +completed successfully. # Arguments - `output`: The JSON output of the task. Length constraints apply to the payload size, and @@ -832,21 +1310,49 @@ end start_execution(state_machine_arn) start_execution(state_machine_arn, params::Dict{String,<:Any}) -Starts a state machine execution. If the given state machine Amazon Resource Name (ARN) is -a qualified state machine ARN, it will fail with ValidationException. A qualified state -machine ARN refers to a Distributed Map state defined within a state machine. For example, -the qualified state machine ARN -arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers -to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. StartExecution is idempotent for STANDARD workflows. For a STANDARD -workflow, if StartExecution is called with the same name and input as a running execution, -the call will succeed and return the same response as the original request. If the -execution is closed or if the input is different, it will return a 400 -ExecutionAlreadyExists error. Names can be reused after 90 days. StartExecution is not +Starts a state machine execution. A qualified state machine ARN can either refer to a +Distributed Map state defined within a state machine, a version ARN, or an alias ARN. The +following are some examples of qualified and unqualified state machine ARNs: The +following qualified state machine ARN refers to a Distributed Map state with a label +mapStateLabel in a state machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following qualified state machine ARN refers to an +alias named PROD. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne:PROD> If you provide a qualified state machine ARN that refers to a version ARN or +an alias ARN, the request starts execution for that version or alias. The following +unqualified state machine ARN refers to a state machine named myStateMachine. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne> If you start an execution with an unqualified state machine ARN, Step Functions +uses the latest revision of the state machine for the execution. To start executions of a +state machine version, call StartExecution and provide the version ARN or the ARN of an +alias that points to the version. StartExecution is idempotent for STANDARD workflows. +For a STANDARD workflow, if you call StartExecution with the same name and input as a +running execution, the call succeeds and return the same response as the original request. +If the execution is closed or if the input is different, it returns a 400 +ExecutionAlreadyExists error. You can reuse names after 90 days. StartExecution isn't idempotent for EXPRESS workflows. # Arguments -- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to execute. 
+- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to execute. The + stateMachineArn parameter accepts one of the following inputs: An unqualified state + machine ARN – Refers to a state machine ARN that isn't qualified with a version or alias + ARN. The following is an example of an unqualified state machine ARN. + arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi + ne> Step Functions doesn't associate state machine executions that you start with an + unqualified ARN with a version. This is true even if that version uses the same revision + that the execution used. A state machine version ARN – Refers to a version ARN, which + is a combination of state machine ARN and the version number separated by a colon (:). The + following is an example of the ARN for version 10. + arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi + ne>:10 Step Functions doesn't associate executions that you start with a version ARN + with any aliases that point to that version. A state machine alias ARN – Refers to an + alias ARN, which is a combination of state machine ARN and the alias name separated by a + colon (:). The following is an example of the ARN for an alias named PROD. + arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi + ne:PROD> Step Functions associates executions that you start with an alias ARN with + that alias and the state machine version used for that execution. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -854,13 +1360,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"input\": \"{\"first_name\" : \"test\"}\" If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\" Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding. -- `"name"`: The name of the execution. This name must be unique for your Amazon Web - Services account, region, and state machine for 90 days. For more information, see Limits - Related to State Machine Executions in the Step Functions Developer Guide. A name must not - contain: white space brackets < > { } [ ] wildcard characters ? * special - characters \" # % ^ | ~ ` & , ; : / control characters (U+0000-001F, U+007F-009F) - To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and - _. +- `"name"`: Optional name of the execution. This name must be unique for your Amazon Web + Services account, Region, and state machine for 90 days. For more information, see Limits + Related to State Machine Executions in the Step Functions Developer Guide. If you don't + provide a name for the execution, Step Functions automatically generates a universally + unique identifier (UUID) as the execution name. A name must not contain: white space + brackets < > { } [ ] wildcard characters ? * special characters \" # % ^ | ~ ` + & , ; : / control characters (U+0000-001F, U+007F-009F) To enable logging with + CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. - `"traceHeader"`: Passes the X-Ray trace header. The trace header can also be passed in the request payload. """ @@ -1021,6 +1528,77 @@ function tag_resource( ) end +""" + test_state(definition, role_arn) + test_state(definition, role_arn, params::Dict{String,<:Any}) + +Accepts the definition of a single state and executes it. 
You can test a state without +creating a state machine or updating an existing state machine. Using this API, you can +test the following: A state's input and output processing data flow An Amazon Web +Services service integration request and response An HTTP Task request and response You +can call this API on only one state at a time. The states that you can test include the +following: All Task types except Activity Pass Wait Choice Succeed +Fail The TestState API assumes an IAM role which must contain the required IAM +permissions for the resources your state is accessing. For information about the +permissions a state might need, see IAM permissions to test a state. The TestState API can +run for up to five minutes. If the execution of a state exceeds this duration, it fails +with the States.Timeout error. TestState doesn't support Activity tasks, .sync or +.waitForTaskToken service integration patterns, Parallel, or Map states. + +# Arguments +- `definition`: The Amazon States Language (ASL) definition of the state. +- `role_arn`: The Amazon Resource Name (ARN) of the execution role with the required IAM + permissions for the state. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"input"`: A string that contains the JSON input data for the state. +- `"inspectionLevel"`: Determines the values to return when a state is tested. You can + specify one of the following types: INFO: Shows the final state output. By default, Step + Functions sets inspectionLevel to INFO if you don't specify a level. DEBUG: Shows the + final state output along with the input and output data processing result. TRACE: Shows + the HTTP request and response for an HTTP Task. This level also shows the final state + output along with the input and output data processing result. Each of these levels also + provide information about the status of the state execution and the next state to + transition to. +- `"revealSecrets"`: Specifies whether or not to include secret information in the test + result. For HTTP Tasks, a secret includes the data that an EventBridge connection adds to + modify the HTTP request headers, query parameters, and body. Step Functions doesn't omit + any information included in the state definition or the HTTP response. If you set + revealSecrets to true, you must make sure that the IAM user that calls the TestState API + has permission for the states:RevealSecrets action. For an example of IAM policy that sets + the states:RevealSecrets permission, see IAM permissions to test a state. Without this + permission, Step Functions throws an access denied error. By default, revealSecrets is set + to false. 
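# Example
A minimal usage sketch that tests a single Pass state at the DEBUG inspection level; the
state definition and role ARN are illustrative placeholders:

    test_state(
        "{\"Type\": \"Pass\", \"Result\": {\"ok\": true}, \"End\": true}",  # ASL for one state
        "arn:aws:iam::123456789012:role/StepFunctionsTestRole",
        Dict("input" => "{}", "inspectionLevel" => "DEBUG"),
    )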
+""" +function test_state(definition, roleArn; aws_config::AbstractAWSConfig=global_aws_config()) + return sfn( + "TestState", + Dict{String,Any}("definition" => definition, "roleArn" => roleArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function test_state( + definition, + roleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "TestState", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("definition" => definition, "roleArn" => roleArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) @@ -1111,15 +1689,30 @@ end Updates an existing state machine by modifying its definition, roleArn, or loggingConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a -MissingRequiredParameter error. If the given state machine Amazon Resource Name (ARN) is a -qualified state machine ARN, it will fail with ValidationException. A qualified state -machine ARN refers to a Distributed Map state defined within a state machine. For example, -the qualified state machine ARN +MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map +state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. All StartExecution calls within a few seconds will use the updated -definition and roleArn. Executions started immediately after calling UpdateStateMachine may -use the previous state machine definition and roleArn. +stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state +defined within a state machine, a version ARN, or an alias ARN. The following are some +examples of qualified and unqualified state machine ARNs: The following qualified state +machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine +named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following qualified state machine ARN refers to an +alias named PROD. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne:PROD> If you provide a qualified state machine ARN that refers to a version ARN or +an alias ARN, the request starts execution for that version or alias. The following +unqualified state machine ARN refers to a state machine named myStateMachine. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne> After you update your state machine, you can set the publish parameter to true in +the same action to publish a new version. This way, you can opt-in to strict versioning of +your state machine. Step Functions assigns monotonically increasing integers for state +machine versions, starting at version number 1. All StartExecution calls within a few +seconds use the updated definition and roleArn. Executions started immediately after you +call UpdateStateMachine may use the previous state machine definition and roleArn. 
# Arguments - `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine. @@ -1128,10 +1721,14 @@ use the previous state machine definition and roleArn. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"definition"`: The Amazon States Language definition of the state machine. See Amazon States Language. -- `"loggingConfiguration"`: The LoggingConfiguration data type is used to set CloudWatch - Logs options. +- `"loggingConfiguration"`: Use the LoggingConfiguration data type to set CloudWatch Logs + options. +- `"publish"`: Specifies whether the state machine version is published. The default is + false. To publish a version after updating the state machine, set publish to true. - `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role of the state machine. - `"tracingConfiguration"`: Selects whether X-Ray tracing is enabled. +- `"versionDescription"`: An optional description of the state machine version to publish. + You can only specify the versionDescription parameter if you've set publish to true. """ function update_state_machine( stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1159,3 +1756,106 @@ function update_state_machine( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_state_machine_alias(state_machine_alias_arn) + update_state_machine_alias(state_machine_alias_arn, params::Dict{String,<:Any}) + +Updates the configuration of an existing state machine alias by modifying its description +or routingConfiguration. You must specify at least one of the description or +routingConfiguration parameters to update a state machine alias. UpdateStateMachineAlias +is an idempotent API. Step Functions bases the idempotency check on the +stateMachineAliasArn, description, and routingConfiguration parameters. Requests with the +same parameters return an idempotent response. This operation is eventually consistent. +All StartExecution requests made within a few seconds use the latest alias configuration. +Executions started immediately after calling UpdateStateMachineAlias may use the previous +routing configuration. Related operations: CreateStateMachineAlias +DescribeStateMachineAlias ListStateMachineAliases DeleteStateMachineAlias + +# Arguments +- `state_machine_alias_arn`: The Amazon Resource Name (ARN) of the state machine alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the state machine alias. +- `"routingConfiguration"`: The routing configuration of the state machine alias. An array + of RoutingConfig objects that specifies up to two state machine versions that the alias + starts executions for. 
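# Example
A minimal usage sketch, assuming placeholder ARNs and the `stateMachineVersionArn`/`weight`
field names from the Step Functions RoutingConfig shape, that points all alias traffic at
version 2:

    version_arn = "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:2"
    update_state_machine_alias(
        "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:PROD",
        Dict(
            "routingConfiguration" =>
                [Dict("stateMachineVersionArn" => version_arn, "weight" => 100)],
        ),
    )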
+""" +function update_state_machine_alias( + stateMachineAliasArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "UpdateStateMachineAlias", + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_state_machine_alias( + stateMachineAliasArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "UpdateStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + validate_state_machine_definition(definition) + validate_state_machine_definition(definition, params::Dict{String,<:Any}) + +Validates the syntax of a state machine definition. You can validate that a state machine +definition is correct without creating a state machine resource. Step Functions will +implicitly perform the same syntax check when you invoke CreateStateMachine and +UpdateStateMachine. State machine definitions are specified using a JSON-based, structured +language. For more information on Amazon States Language see Amazon States Language (ASL). +Suggested uses for ValidateStateMachineDefinition: Integrate automated checks into your +code review or Continuous Integration (CI) process to validate state machine definitions +before starting deployments. Run the validation from a Git pre-commit hook to check your +state machine definitions before committing them to your source repository. Errors found +in the state machine definition will be returned in the response as a list of diagnostic +elements, rather than raise an exception. + +# Arguments +- `definition`: The Amazon States Language definition of the state machine. For more + information, see Amazon States Language (ASL). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"type"`: The target type of state machine for this definition. The default is STANDARD. +""" +function validate_state_machine_definition( + definition; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "ValidateStateMachineDefinition", + Dict{String,Any}("definition" => definition); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function validate_state_machine_definition( + definition, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "ValidateStateMachineDefinition", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("definition" => definition), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/signer.jl b/src/services/signer.jl index ca712742f9..581701a578 100644 --- a/src/services/signer.jl +++ b/src/services/signer.jl @@ -11,7 +11,15 @@ using AWS.UUIDs Adds cross-account permissions to a signing profile. # Arguments -- `action`: The AWS Signer action permitted as part of cross-account permissions. +- `action`: For cross-account signing. Grant a designated account permission to perform one + or more of the following actions. Each action is associated with a specific API's + operations. For more information about cross-account signing, see Using cross-account + signing with signing profiles in the AWS Signer Developer Guide. You can designate the + following actions to an account. signer:StartSigningJob. 
This action isn't supported for + container image workflows. For details, see StartSigningJob. signer:SignPayload. This + action isn't supported for AWS Lambda workflows. For details, see SignPayload + signer:GetSigningProfile. For details, see GetSigningProfile. signer:RevokeSignature. + For details, see RevokeSignature. - `principal`: The AWS principal receiving cross-account permissions. This may be an IAM role or another AWS account ID. - `profile_name`: The human-readable name of the signing profile. @@ -144,7 +152,13 @@ signing certificate. - `certificate_hashes`: A list of composite signed hashes that identify certificates. A certificate identifier consists of a subject certificate TBS hash (signed by the parent CA) combined with a parent CA TBS hash (signed by the parent CA’s CA). Root certificates are - defined as their own CA. + defined as their own CA. The following example shows how to calculate a hash for this + parameter using OpenSSL commands: openssl asn1parse -in childCert.pem -strparse 4 -out + childCert.tbs openssl sha384 < childCert.tbs -binary > childCertTbsHash openssl + asn1parse -in parentCert.pem -strparse 4 -out parentCert.tbs openssl sha384 < + parentCert.tbs -binary > parentCertTbsHash xxd -p childCertTbsHash > + certificateHash.hex xxd -p parentCertTbsHash >> certificateHash.hex cat + certificateHash.hex | tr -d 'n' - `job_arn`: The ARN of a signing job. - `platform_id`: The ID of a signing platform. - `profile_version_arn`: The version of a signing profile. @@ -312,11 +326,11 @@ end list_signing_jobs(params::Dict{String,<:Any}) Lists all your signing jobs. You can use the maxResults parameter to limit the number of -signing jobs that are returned in the response. If additional jobs remain to be listed, -code signing returns a nextToken value. Use this value in subsequent calls to -ListSigningJobs to fetch the remaining values. You can continue calling ListSigningJobs -with your maxResults parameter and with new values that code signing returns in the -nextToken parameter until all of your signing jobs have been returned. +signing jobs that are returned in the response. If additional jobs remain to be listed, AWS +Signer returns a nextToken value. Use this value in subsequent calls to ListSigningJobs to +fetch the remaining values. You can continue calling ListSigningJobs with your maxResults +parameter and with new values that Signer returns in the nextToken parameter until all of +your signing jobs have been returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -360,12 +374,11 @@ end list_signing_platforms() list_signing_platforms(params::Dict{String,<:Any}) -Lists all signing platforms available in code signing that match the request parameters. If -additional jobs remain to be listed, code signing returns a nextToken value. Use this value -in subsequent calls to ListSigningJobs to fetch the remaining values. You can continue -calling ListSigningJobs with your maxResults parameter and with new values that code -signing returns in the nextToken parameter until all of your signing jobs have been -returned. +Lists all signing platforms available in AWS Signer that match the request parameters. If +additional jobs remain to be listed, Signer returns a nextToken value. Use this value in +subsequent calls to ListSigningJobs to fetch the remaining values. 
You can continue calling +ListSigningJobs with your maxResults parameter and with new values that Signer returns in +the nextToken parameter until all of your signing jobs have been returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -400,10 +413,10 @@ end Lists all available signing profiles in your AWS account. Returns only profiles with an ACTIVE status unless the includeCanceled request field is set to true. If additional jobs -remain to be listed, code signing returns a nextToken value. Use this value in subsequent +remain to be listed, AWS Signer returns a nextToken value. Use this value in subsequent calls to ListSigningJobs to fetch the remaining values. You can continue calling -ListSigningJobs with your maxResults parameter and with new values that code signing -returns in the nextToken parameter until all of your signing jobs have been returned. +ListSigningJobs with your maxResults parameter and with new values that Signer returns in +the nextToken parameter until all of your signing jobs have been returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -472,7 +485,7 @@ end put_signing_profile(platform_id, profile_name) put_signing_profile(platform_id, profile_name, params::Dict{String,<:Any}) -Creates a signing profile. A signing profile is a code signing template that can be used to +Creates a signing profile. A signing profile is a code-signing template that can be used to carry out a pre-defined signing job. # Arguments @@ -671,7 +684,8 @@ Signs a binary payload and returns a signature envelope. # Arguments - `payload`: Specifies the object digest (hash) to sign. -- `payload_format`: Payload content type +- `payload_format`: Payload content type. The single valid type is + application/vnd.cncf.notary.payload.v1+json. - `profile_name`: The name of the signing profile. # Optional Parameters @@ -727,12 +741,13 @@ Initiates a signing job to be performed on the code provided. Signing jobs are v the ListSigningJobs operation for two years after they are performed. Note the following requirements: You must create an Amazon S3 source bucket. For more information, see Creating a Bucket in the Amazon S3 Getting Started Guide. Your S3 source bucket must be -version enabled. You must create an S3 destination bucket. Code signing uses your S3 +version enabled. You must create an S3 destination bucket. AWS Signer uses your S3 destination bucket to write your signed code. You specify the name of the source and -destination buckets when calling the StartSigningJob operation. You must also specify a -request token that identifies your request to code signing. You can call the -DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob. For a -Java example that shows how to use this action, see StartSigningJob. +destination buckets when calling the StartSigningJob operation. You must ensure the S3 +buckets are from the same Region as the signing profile. Cross-Region signing isn't +supported. You must also specify a request token that identifies your request to Signer. + You can call the DescribeSigningJob and the ListSigningJobs actions after you call +StartSigningJob. For a Java example that shows how to use this action, see StartSigningJob. # Arguments - `client_request_token`: String that identifies the signing request. 
All calls after the diff --git a/src/services/snowball.jl b/src/services/snowball.jl index 94eea5ec34..8bf98a7d86 100644 --- a/src/services/snowball.jl +++ b/src/services/snowball.jl @@ -78,7 +78,9 @@ end Creates an address for a Snow device to be shipped to. In most regions, addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. If the address is invalid or unsupported, then an -exception is thrown. +exception is thrown. If providing an address as a JSON file through the cli-input-json +option, include the full file path. For example, --cli-input-json +file://create-address.json. # Arguments - `address`: The address that you want the Snow device shipped to. @@ -244,14 +246,14 @@ Description: Snowcone Device type: EDGE_S Capacity: T98 Description: Edge Storage Optimized for data transfer only Device type: EDGE_CG Capacity: T42 Description: Snowball Edge Compute Optimized with GPU Device type: EDGE_C Capacity: T42 Description: Snowball Edge Compute Optimized without GPU Device type: EDGE -Capacity: T100 Description: Snowball Edge Storage Optimized with EC2 Compute Device -type: STANDARD Capacity: T50 Description: Original Snowball device This device is -only available in the Ningxia, Beijing, and Singapore Amazon Web Services Region -Device type: STANDARD Capacity: T80 Description: Original Snowball device This device -is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Region. -Device type: V3_5C Capacity: T32 Description: Snowball Edge Compute Optimized without -GPU Device type: V3_5S Capacity: T240 Description: Snowball Edge Storage -Optimized 210TB +Capacity: T100 Description: Snowball Edge Storage Optimized with EC2 Compute This +device is replaced with T98. Device type: STANDARD Capacity: T50 Description: +Original Snowball device This device is only available in the Ningxia, Beijing, and +Singapore Amazon Web Services Region Device type: STANDARD Capacity: T80 +Description: Original Snowball device This device is only available in the Ningxia, +Beijing, and Singapore Amazon Web Services Region. Snow Family device type: +RACK_5U_C Capacity: T13 Description: Snowblade. Device type: V3_5S Capacity: +T240 Description: Snowball Edge Storage Optimized 210TB # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -269,6 +271,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys (Snow Family Devices and Capacity) in the Snowcone User Guide. - `"ForwardingAddressId"`: The forwarding address ID for a job. This field is not supported in most Regions. +- `"ImpactLevel"`: The highest impact level of data that will be stored or processed on the + device, provided at job creation. - `"JobType"`: Defines the type of job that you're creating. - `"KmsKeyARN"`: The KmsKeyARN that you want to associate with this job. KmsKeyARNs are created using the CreateKey Key Management Service (KMS) API action. @@ -279,10 +283,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys device that your transferred data will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File System) and the Amazon Web Services Storage Gateway service Tape Gateway type. +- `"PickupDetails"`: Information identifying the person picking up the device. 
- `"RemoteManagement"`: Allows you to securely operate and manage Snowcone devices remotely from outside of your internal network. When set to INSTALLED_AUTOSTART, remote management will automatically be available when the device arrives at your location. Otherwise, you - need to use the Snowball Client to manage the device. + need to use the Snowball Edge client to manage the device. When set to NOT_INSTALLED, + remote management will not be available on the device. - `"Resources"`: Defines the Amazon S3 buckets associated with this job. With IMPORT jobs, you specify the bucket or buckets that your transferred data will be imported into. With EXPORT jobs, you specify the bucket or buckets that your transferred data will be exported @@ -330,8 +336,8 @@ function create_job( end """ - create_long_term_pricing(long_term_pricing_type) - create_long_term_pricing(long_term_pricing_type, params::Dict{String,<:Any}) + create_long_term_pricing(long_term_pricing_type, snowball_type) + create_long_term_pricing(long_term_pricing_type, snowball_type, params::Dict{String,<:Any}) Creates a job with the long-term usage option for a device. The long-term usage is a 1-year or 3-year long-term pricing type for the device. You are billed upfront, and Amazon Web @@ -340,25 +346,28 @@ Services provides discounts for long-term pricing. # Arguments - `long_term_pricing_type`: The type of long-term pricing option you want for the device, either 1-year or 3-year long-term pricing. +- `snowball_type`: The type of Snow Family devices to use for the long-term pricing job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IsLongTermPricingAutoRenew"`: Specifies whether the current long-term pricing type for the device should be renewed. -- `"SnowballType"`: The type of Snow Family devices to use for the long-term pricing job. """ function create_long_term_pricing( - LongTermPricingType; aws_config::AbstractAWSConfig=global_aws_config() + LongTermPricingType, SnowballType; aws_config::AbstractAWSConfig=global_aws_config() ) return snowball( "CreateLongTermPricing", - Dict{String,Any}("LongTermPricingType" => LongTermPricingType); + Dict{String,Any}( + "LongTermPricingType" => LongTermPricingType, "SnowballType" => SnowballType + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_long_term_pricing( LongTermPricingType, + SnowballType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -367,7 +376,10 @@ function create_long_term_pricing( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("LongTermPricingType" => LongTermPricingType), + Dict{String,Any}( + "LongTermPricingType" => LongTermPricingType, + "SnowballType" => SnowballType, + ), params, ), ); @@ -778,10 +790,10 @@ end list_compatible_images() list_compatible_images(params::Dict{String,<:Any}) -This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs) that -are owned by your Amazon Web Services accountthat would be supported for use on a Snow -device. Currently, supported AMIs are based on the Amazon Linux-2, Ubuntu 20.04 LTS - -Focal, or Ubuntu 22.04 LTS - Jammy images, available on the Amazon Web Services +This action returns a list of the different Amazon EC2-compatible Amazon Machine Images +(AMIs) that are owned by your Amazon Web Services accountthat would be supported for use on +a Snow device. 
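# The CreateLongTermPricing hunk above makes snowball_type a required positional
# argument. A minimal sketch of the updated call using AWS.jl's high-level `@service`
# bindings; the pricing type, device type, and auto-renew value are illustrative only.
using AWS
@service Snowball

Snowball.create_long_term_pricing(
    "OneYear",   # LongTermPricingType
    "EDGE",      # SnowballType, now required rather than an optional parameter
    Dict{String,Any}("IsLongTermPricingAutoRenew" => false),
)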
Currently, supported AMIs are based on the Amazon Linux-2, Ubuntu 20.04 LTS +- Focal, or Ubuntu 22.04 LTS - Jammy images, available on the Amazon Web Services Marketplace. Ubuntu 16.04 LTS - Xenial (HVM) images are no longer supported in the Market, but still supported for use on devices through Amazon EC2 VM Import/Export and running locally in AMIs. @@ -865,6 +877,35 @@ function list_long_term_pricing( ) end +""" + list_pickup_locations() + list_pickup_locations(params::Dict{String,<:Any}) + +A list of locations from which the customer can choose to pickup a device. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of locations to list per page. +- `"NextToken"`: HTTP requests are stateless. To identify what object comes \"next\" in the + list of ListPickupLocationsRequest objects, you have the option of specifying NextToken as + the starting point for your returned list. +""" +function list_pickup_locations(; aws_config::AbstractAWSConfig=global_aws_config()) + return snowball( + "ListPickupLocations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_pickup_locations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return snowball( + "ListPickupLocations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_service_versions(service_name) list_service_versions(service_name, params::Dict{String,<:Any}) @@ -985,6 +1026,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys device that your transferred data will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File System) and the Amazon Web Services Storage Gateway service Tape Gateway type. +- `"PickupDetails"`: - `"Resources"`: The updated JobResource object, or the updated JobResource object. - `"RoleARN"`: The new role Amazon Resource Name (ARN) that you want to associate with this job. To create a role ARN, use the CreateRoleIdentity and Access Management (IAM) API diff --git a/src/services/sns.jl b/src/services/sns.jl index 9755c57124..5062cf546d 100644 --- a/src/services/sns.jl +++ b/src/services/sns.jl @@ -164,18 +164,23 @@ such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile app register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action. PlatformPrincipal and PlatformCredential are received from the notification service. For ADM, PlatformPrincipal is client id and -PlatformCredential is client secret. For Baidu, PlatformPrincipal is API key and -PlatformCredential is secret key. For APNS and APNS_SANDBOX using certificate +PlatformCredential is client secret. For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal is SSL certificate and PlatformCredential is private key. For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is signing key ID and -PlatformCredential is signing key. For GCM (Firebase Cloud Messaging), there is no -PlatformPrincipal and the PlatformCredential is API key. For MPNS, PlatformPrincipal is -TLS certificate and PlatformCredential is private key. For WNS, PlatformPrincipal is -Package Security Identifier and PlatformCredential is secret key. You can use the -returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action. +PlatformCredential is signing key. 
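# Minimal sketch of the new ListPickupLocations operation added above, via AWS.jl's
# high-level `@service` bindings. The "Addresses" and "NextToken" response keys are
# assumptions about the result shape, not taken from this patch.
using AWS
@service Snowball

resp = Snowball.list_pickup_locations(Dict{String,Any}("MaxResults" => 10))
for address in get(resp, "Addresses", [])
    println(get(address, "City", "<unknown city>"))
end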
For Baidu, PlatformPrincipal is API key and +PlatformCredential is secret key. For GCM (Firebase Cloud Messaging) using key +credentials, there is no PlatformPrincipal. The PlatformCredential is API key. For GCM +(Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal. The +PlatformCredential is a JSON formatted private key file. When using the Amazon Web Services +CLI, the file must be in string format and special characters must be ignored. To format +the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq +@json <<< cat service.json`. For MPNS, PlatformPrincipal is TLS certificate and +PlatformCredential is private key. For WNS, PlatformPrincipal is Package Security +Identifier and PlatformCredential is secret key. You can use the returned +PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action. # Arguments -- `attributes`: For a list of attributes, see SetPlatformApplicationAttributes. +- `attributes`: For a list of attributes, see SetPlatformApplicationAttributes . - `name`: Application names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, hyphens, and periods, and must be between 1 and 256 characters long. - `platform`: The following platforms are supported: ADM (Amazon Device Messaging), APNS @@ -233,7 +238,7 @@ attributes must be provided: ChannelId and UserId. The token field must also con ChannelId. For more information, see Creating an Amazon SNS Endpoint for Baidu. # Arguments -- `platform_application_arn`: PlatformApplicationArn returned from +- `platform_application_arn`: PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint. - `token`: Unique identifier created by the notification service for an app on a device. The specific name for Token will vary, depending on which notification service is being @@ -243,7 +248,7 @@ ChannelId. For more information, see Creating an Amazon SNS Endpoint for Baidu. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Attributes"`: For a list of attributes, see SetEndpointAttributes. +- `"Attributes"`: For a list of attributes, see SetEndpointAttributes . - `"CustomUserData"`: Arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB. """ @@ -347,7 +352,7 @@ a new topic. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Attributes"`: A map of attributes with their corresponding values. The following lists - the names, descriptions, and values of the special request parameters that the CreateTopic + names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The @@ -363,15 +368,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API - Reference. 
The following attributes apply only to FIFO topics: FifoTopic – When - this is set to true, a FIFO topic is created. ContentBasedDeduplication – Enables - content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set - to false. If you create a FIFO topic and this attribute is false, you must specify a value - for the MessageDeduplicationId parameter for the Publish action. When you set - ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the - MessageDeduplicationId using the body of the message (but not the attributes of the - message). (Optional) To override the generated value, you can specify a value for the - MessageDeduplicationId parameter for the Publish action. + Reference. The following attributes apply only to FIFO topics: ArchivePolicy – Adds + or updates an inline policy document to archive messages stored in the specified Amazon SNS + topic. BeginningArchiveTime – The earliest starting point at which a message in the + topic’s archive can be replayed from. This point in time is based on the configured + message retention period set by the topic’s message archiving policy. + ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By + default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this + attribute is false, you must specify a value for the MessageDeduplicationId parameter for + the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a + SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not + the attributes of the message). (Optional) To override the generated value, you can specify + a value for the MessageDeduplicationId parameter for the Publish action. - `"DataProtectionPolicy"`: The body of the policy document you want to use for this topic. You can only add one policy per topic. The policy must be in JSON string format. Length Constraints: Maximum length of 30,720. @@ -407,7 +415,7 @@ delete an endpoint that is also subscribed to a topic, then you must also unsubs endpoint from the topic. # Arguments -- `endpoint_arn`: EndpointArn of endpoint to delete. +- `endpoint_arn`: EndpointArn of endpoint to delete. """ function delete_endpoint(EndpointArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -442,7 +450,7 @@ such as APNS and GCM (Firebase Cloud Messaging). For more information, see Using Mobile Push Notifications. # Arguments -- `platform_application_arn`: PlatformApplicationArn of platform application object to +- `platform_application_arn`: PlatformApplicationArn of platform application object to delete. """ @@ -599,7 +607,7 @@ services, such as GCM (Firebase Cloud Messaging) and APNS. For more information, Amazon SNS Mobile Push Notifications. # Arguments -- `endpoint_arn`: EndpointArn for GetEndpointAttributes input. +- `endpoint_arn`: EndpointArn for GetEndpointAttributes input. """ function get_endpoint_attributes( @@ -636,7 +644,7 @@ notification services, such as APNS and GCM (Firebase Cloud Messaging). For more information, see Using Amazon SNS Mobile Push Notifications. # Arguments -- `platform_application_arn`: PlatformApplicationArn for +- `platform_application_arn`: PlatformApplicationArn for GetPlatformApplicationAttributesInput. """ @@ -809,12 +817,12 @@ information, see Using Amazon SNS Mobile Push Notifications. This action is thr 30 transactions per second (TPS). 
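# Sketch of creating a FIFO topic with the FIFO-only attributes discussed above,
# using AWS.jl's high-level `@service` bindings. The attributes map is written in its
# flattened query-protocol form ("Attributes.entry.N.key"/".value"); the topic name is
# illustrative, a nested Dict may also be accepted depending on how the client
# serializes query-protocol maps, and the ArchivePolicy attribute described above
# would be supplied the same way as an additional entry.
using AWS
@service SNS

SNS.create_topic(
    "orders.fifo",
    Dict{String,Any}(
        "Attributes.entry.1.key"   => "FifoTopic",
        "Attributes.entry.1.value" => "true",
        "Attributes.entry.2.key"   => "ContentBasedDeduplication",
        "Attributes.entry.2.value" => "true",
    ),
)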
# Arguments -- `platform_application_arn`: PlatformApplicationArn for +- `platform_application_arn`: PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"NextToken"`: NextToken string is used when calling ListEndpointsByPlatformApplication +- `"NextToken"`: NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results. """ function list_endpoints_by_platform_application( @@ -922,7 +930,7 @@ is throttled at 15 transactions per second (TPS). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"NextToken"`: NextToken string is used when calling ListPlatformApplications action to +- `"NextToken"`: NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results. """ function list_platform_applications(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1201,9 +1209,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value for the TargetArn or TopicArn parameters. - `"Subject"`: Optional parameter to be used as the \"Subject\" line when the message is delivered to email endpoints. This field will also be included, if present, in the standard - JSON messages delivered to other endpoints. Constraints: Subjects must be ASCII text that - begins with a letter, number, or punctuation mark; must not include line breaks or control - characters; and must be less than 100 characters long. + JSON messages delivered to other endpoints. Constraints: Subjects must be UTF-8 text with + no line breaks or control characters, and less than 100 characters long. - `"TargetArn"`: If you don't specify a value for the TargetArn parameter, you must specify a value for the PhoneNumber or TopicArn parameters. - `"TopicArn"`: The topic you want to publish to. If you don't specify a value for the @@ -1447,15 +1454,20 @@ delivery status, see Using Amazon SNS Application Attributes for Message Deliver notification service. For ADM, PlatformCredentialis client secret. For Apple Services using certificate credentials, PlatformCredential is private key. For Apple Services using token credentials, PlatformCredential is signing key. For GCM (Firebase Cloud - Messaging), PlatformCredential is API key. PlatformPrincipal – The principal - received from the notification service. For ADM, PlatformPrincipalis client id. For - Apple Services using certificate credentials, PlatformPrincipal is SSL certificate. For - Apple Services using token credentials, PlatformPrincipal is signing key ID. For GCM - (Firebase Cloud Messaging), there is no PlatformPrincipal. EventEndpointCreated – - Topic ARN to which EndpointCreated event notifications are sent. EventEndpointDeleted - – Topic ARN to which EndpointDeleted event notifications are sent. - EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications are sent. - EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications are sent + Messaging) using key credentials, there is no PlatformPrincipal. The PlatformCredential is + API key. For GCM (Firebase Cloud Messaging) using token credentials, there is no + PlatformPrincipal. The PlatformCredential is a JSON formatted private key file. 
When using + the Amazon Web Services CLI, the file must be in string format and special characters must + be ignored. To format the file correctly, Amazon SNS recommends using the following + command: SERVICE_JSON=`jq @json <<< cat service.json`. PlatformPrincipal + – The principal received from the notification service. For ADM, PlatformPrincipalis + client id. For Apple Services using certificate credentials, PlatformPrincipal is SSL + certificate. For Apple Services using token credentials, PlatformPrincipal is signing key + ID. For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal. + EventEndpointCreated – Topic ARN to which EndpointCreated event notifications are sent. + EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications are sent. + EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications are sent. + EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints. SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf. FailureFeedbackRoleArn – IAM role ARN used to give @@ -1464,7 +1476,7 @@ delivery status, see Using Amazon SNS Application Attributes for Message Deliver attributes only apply to APNs token-based authentication: ApplePlatformTeamID – The identifier that's assigned to your Apple developer account team. ApplePlatformBundleID – The bundle identifier that's assigned to your iOS app. -- `platform_application_arn`: PlatformApplicationArn for SetPlatformApplicationAttributes +- `platform_application_arn`: PlatformApplicationArn for SetPlatformApplicationAttributes action. """ @@ -1598,13 +1610,12 @@ Allows a subscription owner to set an attribute of the subscription to a new val dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue - for further analysis or reprocessing. The following attribute applies only to Amazon - Kinesis Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of - the IAM role that has the following: Permission to write to the Kinesis Data Firehose - delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this - attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more - information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS - Developer Guide. + for further analysis or reprocessing. The following attribute applies only to Amazon Data + Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role + that has the following: Permission to write to the Firehose delivery stream Amazon SNS + listed as a trusted entity Specifying a valid ARN for this attribute is required for + Firehose delivery stream subscriptions. For more information, see Fanout to Firehose + delivery streams in the Amazon SNS Developer Guide. - `subscription_arn`: The ARN of the subscription to modify. # Optional Parameters @@ -1763,7 +1774,7 @@ Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or if the endpoint and the topic are not in the same Amazon Web Services account, the endpoint owner must run the ConfirmSubscription action to confirm the subscription. 
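# Sketch of the SubscriptionRoleArn attribute for Amazon Data Firehose delivery-stream
# subscriptions described in the SetSubscriptionAttributes hunk above, using AWS.jl's
# high-level `@service` bindings; both ARNs are placeholders.
using AWS
@service SNS

subscription_arn = "arn:aws:sns:us-east-1:123456789012:MyTopic:example-subscription-id"  # placeholder
firehose_role_arn = "arn:aws:iam::123456789012:role/sns-firehose-publish-role"            # placeholder

SNS.set_subscription_attributes(
    "SubscriptionRoleArn",   # AttributeName
    subscription_arn,
    Dict{String,Any}("AttributeValue" => firehose_role_arn),
)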
You call the ConfirmSubscription action with the token from the subscription response. Confirmation -tokens are valid for three days. This action is throttled at 100 transactions per second +tokens are valid for two days. This action is throttled at 100 transactions per second (TPS). # Arguments @@ -1794,13 +1805,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue - for further analysis or reprocessing. The following attribute applies only to Amazon - Kinesis Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of - the IAM role that has the following: Permission to write to the Kinesis Data Firehose - delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this - attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more - information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS - Developer Guide. + for further analysis or reprocessing. The following attribute applies only to Amazon Data + Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role + that has the following: Permission to write to the Firehose delivery stream Amazon SNS + listed as a trusted entity Specifying a valid ARN for this attribute is required for + Firehose delivery stream subscriptions. For more information, see Fanout to Firehose + delivery streams in the Amazon SNS Developer Guide. The following attributes apply only + to FIFO topics: ReplayPolicy – Adds or updates an inline policy document for a + subscription to replay messages stored in the specified Amazon SNS topic. ReplayStatus + – Retrieves the status of the subscription message replay, which can be one of the + following: Completed – The replay has successfully redelivered all messages, and is + now delivering newly published messages. If an ending point was specified in the + ReplayPolicy then the subscription will no longer receive newly published messages. In + progress – The replay is currently replaying the selected messages. Failed – The + replay was unable to complete. Pending – The default state while the replay initiates. + - `"Endpoint"`: The endpoint that you want to receive notifications. Endpoints vary by protocol: For the http protocol, the (public) endpoint is a URL beginning with http://. For the https protocol, the (public) endpoint is a URL beginning with https://. For the diff --git a/src/services/sqs.jl b/src/services/sqs.jl index 5fd3398d9b..57ce27cad4 100644 --- a/src/services/sqs.jl +++ b/src/services/sqs.jl @@ -5,8 +5,8 @@ using AWS.Compat using AWS.UUIDs """ - add_permission(awsaccount_id, action_name, label, queue_url) - add_permission(awsaccount_id, action_name, label, queue_url, params::Dict{String,<:Any}) + add_permission(awsaccount_ids, actions, label, queue_url) + add_permission(awsaccount_ids, actions, label, queue_url, params::Dict{String,<:Any}) Adds a permission to a queue for a specific principal. This allows sharing access to the queue. When you create a queue, you have full control access rights for the queue. Only @@ -23,10 +23,10 @@ permissions don't apply to this action. For more information, see Grant cross-ac permissions to a role and a username in the Amazon SQS Developer Guide. 
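# Sketch of the Subscribe call whose attributes and endpoint protocols are documented
# above: subscribing an SQS queue to a topic, using AWS.jl's high-level `@service`
# bindings; the ARNs are placeholders. FIFO-only attributes such as ReplayPolicy would
# be supplied through the "Attributes" map in the same params Dict.
using AWS
@service SNS

topic_arn = "arn:aws:sns:us-east-1:123456789012:MyTopic"                 # placeholder
queue_arn = "arn:aws:sqs:us-east-1:123456789012:my-subscriber-queue"     # placeholder

SNS.subscribe(
    "sqs",       # Protocol
    topic_arn,
    Dict{String,Any}("Endpoint" => queue_arn, "ReturnSubscriptionArn" => "true"),
)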
# Arguments -- `awsaccount_id`: The Amazon Web Services account numbers of the principals who are to +- `awsaccount_ids`: The Amazon Web Services account numbers of the principals who are to receive permission. For information about locating the Amazon Web Services account identification, see Your Amazon Web Services Identifiers in the Amazon SQS Developer Guide. -- `action_name`: The action the client wants to allow for the specified principal. Valid +- `actions`: The action the client wants to allow for the specified principal. Valid values: the name of any action or *. For more information about these actions, see Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource in the Amazon SQS Developer Guide. Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for @@ -38,21 +38,10 @@ permissions to a role and a username in the Amazon SQS Developer Guide. - `queue_url`: The URL of the Amazon SQS queue to which permissions are added. Queue URLs and names are case-sensitive. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AWSAccountIds"`: The Amazon Web Services account numbers of the principals who are to - receive permission. For information about locating the Amazon Web Services account - identification, see Your Amazon Web Services Identifiers in the Amazon SQS Developer Guide. -- `"Actions"`: The action the client wants to allow for the specified principal. Valid - values: the name of any action or *. For more information about these actions, see Overview - of Managing Access Permissions to Your Amazon Simple Queue Service Resource in the Amazon - SQS Developer Guide. Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for - ActionName.n also grants permissions for the corresponding batch versions of those actions: - SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch. """ function add_permission( - AWSAccountId, - ActionName, + AWSAccountIds, + Actions, Label, QueueUrl; aws_config::AbstractAWSConfig=global_aws_config(), @@ -60,8 +49,8 @@ function add_permission( return sqs( "AddPermission", Dict{String,Any}( - "AWSAccountId" => AWSAccountId, - "ActionName" => ActionName, + "AWSAccountIds" => AWSAccountIds, + "Actions" => Actions, "Label" => Label, "QueueUrl" => QueueUrl, ); @@ -70,8 +59,8 @@ function add_permission( ) end function add_permission( - AWSAccountId, - ActionName, + AWSAccountIds, + Actions, Label, QueueUrl, params::AbstractDict{String}; @@ -83,8 +72,8 @@ function add_permission( mergewith( _merge, Dict{String,Any}( - "AWSAccountId" => AWSAccountId, - "ActionName" => ActionName, + "AWSAccountIds" => AWSAccountIds, + "Actions" => Actions, "Label" => Label, "QueueUrl" => QueueUrl, ), @@ -100,10 +89,14 @@ end cancel_message_move_task(task_handle) cancel_message_move_task(task_handle, params::Dict{String,<:Any}) -Cancels a specified message movement task. A message movement can only be cancelled when -the current status is RUNNING. Cancelling a message movement task does not revert the +Cancels a specified message movement task. A message movement can only be cancelled when +the current status is RUNNING. Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been -moved yet. +moved yet. This action is currently limited to supporting message redrive from +dead-letter queues (DLQs) only. 
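# The AddPermission hunks above rename the required inputs to AWSAccountIds and
# Actions, which are now lists. A minimal sketch of the updated call using AWS.jl's
# high-level `@service` bindings; the account ID, label, and queue URL are placeholders.
using AWS
@service SQS

SQS.add_permission(
    ["123456789012"],        # AWSAccountIds
    ["SendMessage"],         # Actions
    "cross-account-send",    # Label
    "https://sqs.us-east-1.amazonaws.com/111122223333/my-queue",  # QueueUrl
)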
In this context, the source queue is the dead-letter queue +(DLQ), while the destination queue can be the original source queue (from which the +messages were driven to the dead-letter-queue), or a custom destination queue. Only one +active message movement task is supported per queue at any given time. # Arguments - `task_handle`: An identifier associated with a message movement task. @@ -223,8 +216,8 @@ function change_message_visibility( end """ - change_message_visibility_batch(change_message_visibility_batch_request_entry, queue_url) - change_message_visibility_batch(change_message_visibility_batch_request_entry, queue_url, params::Dict{String,<:Any}) + change_message_visibility_batch(entries, queue_url) + change_message_visibility_batch(entries, queue_url, params::Dict{String,<:Any}) Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually @@ -234,34 +227,24 @@ of successful and unsuccessful actions, you should check for batch errors even w call returns an HTTP status code of 200. # Arguments -- `change_message_visibility_batch_request_entry`: Lists the receipt handles of the - messages for which the visibility timeout must be changed. +- `entries`: Lists the receipt handles of the messages for which the visibility timeout + must be changed. - `queue_url`: The URL of the Amazon SQS queue whose messages' visibility is changed. Queue URLs and names are case-sensitive. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Entries"`: Lists the receipt handles of the messages for which the visibility timeout - must be changed. """ function change_message_visibility_batch( - ChangeMessageVisibilityBatchRequestEntry, - QueueUrl; - aws_config::AbstractAWSConfig=global_aws_config(), + Entries, QueueUrl; aws_config::AbstractAWSConfig=global_aws_config() ) return sqs( "ChangeMessageVisibilityBatch", - Dict{String,Any}( - "ChangeMessageVisibilityBatchRequestEntry" => - ChangeMessageVisibilityBatchRequestEntry, - "QueueUrl" => QueueUrl, - ); + Dict{String,Any}("Entries" => Entries, "QueueUrl" => QueueUrl); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function change_message_visibility_batch( - ChangeMessageVisibilityBatchRequestEntry, + Entries, QueueUrl, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -271,11 +254,7 @@ function change_message_visibility_batch( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "ChangeMessageVisibilityBatchRequestEntry" => - ChangeMessageVisibilityBatchRequestEntry, - "QueueUrl" => QueueUrl, - ), + Dict{String,Any}("Entries" => Entries, "QueueUrl" => QueueUrl), params, ), ); @@ -316,7 +295,7 @@ username in the Amazon SQS Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Attribute"`: A map of attributes with their corresponding values. The following lists +- `"Attributes"`: A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses: DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 @@ -405,9 +384,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys throughput, normal throughput is in effect and deduplication occurs as specified. For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide. -- `"Tag"`: Add cost allocation tags to the specified Amazon SQS queue. For an overview, see - Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide. When you use queue tags, - keep the following guidelines in mind: Adding more than 50 tags to a queue isn't +- `"tags"`: Add cost allocation tags to the specified Amazon SQS queue. For an overview, + see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide. When you use queue + tags, keep the following guidelines in mind: Adding more than 50 tags to a queue isn't recommended. Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings. Tags are case-sensitive. A new tag with a key identical to that of an existing tag overwrites the existing tag. For a full list of tag restrictions, see @@ -496,8 +475,8 @@ function delete_message( end """ - delete_message_batch(delete_message_batch_request_entry, queue_url) - delete_message_batch(delete_message_batch_request_entry, queue_url, params::Dict{String,<:Any}) + delete_message_batch(entries, queue_url) + delete_message_batch(entries, queue_url, params::Dict{String,<:Any}) Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the @@ -506,32 +485,23 @@ unsuccessful actions, you should check for batch errors even when the call retur status code of 200. # Arguments -- `delete_message_batch_request_entry`: Lists the receipt handles for the messages to be - deleted. +- `entries`: Lists the receipt handles for the messages to be deleted. - `queue_url`: The URL of the Amazon SQS queue from which messages are deleted. Queue URLs and names are case-sensitive. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Entries"`: Lists the receipt handles for the messages to be deleted. """ function delete_message_batch( - DeleteMessageBatchRequestEntry, - QueueUrl; - aws_config::AbstractAWSConfig=global_aws_config(), + Entries, QueueUrl; aws_config::AbstractAWSConfig=global_aws_config() ) return sqs( "DeleteMessageBatch", - Dict{String,Any}( - "DeleteMessageBatchRequestEntry" => DeleteMessageBatchRequestEntry, - "QueueUrl" => QueueUrl, - ); + Dict{String,Any}("Entries" => Entries, "QueueUrl" => QueueUrl); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function delete_message_batch( - DeleteMessageBatchRequestEntry, + Entries, QueueUrl, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -541,10 +511,7 @@ function delete_message_batch( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "DeleteMessageBatchRequestEntry" => DeleteMessageBatchRequestEntry, - "QueueUrl" => QueueUrl, - ), + Dict{String,Any}("Entries" => Entries, "QueueUrl" => QueueUrl), params, ), ); @@ -807,6 +774,11 @@ end list_message_move_tasks(source_arn, params::Dict{String,<:Any}) Gets the most recent message movement tasks (up to 10) under a specific source queue. +This action is currently limited to supporting message redrive from dead-letter queues +(DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the +destination queue can be the original source queue (from which the messages were driven to +the dead-letter-queue), or a custom destination queue. 
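# Sketch of CreateQueue with the renamed "Attributes" and "tags" request keys shown in
# the hunks above, using AWS.jl's high-level `@service` bindings; the queue name,
# retention period, and tag are illustrative.
using AWS
@service SQS

SQS.create_queue(
    "example-work-queue",
    Dict{String,Any}(
        "Attributes" => Dict("MessageRetentionPeriod" => "1209600"),  # 14 days, in seconds
        "tags" => Dict("team" => "platform"),
    ),
)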
Only one active message movement +task is supported per queue at any given time. # Arguments - `source_arn`: The ARN of the queue whose message movement tasks are to be listed. @@ -914,12 +886,12 @@ end purge_queue(queue_url) purge_queue(queue_url, params::Dict{String,<:Any}) -Deletes the messages in a queue specified by the QueueURL parameter. When you use the -PurgeQueue action, you can't retrieve any messages deleted from a queue. The message -deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of -your queue's size. Messages sent to the queue before you call PurgeQueue might be -received but are deleted within the next minute. Messages sent to the queue after you call -PurgeQueue might be deleted while the queue is being purged. +Deletes available messages in a queue (including in-flight messages) specified by the +QueueURL parameter. When you use the PurgeQueue action, you can't retrieve any messages +deleted from a queue. The message deletion process takes up to 60 seconds. We recommend +waiting for 60 seconds regardless of your queue's size. Messages sent to the queue before +you call PurgeQueue might be received but are deleted within the next minute. Messages sent +to the queue after you call PurgeQueue might be deleted while the queue is being purged. # Arguments - `queue_url`: The URL of the queue from which the PurgeQueue action deletes messages. @@ -983,8 +955,10 @@ attributes gracefully. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AttributeNames"`: A list of attributes that need to be returned along with each - message. These attributes include: All – Returns all values. +- `"AttributeNames"`: This parameter has been deprecated but will be supported for + backward compatibility. To provide attribute names, you are encouraged to use + MessageSystemAttributeNames. A list of attributes that need to be returned along with + each message. These attributes include: All – Returns all values. ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted. AWSTraceHeader @@ -1010,6 +984,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys ReceiveMessage, you can send a list of attribute names to receive, or you can return all of the attributes by specifying All or .* in your request. You can also use all message attributes starting with a prefix, for example bar.*. +- `"MessageSystemAttributeNames"`: A list of attributes that need to be returned along with + each message. These attributes include: All – Returns all values. + ApproximateFirstReceiveTimestamp – Returns the time the message was first received from + the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number + of times a message has been received across all queues but not deleted. AWSTraceHeader + – Returns the X-Ray trace header string. SenderId For a user, returns the user ID, + for example ABCDEFGHI1JKLMNOPQ23R. For an IAM role, returns the IAM role ID, for example + ABCDE1F2GH3I4JK5LMNOP:i-a123b456. SentTimestamp – Returns the time the message was + sent to the queue (epoch time in milliseconds). SqsManagedSseEnabled – Enables + server-side queue encryption using SQS owned encryption keys. 
Only one server-side + encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). + MessageDeduplicationId – Returns the value provided by the producer that calls the + SendMessage action. MessageGroupId – Returns the value provided by the producer that + calls the SendMessage action. Messages with the same MessageGroupId are returned in + sequence. SequenceNumber – Returns the value provided by Amazon SQS. - `"ReceiveRequestAttemptId"`: This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic @@ -1017,40 +1006,37 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys retrieve the same set of messages, even if their visibility timeout has not yet expired. You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action. When you set FifoQueue, a caller of the ReceiveMessage action can provide a - ReceiveRequestAttemptId explicitly. If a caller of the ReceiveMessage action doesn't - provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId. It is - possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none - of the messages have been modified (deleted or had their visibility changes). During a - visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same - messages and receipt handles. If a retry occurs within the deduplication interval, it - resets the visibility timeout. For more information, see Visibility Timeout in the Amazon - SQS Developer Guide. If a caller of the ReceiveMessage action still processes messages - when the visibility timeout expires and messages become visible, another worker consuming - from the same queue can receive the same messages and therefore process duplicates. Also, - if a consumer whose message processing time is longer than the visibility timeout tries to - delete the processed messages, the action fails with an error. To mitigate this effect, - ensure that your application observes a safe threshold before the visibility timeout - expires and extend the visibility timeout as necessary. While messages with a particular - MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are - returned until the visibility timeout expires. You can still receive messages with another - MessageGroupId as long as it is also visible. If a caller of ReceiveMessage can't track - the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. - As a result, delays might occur but the messages in the queue remain in a strict order. - The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId - can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation - (!\"#%&'()*+,-./:;<=>?@[]^_`{|}~). For best practices of using - ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the - Amazon SQS Developer Guide. + ReceiveRequestAttemptId explicitly. It is possible to retry the ReceiveMessage action + with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted + or had their visibility changes). During a visibility timeout, subsequent calls with the + same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry + occurs within the deduplication interval, it resets the visibility timeout. 
For more + information, see Visibility Timeout in the Amazon SQS Developer Guide. If a caller of the + ReceiveMessage action still processes messages when the visibility timeout expires and + messages become visible, another worker consuming from the same queue can receive the same + messages and therefore process duplicates. Also, if a consumer whose message processing + time is longer than the visibility timeout tries to delete the processed messages, the + action fails with an error. To mitigate this effect, ensure that your application observes + a safe threshold before the visibility timeout expires and extend the visibility timeout as + necessary. While messages with a particular MessageGroupId are invisible, no more + messages belonging to the same MessageGroupId are returned until the visibility timeout + expires. You can still receive messages with another MessageGroupId as long as it is also + visible. If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no + retries work until the original visibility timeout expires. As a result, delays might occur + but the messages in the queue remain in a strict order. The maximum length of + ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric + characters (a-z, A-Z, 0-9) and punctuation (!\"#%&'()*+,-./:;<=>?@[]^_`{|}~). For + best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId + Request Parameter in the Amazon SQS Developer Guide. - `"VisibilityTimeout"`: The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. - `"WaitTimeSeconds"`: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner - than WaitTimeSeconds. If no messages are available and the wait time expires, the call - returns successfully with an empty list of messages. To avoid HTTP errors, ensure that the - HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds - parameter. For example, with the Java SDK, you can set HTTP transport settings using the - NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous - clients. + than WaitTimeSeconds. If no messages are available and the wait time expires, the call does + not return a message list. To avoid HTTP errors, ensure that the HTTP response timeout for + ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the + Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for + asynchronous clients, or the ApacheHttpClient for synchronous clients. """ function receive_message(QueueUrl; aws_config::AbstractAWSConfig=global_aws_config()) return sqs( @@ -1126,16 +1112,22 @@ end send_message(message_body, queue_url, params::Dict{String,<:Any}) Delivers a message to the specified queue. A message can include only XML, JSON, and -unformatted text. The following Unicode characters are allowed: #x9 | #xA | #xD | #x20 to -#xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Any characters not included in this list -will be rejected. For more information, see the W3C specification for characters. +unformatted text. The following Unicode characters are allowed. For more information, see +the W3C specification for characters. 
#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD +| #x10000 to #x10FFFF Amazon SQS does not throw an exception or completely reject the +message if it contains invalid characters. Instead, it replaces those invalid characters +with U+FFFD before storing the message in the queue, as long as the message body contains +at least one valid character. # Arguments - `message_body`: The message to send. The minimum size is one character. The maximum size is 256 KiB. A message can include only XML, JSON, and unformatted text. The following - Unicode characters are allowed: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | - #x10000 to #x10FFFF Any characters not included in this list will be rejected. For more - information, see the W3C specification for characters. + Unicode characters are allowed. For more information, see the W3C specification for + characters. #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF + Amazon SQS does not throw an exception or completely reject the message if it contains + invalid characters. Instead, it replaces those invalid characters with U+FFFD before + storing the message in the queue, as long as the message body contains at least one valid + character. - `queue_url`: The URL of the Amazon SQS queue to which a message is sent. Queue URLs and names are case-sensitive. @@ -1146,7 +1138,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys become available for processing after the delay period is finished. If you don't specify a value, the default value for the queue applies. When you set FifoQueue, you can't set DelaySeconds per message. You can set this parameter only on a queue level. -- `"MessageAttribute"`: Each message attribute consists of a Name, Type, and Value. For +- `"MessageAttributes"`: Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer Guide. - `"MessageDeduplicationId"`: This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of sent messages. If a message with a particular @@ -1184,12 +1176,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys each user is processed in a FIFO fashion. You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails. ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the - messages are sorted by time sent. The caller can't specify a MessageGroupId. The length - of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation - (!\"#%&'()*+,-./:;<=>?@[]^_`{|}~). For best practices of using MessageGroupId, - see Using the MessageGroupId Property in the Amazon SQS Developer Guide. MessageGroupId - is required for FIFO queues. You can't use it for Standard queues. -- `"MessageSystemAttribute"`: The message system attribute to send. Each message system + messages are sorted by time sent. The caller can't specify a MessageGroupId. The maximum + length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and + punctuation (!\"#%&'()*+,-./:;<=>?@[]^_`{|}~). For best practices of using + MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer Guide. + MessageGroupId is required for FIFO queues. You can't use it for Standard queues. +- `"MessageSystemAttributes"`: The message system attribute to send. 
Each message system attribute consists of a Name, Type, and Value. Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted X-Ray trace header string. The size of a message system attribute @@ -1226,8 +1218,8 @@ function send_message( end """ - send_message_batch(queue_url, send_message_batch_request_entry) - send_message_batch(queue_url, send_message_batch_request_entry, params::Dict{String,<:Any}) + send_message_batch(entries, queue_url) + send_message_batch(entries, queue_url, params::Dict{String,<:Any}) You can use SendMessageBatch to send up to 10 messages to the specified queue by assigning either identical or different values to each message (or by not assigning values at all). @@ -1238,39 +1230,33 @@ combination of successful and unsuccessful actions, you should check for batch e when the call returns an HTTP status code of 200. The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144 bytes). A message can include only XML, JSON, -and unformatted text. The following Unicode characters are allowed: #x9 | #xA | #xD | #x20 -to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Any characters not included in this -list will be rejected. For more information, see the W3C specification for characters. If -you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default -value for the queue. +and unformatted text. The following Unicode characters are allowed. For more information, +see the W3C specification for characters. #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to +#xFFFD | #x10000 to #x10FFFF Amazon SQS does not throw an exception or completely reject +the message if it contains invalid characters. Instead, it replaces those invalid +characters with U+FFFD before storing the message in the queue, as long as the message body +contains at least one valid character. If you don't specify the DelaySeconds parameter for +an entry, Amazon SQS uses the default value for the queue. # Arguments +- `entries`: A list of SendMessageBatchRequestEntry items. - `queue_url`: The URL of the Amazon SQS queue to which batched messages are sent. Queue URLs and names are case-sensitive. -- `send_message_batch_request_entry`: A list of SendMessageBatchRequestEntry items. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Entries"`: A list of SendMessageBatchRequestEntry items. 
""" function send_message_batch( - QueueUrl, - SendMessageBatchRequestEntry; - aws_config::AbstractAWSConfig=global_aws_config(), + Entries, QueueUrl; aws_config::AbstractAWSConfig=global_aws_config() ) return sqs( "SendMessageBatch", - Dict{String,Any}( - "QueueUrl" => QueueUrl, - "SendMessageBatchRequestEntry" => SendMessageBatchRequestEntry, - ); + Dict{String,Any}("Entries" => Entries, "QueueUrl" => QueueUrl); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function send_message_batch( + Entries, QueueUrl, - SendMessageBatchRequestEntry, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -1279,10 +1265,7 @@ function send_message_batch( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "QueueUrl" => QueueUrl, - "SendMessageBatchRequestEntry" => SendMessageBatchRequestEntry, - ), + Dict{String,Any}("Entries" => Entries, "QueueUrl" => QueueUrl), params, ), ); @@ -1292,24 +1275,25 @@ function send_message_batch( end """ - set_queue_attributes(attribute, queue_url) - set_queue_attributes(attribute, queue_url, params::Dict{String,<:Any}) - -Sets the value of one or more queue attributes. When you change a queue's attributes, the -change can take up to 60 seconds for most of the attributes to propagate throughout the -Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 -minutes and will impact existing messages in the queue potentially causing them to be -expired and deleted if the MessageRetentionPeriod is reduced below the age of existing -messages. In the future, new attributes might be added. If you write code that calls -this action, we recommend that you structure your code so that it can handle new attributes -gracefully. Cross-account permissions don't apply to this action. For more information, -see Grant cross-account permissions to a role and a username in the Amazon SQS Developer -Guide. To remove the ability to change queue permissions, you must deny permission to the -AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy. + set_queue_attributes(attributes, queue_url) + set_queue_attributes(attributes, queue_url, params::Dict{String,<:Any}) + +Sets the value of one or more queue attributes, like a policy. When you change a queue's +attributes, the change can take up to 60 seconds for most of the attributes to propagate +throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can +take up to 15 minutes and will impact existing messages in the queue potentially causing +them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of +existing messages. In the future, new attributes might be added. If you write code that +calls this action, we recommend that you structure your code so that it can handle new +attributes gracefully. Cross-account permissions don't apply to this action. For more +information, see Grant cross-account permissions to a role and a username in the Amazon SQS +Developer Guide. To remove the ability to change queue permissions, you must deny +permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your +IAM policy. # Arguments -- `attribute`: A map of attributes to set. The following lists the names, descriptions, and - values of the special request parameters that the SetQueueAttributes action uses: +- `attributes`: A map of attributes to set. 
The following lists the names, descriptions, + and values of the special request parameters that the SetQueueAttributes action uses: DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0. MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS @@ -1397,17 +1381,17 @@ AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM poli """ function set_queue_attributes( - Attribute, QueueUrl; aws_config::AbstractAWSConfig=global_aws_config() + Attributes, QueueUrl; aws_config::AbstractAWSConfig=global_aws_config() ) return sqs( "SetQueueAttributes", - Dict{String,Any}("Attribute" => Attribute, "QueueUrl" => QueueUrl); + Dict{String,Any}("Attributes" => Attributes, "QueueUrl" => QueueUrl); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function set_queue_attributes( - Attribute, + Attributes, QueueUrl, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1417,7 +1401,7 @@ function set_queue_attributes( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("Attribute" => Attribute, "QueueUrl" => QueueUrl), + Dict{String,Any}("Attributes" => Attributes, "QueueUrl" => QueueUrl), params, ), ); @@ -1432,15 +1416,18 @@ end Starts an asynchronous task to move messages from a specified source queue to a specified destination queue. This action is currently limited to supporting message redrive from -dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue -(DLQ), while the destination queue can be the original source queue (from which the -messages were driven to the dead-letter-queue), or a custom destination queue. -Currently, only standard queues are supported. Only one active message movement task is -supported per queue at any given time. +queues that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. +Non-SQS queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are +currently not supported. In dead-letter queues redrive context, the StartMessageMoveTask +the source queue is the DLQ, while the destination queue can be the original source queue +(from which the messages were driven to the dead-letter-queue), or a custom destination +queue. Only one active message movement task is supported per queue at any given time. # Arguments - `source_arn`: The ARN of the queue that contains the messages to be moved to another - queue. Currently, only dead-letter queue (DLQ) ARNs are accepted. + queue. Currently, only ARNs of dead-letter queues (DLQs) whose sources are other Amazon SQS + queues are accepted. DLQs whose sources are non-SQS queues, such as Lambda or Amazon SNS + topics, are not currently supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1525,8 +1512,8 @@ function tag_queue( end """ - untag_queue(queue_url, tag_key) - untag_queue(queue_url, tag_key, params::Dict{String,<:Any}) + untag_queue(queue_url, tag_keys) + untag_queue(queue_url, tag_keys, params::Dict{String,<:Any}) Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide. Cross-account @@ -1535,23 +1522,20 @@ permissions to a role and a username in the Amazon SQS Developer Guide. # Arguments - `queue_url`: The URL of the queue. 
-- `tag_key`: The list of tags to be removed from the specified queue. +- `tag_keys`: The list of tags to be removed from the specified queue. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"TagKeys"`: The list of tags to be removed from the specified queue. """ -function untag_queue(QueueUrl, TagKey; aws_config::AbstractAWSConfig=global_aws_config()) +function untag_queue(QueueUrl, TagKeys; aws_config::AbstractAWSConfig=global_aws_config()) return sqs( "UntagQueue", - Dict{String,Any}("QueueUrl" => QueueUrl, "TagKey" => TagKey); + Dict{String,Any}("QueueUrl" => QueueUrl, "TagKeys" => TagKeys); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function untag_queue( QueueUrl, - TagKey, + TagKeys, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -1559,7 +1543,9 @@ function untag_queue( "UntagQueue", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("QueueUrl" => QueueUrl, "TagKey" => TagKey), params + _merge, + Dict{String,Any}("QueueUrl" => QueueUrl, "TagKeys" => TagKeys), + params, ), ); aws_config=aws_config, diff --git a/src/services/ssm.jl b/src/services/ssm.jl index 6b98ca6297..181c0bb64c 100644 --- a/src/services/ssm.jl +++ b/src/services/ssm.jl @@ -22,7 +22,7 @@ for each resource type. Using a consistent set of tag keys makes it easier for y manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to and are interpreted strictly as a string of characters. For more information about using tags with Amazon Elastic Compute Cloud (Amazon -EC2) instances, see Tagging your Amazon EC2 resources in the Amazon EC2 User Guide. +EC2) instances, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide. # Arguments - `resource_id`: The resource ID you want to tag. Use the ID of the resource. Here are some @@ -33,7 +33,8 @@ EC2) instances, see Tagging your Amazon EC2 resources in the Amazon EC2 User Gui OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager. For the Document and - Parameter values, use the name of the resource. ManagedInstance: mi-012345abcde The + Parameter values, use the name of the resource. If you're tagging a shared document, you + must use the full ARN of the document. ManagedInstance: mi-012345abcde The ManagedInstance type for this API operation is only for on-premises managed nodes. You must specify the name of the managed node in the following format: mi-ID_number . For example, mi-1a2b3c4d5e6f. @@ -234,17 +235,18 @@ Registering these machines with Systems Manager makes it possible to manage them Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Setting up Amazon Web Services -Systems Manager for hybrid environments in the Amazon Web Services Systems Manager User -Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises -servers and VMs that are configured for Systems Manager are all called managed nodes. +Systems Manager for hybrid and multicloud environments in the Amazon Web Services Systems +Manager User Guide. 
Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, +and on-premises servers and VMs that are configured for Systems Manager are all called +managed nodes. # Arguments - `iam_role`: The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more - information, see Create an IAM service role for a hybrid environment in the Amazon Web - Services Systems Manager User Guide. You can't specify an IAM service-linked role for this - parameter. You must create a unique role. + information, see Create an IAM service role for a hybrid and multicloud environment in the + Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked + role for this parameter. You must create a unique role. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -339,13 +341,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar. - `"ComplianceSeverity"`: The severity level to assign to the association. -- `"DocumentVersion"`: The document version you want to associate with the target(s). Can - be a specific version or the default version. State Manager doesn't support running +- `"DocumentVersion"`: The document version you want to associate with the targets. Can be + a specific version or the default version. State Manager doesn't support running associations that use a new version of a document if that document is shared from another account. State Manager always runs the default version of a document if shared from another account, even though the Systems Manager console shows that a new version was processed. If you want to run an association using a new version of a document shared form another account, you must set the document version to default. +- `"Duration"`: The number of hours the association can run before it is canceled. Duration + applies to associations that are currently running, and any pending and in progress + commands on all targets. If a target was taken offline for the association to run, it is + made available again immediately, without a reboot. The Duration parameter applies only + when both these conditions are true: The association for which you specify a duration is + cancelable according to the parameters of the SSM command document or Automation runbook + associated with this execution. The command specifies the ApplyOnlyAtCronInterval + parameter, which means that the association doesn't run immediately after it is created, + but only according to the specified schedule. - `"InstanceId"`: The managed node ID. InstanceId has been deprecated. To specify a managed node ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with Systems Manager documents (SSM documents) that use schema version @@ -374,7 +385,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys store the output details of the request. - `"Parameters"`: The parameters for the runtime configuration of the document. - `"ScheduleExpression"`: A cron expression when the association will be applied to the - target(s). + targets. - `"ScheduleOffset"`: Number of days to wait after the scheduled day to run an association. 
For example, if you specified a cron schedule of cron(0 0 ? * THU#2 *), you could specify an offset of 3 to run the association each Sunday after the second Thursday of the month. @@ -403,7 +414,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of *. For more information about - choosing targets for an association, see Using targets and rate controls with State Manager + choosing targets for an association, see About targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide. """ function create_association(Name; aws_config::AbstractAWSConfig=global_aws_config()) @@ -477,11 +488,11 @@ Guide. parameters at runtime. We recommend storing the contents for your new document in an external JSON or YAML file and referencing the file in a command. For examples, see the following topics in the Amazon Web Services Systems Manager User Guide. Create an SSM - document (Amazon Web Services API) Create an SSM document (Amazon Web Services CLI) - Create an SSM document (API) + document (console) Create an SSM document (command line) Create an SSM document + (API) - `name`: A name for the SSM document. You can't use the following strings as document name prefixes. These are reserved by Amazon Web Services for use as document name prefixes: - aws amazon amzn + aws amazon amzn AWSEC2 AWSConfigRemediation AWSSupport # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -585,7 +596,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Zone Database on the IANA website. - `"StartDate"`: The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become active. StartDate allows you to delay activation of the - maintenance window until the specified future date. + maintenance window until the specified future date. When using a rate schedule, if you + provide a start date that occurs in the past, the current date and time are used as the + start date. - `"Tags"`: Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a maintenance window to identify the type of tasks it will run, the types @@ -651,15 +664,17 @@ end create_ops_item(description, source, title, params::Dict{String,<:Any}) Creates a new OpsItem. You must have permission in Identity and Access Management (IAM) to -create a new OpsItem. For more information, see Getting started with OpsCenter in the -Amazon Web Services Systems Manager User Guide. Operations engineers and IT professionals -use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate -operational issues impacting the performance and health of their Amazon Web Services -resources. For more information, see Amazon Web Services Systems Manager OpsCenter in the -Amazon Web Services Systems Manager User Guide. +create a new OpsItem. For more information, see Set up OpsCenter in the Amazon Web Services +Systems Manager User Guide. 
Operations engineers and IT professionals use Amazon Web +Services Systems Manager OpsCenter to view, investigate, and remediate operational issues +impacting the performance and health of their Amazon Web Services resources. For more +information, see Amazon Web Services Systems Manager OpsCenter in the Amazon Web Services +Systems Manager User Guide. # Arguments -- `description`: Information about the OpsItem. +- `description`: User-defined text that contains information about the OpsItem, in Markdown + format. Provide enough information so that users viewing this OpsItem for the first time + understand the issue. - `source`: The origin of the OpsItem, such as Amazon EC2 or Systems Manager. The source name can't contain the following strings: aws, amazon, and amzn. - `title`: A short heading that describes the nature of the OpsItem and the impacted @@ -669,8 +684,8 @@ Amazon Web Services Systems Manager User Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AccountId"`: The target Amazon Web Services account where you want to create an OpsItem. To make this call, your account must be configured to work with OpsItems across - accounts. For more information, see Setting up OpsCenter to work with OpsItems across - accounts in the Amazon Web Services Systems Manager User Guide. + accounts. For more information, see Set up OpsCenter in the Amazon Web Services Systems + Manager User Guide. - `"ActualEndTime"`: The time a runbook workflow ended. Currently reported only for the OpsItem type /aws/changerequest. - `"ActualStartTime"`: The time a runbook workflow started. Currently reported only for the @@ -690,12 +705,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys by users who have access to the OpsItem (as provided by the GetOpsItem API operation). Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the - OpsItem. To view Amazon Web Services CLI example commands that use these keys, see Creating + OpsItem. To view Amazon Web Services CLI example commands that use these keys, see Create OpsItems manually in the Amazon Web Services Systems Manager User Guide. - `"OpsItemType"`: The type of OpsItem to create. Systems Manager supports the following types of OpsItems: /aws/issue This type of OpsItem is used for default OpsItems created by OpsCenter. /aws/changerequest This type of OpsItem is used by Change Manager for - reviewing and approving or rejecting change requests. /aws/insights This type of + reviewing and approving or rejecting change requests. /aws/insight This type of OpsItem is used by OpsCenter for aggregating and reporting on duplicate OpsItems. - `"PlannedEndTime"`: The time specified in a change request for a runbook workflow to end. Currently supported only for the OpsItem type /aws/changerequest. @@ -706,13 +721,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys OpsItems. For example, related OpsItems can include OpsItems with similar error messages, impacted resources, or statuses for the impacted resource. - `"Severity"`: Specify a severity to assign to an OpsItem. -- `"Tags"`: Optional metadata that you assign to a resource. You can restrict access to - OpsItems by using an inline IAM policy that specifies tags. For more information, see - Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide. 
Tags - use a key-value pair. For example: Key=Department,Value=Finance To add tags to a new - OpsItem, a user must have IAM permissions for both the ssm:CreateOpsItems operation and the - ssm:AddTagsToResource operation. To add tags to an existing OpsItem, use the - AddTagsToResource operation. +- `"Tags"`: Optional metadata that you assign to a resource. Tags use a key-value pair. For + example: Key=Department,Value=Finance To add tags to a new OpsItem, a user must have IAM + permissions for both the ssm:CreateOpsItems operation and the ssm:AddTagsToResource + operation. To add tags to an existing OpsItem, use the AddTagsToResource operation. """ function create_ops_item( Description, Source, Title; aws_config::AbstractAWSConfig=global_aws_config() @@ -829,10 +841,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the RejectedPackages list. ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default - action if no option is specified. BLOCK : Packages in the RejectedPatches list, and - packages that include them as dependencies, aren't installed under any circumstances. If a - package was installed before it was added to the Rejected patches list, it is considered - non-compliant with the patch baseline, and its status is reported as InstalledRejected. + action if no option is specified. BLOCK: Packages in the Rejected patches list, and + packages that include them as dependencies, aren't installed by Patch Manager under any + circumstances. If a package was installed before it was added to the Rejected patches list, + or is installed outside of Patch Manager afterward, it's considered noncompliant with the + patch baseline and its status is reported as InstalledRejected. - `"Sources"`: Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only. - `"Tags"`: Optional metadata that you assign to a resource. Tags enable you to categorize @@ -1131,6 +1144,51 @@ function delete_maintenance_window( ) end +""" + delete_ops_item(ops_item_id) + delete_ops_item(ops_item_id, params::Dict{String,<:Any}) + +Delete an OpsItem. You must have permission in Identity and Access Management (IAM) to +delete an OpsItem. Note the following important information about this operation. +Deleting an OpsItem is irreversible. You can't restore a deleted OpsItem. This operation +uses an eventual consistency model, which means the system can take a few minutes to +complete this operation. If you delete an OpsItem and immediately call, for example, +GetOpsItem, the deleted OpsItem might still appear in the response. This operation is +idempotent. The system doesn't throw an exception if you repeatedly call this operation for +the same OpsItem. If the first call is successful, all additional calls return the same +successful response as the first call. This operation doesn't support cross-account +calls. A delegated administrator or management account can't delete OpsItems in other +accounts, even if OpsCenter has been set up for cross-account administration. For more +information about cross-account administration, see Setting up OpsCenter to centrally +manage OpsItems across accounts in the Systems Manager User Guide. + +# Arguments +- `ops_item_id`: The ID of the OpsItem that you want to delete. 
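A minimal sketch of calling the new wrapper; the OpsItem ID is a placeholder, and note that deletion is irreversible:

```julia
using AWS
@service SSM

# Deletes a single OpsItem by ID (placeholder value shown).
SSM.delete_ops_item("oi-1a2b3c4d5e6f")
```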
+ +""" +function delete_ops_item(OpsItemId; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm( + "DeleteOpsItem", + Dict{String,Any}("OpsItemId" => OpsItemId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ops_item( + OpsItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm( + "DeleteOpsItem", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("OpsItemId" => OpsItemId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_ops_metadata(ops_metadata_arn) delete_ops_metadata(ops_metadata_arn, params::Dict{String,<:Any}) @@ -1174,7 +1232,8 @@ Delete a parameter from the system. After deleting a parameter, wait for at leas seconds to create a parameter with the same name. # Arguments -- `name`: The name of the parameter to delete. +- `name`: The name of the parameter to delete. You can't enter the Amazon Resource Name + (ARN) for a parameter, only the parameter name itself. """ function delete_parameter(Name; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1205,7 +1264,8 @@ create a parameter with the same name. # Arguments - `names`: The names of the parameters to delete. After deleting a parameter, wait for at - least 30 seconds to create a parameter with the same name. + least 30 seconds to create a parameter with the same name. You can't enter the Amazon + Resource Name (ARN) for a parameter, only the parameter name itself. """ function delete_parameters(Names; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1308,9 +1368,12 @@ end Deletes a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager -resources. Currently, OpsItemGroup is the only resource that supports Systems Manager -resource policies. The resource policy for OpsItemGroup enables Amazon Web Services -accounts to view and interact with OpsCenter operational work items (OpsItems). +resources. The following resources support Systems Manager resource policies. +OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web Services accounts to +view and interact with OpsCenter operational work items (OpsItems). Parameter - The +resource policy is used to share a parameter with other accounts using Resource Access +Manager (RAM). For more information about cross-account sharing of parameters, see Working +with shared parameters in the Amazon Web Services Systems Manager User Guide. # Arguments - `policy_hash`: ID of the current policy version. The hash helps to prevent multiple calls @@ -1771,7 +1834,9 @@ end describe_available_patches() describe_available_patches(params::Dict{String,<:Any}) -Lists all patches eligible to be included in a patch baseline. +Lists all patches eligible to be included in a patch baseline. Currently, +DescribeAvailablePatches supports only the Amazon Linux 1, Amazon Linux 2, and Windows +Server operating systems. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1829,8 +1894,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DocumentVersion"`: The document version for which you want information. Can be a specific version or the default version. - `"VersionName"`: An optional field specifying the version of the artifact associated with - the document. For example, \"Release 12, Update 6\". 
This value is unique across all - versions of a document, and can't be changed. + the document. For example, 12.6. This value is unique across all versions of a document, + and can't be changed. """ function describe_document(Name; aws_config::AbstractAWSConfig=global_aws_config()) return ssm( @@ -1905,7 +1970,7 @@ end describe_effective_instance_associations(instance_id) describe_effective_instance_associations(instance_id, params::Dict{String,<:Any}) -All associations for the managed node(s). +All associations for the managed nodes. # Arguments - `instance_id`: The managed node ID for which you want to view all associations. @@ -1987,7 +2052,7 @@ end describe_instance_associations_status(instance_id) describe_instance_associations_status(instance_id, params::Dict{String,<:Any}) -The status of the associations for the managed node(s). +The status of the associations for the managed nodes. # Arguments - `instance_id`: The managed node IDs for which you want association status information. @@ -2028,26 +2093,29 @@ end describe_instance_information() describe_instance_information(params::Dict{String,<:Any}) -Describes one or more of your managed nodes, including information about the operating -system platform, the version of SSM Agent installed on the managed node, node status, and -so on. If you specify one or more managed node IDs, it returns information for those -managed nodes. If you don't specify node IDs, it returns information for all your managed -nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive -an error. The IamRole field for this API operation is the Identity and Access Management -(IAM) role assigned to on-premises managed nodes. This call doesn't return the IAM role for -EC2 instances. +Provides information about one or more of your managed nodes, including the operating +system platform, SSM Agent version, association status, and IP address. This operation does +not return information for nodes that are either Stopped or Terminated. If you specify one +or more node IDs, the operation returns information for those managed nodes. If you don't +specify node IDs, it returns information for all your managed nodes. If you specify a node +ID that isn't valid or a node that you don't own, you receive an error. The IamRole field +returned for this API operation is the Identity and Access Management (IAM) role assigned +to on-premises managed nodes. This operation does not return the IAM role for EC2 +instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Filters"`: One or more filters. Use a filter to return a more specific list of managed - nodes. You can filter based on tags applied to your managed nodes. Use this Filters data - type instead of InstanceInformationFilterList, which is deprecated. + nodes. You can filter based on tags applied to your managed nodes. Tag filters can't be + combined with other filter types. Use this Filters data type instead of + InstanceInformationFilterList, which is deprecated. - `"InstanceInformationFilterList"`: This is a legacy method. We recommend that you don't use this method. Instead, use the Filters data type. Filters enables you to return node information by filtering based on tags applied to managed nodes. Attempting to use InstanceInformationFilterList and Filters leads to an exception error. - `"MaxResults"`: The maximum number of items to return for this call. 
The call also returns a token that you can specify in a subsequent call to get the next set of results. + The default value is 10 items. - `"NextToken"`: The token for the next set of items to return. (You received this token from a previous call.) """ @@ -2171,7 +2239,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low - State Sample values: Installed | InstalledOther | InstalledPendingReboot + State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of + all State values, see Understanding patch compliance state values in the Amazon Web + Services Systems Manager User Guide. - `"MaxResults"`: The maximum number of patches to return (per page). - `"NextToken"`: The token for the next set of items to return. (You received this token from a previous call.) @@ -2201,6 +2271,38 @@ function describe_instance_patches( ) end +""" + describe_instance_properties() + describe_instance_properties(params::Dict{String,<:Any}) + +An API operation used by the Systems Manager console to display information about Systems +Manager managed nodes. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"FiltersWithOperator"`: The request filters to use with the operator. +- `"InstancePropertyFilterList"`: An array of instance property filters. +- `"MaxResults"`: The maximum number of items to return for the call. The call also returns + a token that you can specify in a subsequent call to get the next set of results. +- `"NextToken"`: The token provided by a previous request to use to return the next set of + properties. +""" +function describe_instance_properties(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm( + "DescribeInstanceProperties"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_instance_properties( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm( + "DescribeInstanceProperties", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_inventory_deletions() describe_inventory_deletions(params::Dict{String,<:Any}) @@ -2594,12 +2696,12 @@ end describe_ops_items(params::Dict{String,<:Any}) Query a set of OpsItems. You must have permission in Identity and Access Management (IAM) -to query a list of OpsItems. For more information, see Getting started with OpsCenter in -the Amazon Web Services Systems Manager User Guide. Operations engineers and IT -professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and -remediate operational issues impacting the performance and health of their Amazon Web -Services resources. For more information, see OpsCenter in the Amazon Web Services Systems -Manager User Guide. +to query a list of OpsItems. For more information, see Set up OpsCenter in the Amazon Web +Services Systems Manager User Guide. Operations engineers and IT professionals use Amazon +Web Services Systems Manager OpsCenter to view, investigate, and remediate operational +issues impacting the performance and health of their Amazon Web Services resources. For +more information, see Amazon Web Services Systems Manager OpsCenter in the Amazon Web +Services Systems Manager User Guide. 
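A hedged sketch of querying OpsItems through this wrapper, using the filter keys documented below; the filter value and operator are illustrative:

```julia
using AWS
@service SSM

# Return up to 10 OpsItems whose Status is Open.
params = Dict(
    "OpsItemFilters" => [
        Dict("Key" => "Status", "Values" => ["Open"], "Operator" => "Equal"),
    ],
    "MaxResults" => 10,
)
SSM.describe_ops_items(params)
```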
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2613,11 +2715,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Operations: Equals,Contains Key: OperationalData** Operations: Equals Key: OperationalDataKey Operations: Equals Key: OperationalDataValue Operations: Equals, Contains Key: OpsItemId Operations: Equals Key: ResourceId Operations: Contains Key: - AutomationId Operations: Equals *The Equals operator for Title matches the first 100 - characters. If you specify more than 100 characters, they system returns an error that the - filter value exceeds the length limit. **If you filter the response by using the - OperationalData operator, specify a key-value pair by using the following JSON format: - {\"key\":\"key_name\",\"value\":\"a_value\"} + AutomationId Operations: Equals Key: AccountId Operations: Equals *The Equals operator + for Title matches the first 100 characters. If you specify more than 100 characters, they + system returns an error that the filter value exceeds the length limit. **If you filter the + response by using the OperationalData operator, specify a key-value pair by using the + following JSON format: {\"key\":\"key_name\",\"value\":\"a_value\"} """ function describe_ops_items(; aws_config::AbstractAWSConfig=global_aws_config()) return ssm("DescribeOpsItems"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -2634,7 +2736,8 @@ end describe_parameters() describe_parameters(params::Dict{String,<:Any}) -Get information about a parameter. Request results are returned on a best-effort basis. If +Lists the parameters in your Amazon Web Services account or the parameters shared with you +when you enable the Shared option. Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops @@ -2652,6 +2755,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NextToken"`: The token for the next set of items to return. (You received this token from a previous call.) - `"ParameterFilters"`: Filters to limit the request results. +- `"Shared"`: Lists parameters that are shared with you. By default when using this + option, the command returns parameters that have been shared using a standard Resource + Access Manager Resource Share. In order for a parameter that was shared using the + PutResourcePolicy command to be returned, the associated RAM Resource Share Created From + Policy must have been promoted to a standard Resource Share using the RAM + PromoteResourceShareCreatedFromPolicy API operation. For more information about sharing + parameters, see Working with shared parameters in the Amazon Web Services Systems Manager + User Guide. """ function describe_parameters(; aws_config::AbstractAWSConfig=global_aws_config()) return ssm("DescribeParameters"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -3187,8 +3298,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys be either JSON or YAML. JSON is the default format. - `"DocumentVersion"`: The document version for which you want information. - `"VersionName"`: An optional field specifying the version of the artifact associated with - the document. For example, \"Release 12, Update 6\". 
This value is unique across all - versions of a document and can't be changed. + the document. For example, 12.6. This value is unique across all versions of a document and + can't be changed. """ function get_document(Name; aws_config::AbstractAWSConfig=global_aws_config()) return ssm( @@ -3491,12 +3602,12 @@ end get_ops_item(ops_item_id, params::Dict{String,<:Any}) Get information about an OpsItem by using the ID. You must have permission in Identity and -Access Management (IAM) to view information about an OpsItem. For more information, see -Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide. -Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter -to view, investigate, and remediate operational issues impacting the performance and health -of their Amazon Web Services resources. For more information, see OpsCenter in the Amazon -Web Services Systems Manager User Guide. +Access Management (IAM) to view information about an OpsItem. For more information, see Set +up OpsCenter in the Amazon Web Services Systems Manager User Guide. Operations engineers +and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, +investigate, and remediate operational issues impacting the performance and health of their +Amazon Web Services resources. For more information, see Amazon Web Services Systems +Manager OpsCenter in the Amazon Web Services Systems Manager User Guide. # Arguments - `ops_item_id`: The ID of the OpsItem that you want to get. @@ -3606,8 +3717,11 @@ Get information about a single parameter by specifying the parameter name. To g information about more than one parameter at a time, use the GetParameters operation. # Arguments -- `name`: The name of the parameter you want to query. To query by parameter label, use - \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\". +- `name`: The name or Amazon Resource Name (ARN) of the parameter that you want to query. + For parameters shared with you from another account, you must use the full ARN. To query by + parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": + \"name:version\". For more information about shared parameters, see Working with shared + parameters in the Amazon Web Services Systems Manager User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3643,7 +3757,9 @@ parameter uses to reference KMS. Otherwise, GetParameterHistory retrieves whatev original key alias was referencing. # Arguments -- `name`: The name of the parameter for which you want to review history. +- `name`: The name or Amazon Resource Name (ARN) of the parameter for which you want to + review history. For parameters shared with you from another account, you must use the full + ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3681,9 +3797,12 @@ Get information about one or more parameters by specifying multiple parameter na get information about a single parameter, you can use the GetParameter operation instead. # Arguments -- `names`: Names of the parameters for which you want to query information. To query by - parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": - \"name:version\". +- `names`: The names or Amazon Resource Names (ARNs) of the parameters that you want to + query. 
For parameters shared with you from another account, you must use the full ARNs. To + query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use + \"Name\": \"name:version\". The results for GetParameters requests are listed in + alphabetical order in query responses. For information about shared parameters, see + Working with shared parameters in the Amazon Web Services Systems Manager User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3951,7 +4070,8 @@ isn't associated with a parameter and the system displays it in the list of Inva # Arguments - `labels`: One or more labels to attach to the specified parameter version. -- `name`: The parameter name on which you want to attach one or more labels. +- `name`: The parameter name on which you want to attach one or more labels. You can't + enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4646,7 +4766,7 @@ the time by using the following format: yyyy-MM-dd'T'HH:mm:ss'Z' State Manager association), Patch, or Custom:string. - `execution_summary`: A summary of the call execution that includes an execution ID, the type of execution (for example, Command), and the date/time of the execution using a - datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'. + datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z' - `items`: Information about the compliance as defined by the resource type. For example, for a patch compliance type, Items includes information about the PatchSeverity, Classification, and so on. @@ -4764,22 +4884,23 @@ end Add a parameter to the system. # Arguments -- `name`: The fully qualified name of the parameter that you want to add to the system. The - fully qualified name includes the complete hierarchy of the parameter path and name. For - parameters in a hierarchy, you must include a leading forward slash character (/) when you - create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13 Naming - Constraints: Parameter names are case sensitive. A parameter name must be unique within - an Amazon Web Services Region A parameter name can't be prefixed with \"aws\" or \"ssm\" - (case-insensitive). Parameter names can include only the following symbols and letters: - a-zA-Z0-9_.- In addition, the slash character ( / ) is used to delineate hierarchies in - parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter A parameter - name can't include spaces. Parameter hierarchies are limited to a maximum depth of - fifteen levels. For additional information about valid values for parameter names, see - Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide. - The maximum length constraint of 2048 characters listed below includes 1037 characters - reserved for internal use by Systems Manager. The maximum length for a parameter name that - you create is 1011 characters. This includes the characters in the ARN that precede the - name you specify, such as arn:aws:ssm:us-east-2:111122223333:parameter/. +- `name`: The fully qualified name of the parameter that you want to add to the system. + You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name + itself. The fully qualified name includes the complete hierarchy of the parameter path and + name. 
For parameters in a hierarchy, you must include a leading forward slash character (/) + when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13 + Naming Constraints: Parameter names are case sensitive. A parameter name must be unique + within an Amazon Web Services Region A parameter name can't be prefixed with \"aws\" or + \"ssm\" (case-insensitive). Parameter names can include only the following symbols and + letters: a-zA-Z0-9_.- In addition, the slash character ( / ) is used to delineate + hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter + A parameter name can't include spaces. Parameter hierarchies are limited to a maximum + depth of fifteen levels. For additional information about valid values for parameter + names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager + User Guide. The maximum length constraint of 2048 characters listed below includes 1037 + characters reserved for internal use by Systems Manager. The maximum length for a parameter + name that you create is 1011 characters. This includes the characters in the ARN that + precede the name you specify, such as arn:aws:ssm:us-east-2:111122223333:parameter/. - `value`: The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB. Parameters can't be referenced or nested in the values of other parameters. You can't include {{}} or @@ -4805,7 +4926,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys will fail and the parameter will not be created or updated. To monitor whether your aws:ec2:image parameters are created successfully, see Setting up notifications or trigger actions based on Parameter Store events. For more information about AMI format validation , - see Native parameter support for Amazon Machine Image (AMI) IDs. + see Native parameter support for Amazon Machine Image IDs. - `"Description"`: Information about the parameter that you want to add to the system. Optional but recommended. Don't enter personally identifiable information in this field. - `"KeyId"`: The Key Management Service (KMS) ID that you want to use to encrypt a @@ -4843,34 +4964,34 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are offered at no additional cost. Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an Amazon Web Services account. Advanced parameters - incur a charge. For more information, see Standard and advanced parameter tiers in the - Amazon Web Services Systems Manager User Guide. You can change a standard parameter to an - advanced parameter any time. But you can't revert an advanced parameter to a standard - parameter. Reverting an advanced parameter to a standard parameter would result in data - loss because the system would truncate the size of the parameter from 8 KB to 4 KB. - Reverting would also remove any policies attached to the parameter. Lastly, advanced - parameters use a different form of encryption than standard parameters. If you no longer - need an advanced parameter, or if you no longer want to incur charges for an advanced - parameter, you must delete it and recreate it as a new standard parameter. Using the - Default Tier Configuration In PutParameter requests, you can specify the tier to create - the parameter in. 
Whenever you specify a tier in the request, Parameter Store creates or - updates the parameter according to that request. However, if you don't specify a tier in a - request, Parameter Store assigns the tier based on the current Parameter Store default tier - configuration. The default tier when you begin using Parameter Store is the - standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the - following as the default: Advanced: With this option, Parameter Store evaluates all - requests as advanced parameters. Intelligent-Tiering: With this option, Parameter Store - evaluates each request to determine if the parameter is standard or advanced. If the - request doesn't include any options that require an advanced parameter, the parameter is - created in the standard-parameter tier. If one or more options requiring an advanced - parameter are included in the request, Parameter Store create a parameter in the - advanced-parameter tier. This approach helps control your parameter-related costs by always - creating standard parameters unless an advanced parameter is necessary. Options that - require an advanced parameter include the following: The content size of the parameter is - more than 4 KB. The parameter uses a parameter policy. More than 10,000 parameters - already exist in your Amazon Web Services account in the current Amazon Web Services - Region. For more information about configuring the default tier option, see Specifying a - default parameter tier in the Amazon Web Services Systems Manager User Guide. + incur a charge. For more information, see Managing parameter tiers in the Amazon Web + Services Systems Manager User Guide. You can change a standard parameter to an advanced + parameter any time. But you can't revert an advanced parameter to a standard parameter. + Reverting an advanced parameter to a standard parameter would result in data loss because + the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also + remove any policies attached to the parameter. Lastly, advanced parameters use a different + form of encryption than standard parameters. If you no longer need an advanced parameter, + or if you no longer want to incur charges for an advanced parameter, you must delete it and + recreate it as a new standard parameter. Using the Default Tier Configuration In + PutParameter requests, you can specify the tier to create the parameter in. Whenever you + specify a tier in the request, Parameter Store creates or updates the parameter according + to that request. However, if you don't specify a tier in a request, Parameter Store assigns + the tier based on the current Parameter Store default tier configuration. The default tier + when you begin using Parameter Store is the standard-parameter tier. If you use the + advanced-parameter tier, you can specify one of the following as the default: Advanced: + With this option, Parameter Store evaluates all requests as advanced parameters. + Intelligent-Tiering: With this option, Parameter Store evaluates each request to determine + if the parameter is standard or advanced. If the request doesn't include any options that + require an advanced parameter, the parameter is created in the standard-parameter tier. If + one or more options requiring an advanced parameter are included in the request, Parameter + Store create a parameter in the advanced-parameter tier. 
This approach helps control your + parameter-related costs by always creating standard parameters unless an advanced parameter + is necessary. Options that require an advanced parameter include the following: The + content size of the parameter is more than 4 KB. The parameter uses a parameter policy. + More than 10,000 parameters already exist in your Amazon Web Services account in the + current Amazon Web Services Region. For more information about configuring the default + tier option, see Specifying a default parameter tier in the Amazon Web Services Systems + Manager User Guide. - `"Type"`: The type of parameter that you want to add to the system. SecureString isn't currently supported for CloudFormation templates. Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the @@ -4908,9 +5029,25 @@ end Creates or updates a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems -Manager resources. Currently, OpsItemGroup is the only resource that supports Systems -Manager resource policies. The resource policy for OpsItemGroup enables Amazon Web Services -accounts to view and interact with OpsCenter operational work items (OpsItems). +Manager resources. The following resources support Systems Manager resource policies. +OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web Services accounts to +view and interact with OpsCenter operational work items (OpsItems). Parameter - The +resource policy is used to share a parameter with other accounts using Resource Access +Manager (RAM). To share a parameter, it must be in the advanced parameter tier. For +information about parameter tiers, see Managing parameter tiers. For information about +changing an existing standard parameter to an advanced parameter, see Changing a standard +parameter to an advanced parameter. To share a SecureString parameter, it must be encrypted +with a customer managed key, and you must share the key separately through Key Management +Service. Amazon Web Services managed keys cannot be shared. Parameters encrypted with the +default Amazon Web Services managed key can be updated to use a customer managed key +instead. For KMS key definitions, see KMS concepts in the Key Management Service Developer +Guide. While you can share a parameter using the Systems Manager PutResourcePolicy +operation, we recommend using Resource Access Manager (RAM) instead. This is because using +PutResourcePolicy requires the extra step of promoting the parameter to a standard RAM +Resource Share using the RAM PromoteResourceShareCreatedFromPolicy API operation. +Otherwise, the parameter won't be returned by the Systems Manager DescribeParameters API +operation using the --shared option. For more information, see Sharing a parameter in the +Amazon Web Services Systems Manager User Guide # Arguments - `policy`: A policy you want to associate with a resource. @@ -5164,12 +5301,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys tasks that have the same priority scheduled in parallel. - `"ServiceRoleArn"`: The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not - specify a service role ARN, Systems Manager uses your account's service-linked role. 
If no - service-linked role for Systems Manager exists in your account, it is created when you run - RegisterTaskWithMaintenanceWindow. For more information, see the following topics in the in - the Amazon Web Services Systems Manager User Guide: Using service-linked roles for - Systems Manager Should I use a service-linked role or a custom service role to run - maintenance window tasks? + specify a service role ARN, Systems Manager uses a service-linked role in your account. If + no appropriate service-linked role for Systems Manager exists in your account, it is + created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security + posture, we strongly recommend creating a custom policy and custom service role for running + your maintenance window tasks. The policy can be crafted to provide only the permissions + needed for your particular maintenance window tasks. For more information, see Setting up + maintenance windows in the in the Amazon Web Services Systems Manager User Guide. - `"Targets"`: The targets (either managed nodes or maintenance window targets). One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, @@ -5444,9 +5582,9 @@ Runs commands on one or more managed nodes. - `document_name`: The name of the Amazon Web Services Systems Manager document (SSM document) to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the document Amazon Resource Name (ARN). For - more information about how to use shared documents, see Using shared SSM documents in the - Amazon Web Services Systems Manager User Guide. If you specify a document name or ARN that - hasn't been shared with your account, you receive an InvalidDocument error. + more information about how to use shared documents, see Sharing SSM documents in the Amazon + Web Services Systems Manager User Guide. If you specify a document name or ARN that hasn't + been shared with your account, you receive an InvalidDocument error. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5471,8 +5609,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys prefer not to list individual node IDs, we recommend using the Targets option instead. Using Targets, which accepts tag key-value pairs to identify the managed nodes to send commands to, you can a send command to tens, hundreds, or thousands of nodes at once. For - more information about how to use targets, see Using targets and rate controls to send - commands to a fleet in the Amazon Web Services Systems Manager User Guide. + more information about how to use targets, see Run commands at scale in the Amazon Web + Services Systems Manager User Guide. - `"MaxConcurrency"`: (Optional) The maximum number of managed nodes that are allowed to run the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see @@ -5502,8 +5640,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys command to a large number of managed nodes at once. Using Targets, which accepts tag key-value pairs to identify managed nodes, you can send a command to tens, hundreds, or thousands of nodes at once. 
To send a command to a smaller number of managed nodes, you can - use the InstanceIds option instead. For more information about how to use targets, see - Sending commands to a fleet in the Amazon Web Services Systems Manager User Guide. + use the InstanceIds option instead. For more information about how to use targets, see Run + commands at scale in the Amazon Web Services Systems Manager User Guide. - `"TimeoutSeconds"`: If this time is reached and the command hasn't already started running, it won't run. """ @@ -5575,7 +5713,7 @@ Initiates execution of an Automation runbook. # Arguments - `document_name`: The name of the SSM document to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the - document ARN. For more information about how to use shared documents, see Using shared SSM + document ARN. For more information about how to use shared documents, see Sharing SSM documents in the Amazon Web Services Systems Manager User Guide. # Optional Parameters @@ -5856,7 +5994,8 @@ Remove a label or labels from a parameter. # Arguments - `labels`: One or more labels to delete from the specified parameter version. -- `name`: The name of the parameter from which you want to delete one or more labels. +- `name`: The name of the parameter from which you want to delete one or more labels. You + can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself. - `parameter_version`: The specific version of the parameter which you want to delete one or more labels from. If it isn't present, the call will fail. @@ -5958,6 +6097,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys document if shared from another account, even though the Systems Manager console shows that a new version was processed. If you want to run an association using a new version of a document shared form another account, you must set the document version to default. +- `"Duration"`: The number of hours the association can run before it is canceled. Duration + applies to associations that are currently running, and any pending and in progress + commands on all targets. If a target was taken offline for the association to run, it is + made available again immediately, without a reboot. The Duration parameter applies only + when both these conditions are true: The association for which you specify a duration is + cancelable according to the parameters of the SSM command document or Automation runbook + associated with this execution. The command specifies the ApplyOnlyAtCronInterval + parameter, which means that the association doesn't run immediately after it is updated, + but only according to the specified schedule. - `"MaxConcurrency"`: The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the @@ -6120,8 +6268,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys association unless you previously specifed the apply-only-at-cron-interval parameter. - `"TargetType"`: Specify a new target type for the document. - `"VersionName"`: An optional field specifying the version of the artifact you are - updating with the document. For example, \"Release 12, Update 6\". This value is unique - across all versions of a document, and can't be changed. + updating with the document. 
For example, 12.6. This value is unique across all versions of + a document, and can't be changed. """ function update_document(Content, Name; aws_config::AbstractAWSConfig=global_aws_config()) return ssm( @@ -6282,7 +6430,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Zone Database on the IANA website. - `"StartDate"`: The date and time, in ISO-8601 Extended format, for when you want the maintenance window to become active. StartDate allows you to delay activation of the - maintenance window until the specified future date. + maintenance window until the specified future date. When using a rate schedule, if you + provide a start date that occurs in the past, the current date and time are used as the + start date. """ function update_maintenance_window( WindowId; aws_config::AbstractAWSConfig=global_aws_config() @@ -6435,12 +6585,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Optional fields that aren't specified are set to null. - `"ServiceRoleArn"`: The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not - specify a service role ARN, Systems Manager uses your account's service-linked role. If no - service-linked role for Systems Manager exists in your account, it is created when you run - RegisterTaskWithMaintenanceWindow. For more information, see the following topics in the in - the Amazon Web Services Systems Manager User Guide: Using service-linked roles for - Systems Manager Should I use a service-linked role or a custom service role to run - maintenance window tasks? + specify a service role ARN, Systems Manager uses a service-linked role in your account. If + no appropriate service-linked role for Systems Manager exists in your account, it is + created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security + posture, we strongly recommend creating a custom policy and custom service role for running + your maintenance window tasks. The policy can be crafted to provide only the permissions + needed for your particular maintenance window tasks. For more information, see Setting up + maintenance windows in the in the Amazon Web Services Systems Manager User Guide. - `"Targets"`: The targets (either managed nodes or tags) to modify. Managed nodes are specified using the format Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using the format Key=tag_name,Values=tag_value. One or more targets must be @@ -6509,9 +6660,9 @@ nodes during the activation process. For more information, see CreateActivation. - `iam_role`: The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more - information, see Create an IAM service role for a hybrid environment in the Amazon Web - Services Systems Manager User Guide. You can't specify an IAM service-linked role for this - parameter. You must create a unique role. + information, see Create an IAM service role for a hybrid and multicloud environment in the + Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked + role for this parameter. You must create a unique role. - `instance_id`: The ID of the managed node where you want to update the role. 
""" @@ -6550,12 +6701,12 @@ end update_ops_item(ops_item_id, params::Dict{String,<:Any}) Edit or change an OpsItem. You must have permission in Identity and Access Management (IAM) -to update an OpsItem. For more information, see Getting started with OpsCenter in the -Amazon Web Services Systems Manager User Guide. Operations engineers and IT professionals -use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate -operational issues impacting the performance and health of their Amazon Web Services -resources. For more information, see OpsCenter in the Amazon Web Services Systems Manager -User Guide. +to update an OpsItem. For more information, see Set up OpsCenter in the Amazon Web Services +Systems Manager User Guide. Operations engineers and IT professionals use Amazon Web +Services Systems Manager OpsCenter to view, investigate, and remediate operational issues +impacting the performance and health of their Amazon Web Services resources. For more +information, see Amazon Web Services Systems Manager OpsCenter in the Amazon Web Services +Systems Manager User Guide. # Arguments - `ops_item_id`: The ID of the OpsItem. @@ -6567,8 +6718,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ActualStartTime"`: The time a runbook workflow started. Currently reported only for the OpsItem type /aws/changerequest. - `"Category"`: Specify a new category for an OpsItem. -- `"Description"`: Update the information about the OpsItem. Provide enough information so - that users reading this OpsItem for the first time understand the issue. +- `"Description"`: User-defined text that contains information about the OpsItem, in + Markdown format. - `"Notifications"`: The Amazon Resource Name (ARN) of an SNS topic where notifications are sent when this OpsItem is edited or changed. - `"OperationalData"`: Add new keys or edit existing key-value pairs of the OperationalData @@ -6700,10 +6851,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the RejectedPackages list. ALLOW_AS_DEPENDENCY : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as InstalledOther. This is the default - action if no option is specified. BLOCK : Packages in the RejectedPatches list, and - packages that include them as dependencies, aren't installed under any circumstances. If a - package was installed before it was added to the Rejected patches list, it is considered - non-compliant with the patch baseline, and its status is reported as InstalledRejected. + action if no option is specified. BLOCK: Packages in the Rejected patches list, and + packages that include them as dependencies, aren't installed by Patch Manager under any + circumstances. If a package was installed before it was added to the Rejected patches list, + or is installed outside of Patch Manager afterward, it's considered noncompliant with the + patch baseline and its status is reported as InstalledRejected. - `"Replace"`: If True, then all fields that are required by the CreatePatchBaseline operation are also required for this API request. Optional fields that aren't specified are set to null. @@ -6822,14 +6974,16 @@ service setting for the account. to administrators. Implement least privilege access when allowing individuals to configure or modify the Default Host Management Configuration. 
- `setting_value`: The new value to specify for the service setting. The following list - specifies the available values for each setting. - /ssm/managed-instance/default-ec2-instance-management-role: The name of an IAM role - /ssm/automation/customer-script-log-destination: CloudWatch - /ssm/automation/customer-script-log-group-name: The name of an Amazon CloudWatch Logs log - group /ssm/documents/console/public-sharing-permission: Enable or Disable - /ssm/managed-instance/activation-tier: standard or advanced /ssm/opsinsights/opscenter: - Enabled or Disabled /ssm/parameter-store/default-parameter-tier: Standard, Advanced, - Intelligent-Tiering /ssm/parameter-store/high-throughput-enabled: true or false + specifies the available values for each setting. For + /ssm/managed-instance/default-ec2-instance-management-role, enter the name of an IAM role. + For /ssm/automation/customer-script-log-destination, enter CloudWatch. For + /ssm/automation/customer-script-log-group-name, enter the name of an Amazon CloudWatch Logs + log group. For /ssm/documents/console/public-sharing-permission, enter Enable or Disable. + For /ssm/managed-instance/activation-tier, enter standard or advanced. For + /ssm/opsinsights/opscenter, enter Enabled or Disabled. For + /ssm/parameter-store/default-parameter-tier, enter Standard, Advanced, or + Intelligent-Tiering For /ssm/parameter-store/high-throughput-enabled, enter true or + false. """ function update_service_setting( diff --git a/src/services/ssm_incidents.jl b/src/services/ssm_incidents.jl index 24f3f318c9..3a271e0180 100644 --- a/src/services/ssm_incidents.jl +++ b/src/services/ssm_incidents.jl @@ -4,6 +4,57 @@ using AWS.AWSServices: ssm_incidents using AWS.Compat using AWS.UUIDs +""" + batch_get_incident_findings(finding_ids, incident_record_arn) + batch_get_incident_findings(finding_ids, incident_record_arn, params::Dict{String,<:Any}) + +Retrieves details about all specified findings for an incident, including descriptive +details about each finding. A finding represents a recent application environment change +made by an CodeDeploy deployment or an CloudFormation stack creation or update that can be +investigated as a potential cause of the incident. + +# Arguments +- `finding_ids`: A list of IDs of findings for which you want to view details. +- `incident_record_arn`: The Amazon Resource Name (ARN) of the incident for which you want + to view finding details. + +""" +function batch_get_incident_findings( + findingIds, incidentRecordArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_incidents( + "POST", + "/batchGetIncidentFindings", + Dict{String,Any}( + "findingIds" => findingIds, "incidentRecordArn" => incidentRecordArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_incident_findings( + findingIds, + incidentRecordArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_incidents( + "POST", + "/batchGetIncidentFindings", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "findingIds" => findingIds, "incidentRecordArn" => incidentRecordArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_replication_set(regions) create_replication_set(regions, params::Dict{String,<:Any}) @@ -124,8 +175,11 @@ Manager can detect automatically. # Arguments - `event_data`: A short description of the event. -- `event_time`: The time that the event occurred. 
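Editor's note: a minimal sketch of the new `batch_get_incident_findings` wrapper shown above. The finding IDs and incident record ARN are placeholders; in practice the IDs would come from a prior `list_incident_findings` call.

```julia
using AWS
@service SSM_Incidents

incident_arn = "arn:aws:ssm-incidents::111122223333:incident-record/example-plan/abc123"  # placeholder
findings = SSM_Incidents.batch_get_incident_findings(
    ["finding-id-1", "finding-id-2"],  # placeholder finding IDs
    incident_arn,
)
```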
-- `event_type`: The type of event. You can create timeline events of type Custom Event. +- `event_time`: The timestamp for when the event occurred. +- `event_type`: The type of event. You can create timeline events of type Custom Event and + Note. To make a Note-type event appear on the Incident notes panel in the console, specify + eventType as Noteand enter the Amazon Resource Name (ARN) of the incident as the value for + eventReference. - `incident_record_arn`: The Amazon Resource Name (ARN) of the incident record that the action adds the incident to. @@ -453,7 +507,8 @@ Retrieves the resource policies attached to the specified response plan. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of resource policies to display for each page of results. -- `"nextToken"`: The pagination token to continue to the next page of results. +- `"nextToken"`: The pagination token for the next set of items to return. (You received + this token from a previous call.) """ function get_resource_policies( resourceArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -560,6 +615,54 @@ function get_timeline_event( ) end +""" + list_incident_findings(incident_record_arn) + list_incident_findings(incident_record_arn, params::Dict{String,<:Any}) + +Retrieves a list of the IDs of findings, plus their last modified times, that have been +identified for a specified incident. A finding represents a recent application environment +change made by an CloudFormation stack creation or update or an CodeDeploy deployment that +can be investigated as a potential cause of the incident. + +# Arguments +- `incident_record_arn`: The Amazon Resource Name (ARN) of the incident for which you want + to view associated findings. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of findings to retrieve per call. +- `"nextToken"`: The pagination token for the next set of items to return. (You received + this token from a previous call.) +""" +function list_incident_findings( + incidentRecordArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_incidents( + "POST", + "/listIncidentFindings", + Dict{String,Any}("incidentRecordArn" => incidentRecordArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_incident_findings( + incidentRecordArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_incidents( + "POST", + "/listIncidentFindings", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("incidentRecordArn" => incidentRecordArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_incident_records() list_incident_records(params::Dict{String,<:Any}) @@ -577,7 +680,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys with more than one value, the response returns incident records that match any of the values provided. - `"maxResults"`: The maximum number of results per page. -- `"nextToken"`: The pagination token to continue to the next page of results. +- `"nextToken"`: The pagination token for the next set of items to return. (You received + this token from a previous call.) """ function list_incident_records(; aws_config::AbstractAWSConfig=global_aws_config()) return ssm_incidents( @@ -612,7 +716,8 @@ List all related items for an incident record. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of related items per page. -- `"nextToken"`: The pagination token to continue to the next page of results. +- `"nextToken"`: The pagination token for the next set of items to return. (You received + this token from a previous call.) """ function list_related_items( incidentRecordArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -652,7 +757,8 @@ Lists details about the replication set configured in your account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of results per page. -- `"nextToken"`: The pagination token to continue to the next page of results. +- `"nextToken"`: The pagination token for the next set of items to return. (You received + this token from a previous call.) """ function list_replication_sets(; aws_config::AbstractAWSConfig=global_aws_config()) return ssm_incidents( @@ -683,7 +789,8 @@ Lists all response plans in your account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of response plans per page. -- `"nextToken"`: The pagination token to continue to the next page of results. +- `"nextToken"`: The pagination token for the next set of items to return. (You received + this token from a previous call.) """ function list_response_plans(; aws_config::AbstractAWSConfig=global_aws_config()) return ssm_incidents( @@ -706,10 +813,10 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Lists the tags that are attached to the specified response plan. +Lists the tags that are attached to the specified response plan or incident. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the response plan. +- `resource_arn`: The Amazon Resource Name (ARN) of the response plan or incident. """ function list_tags_for_resource( @@ -749,14 +856,15 @@ Lists timeline events for the specified incident record. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filters"`: Filters the timeline events based on the provided conditional values. You - can filter timeline events with the following keys: eventTime eventType Note the - following when deciding how to use Filters: If you don't specify a Filter, the response - includes all timeline events. If you specify more than one filter in a single request, - the response returns timeline events that match all filters. If you specify a filter with - more than one value, the response returns timeline events that match any of the values - provided. + can filter timeline events with the following keys: eventReference eventTime + eventType Note the following when deciding how to use Filters: If you don't specify a + Filter, the response includes all timeline events. If you specify more than one filter in + a single request, the response returns timeline events that match all filters. If you + specify a filter with more than one value, the response returns timeline events that match + any of the values provided. - `"maxResults"`: The maximum number of results per page. -- `"nextToken"`: The pagination token to continue to the next page of results. +- `"nextToken"`: The pagination token for the next set of items to return. (You received + this token from a previous call.) 
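Editor's note: the `list_*` wrappers above share the `maxResults`/`nextToken` pagination pattern. A minimal sketch follows, assuming the default AWS.jl behaviour of returning the parsed response as a `Dict` keyed by the API's field names; the incident ARN is a placeholder.

```julia
using AWS
@service SSM_Incidents

incident_arn = "arn:aws:ssm-incidents::111122223333:incident-record/example-plan/abc123"  # placeholder
first_page = SSM_Incidents.list_incident_findings(incident_arn, Dict("maxResults" => 25))
# Feed the returned token back in to fetch the next page of results.
if haskey(first_page, "nextToken")
    next_page = SSM_Incidents.list_incident_findings(
        incident_arn, Dict("maxResults" => 25, "nextToken" => first_page["nextToken"])
    )
end
```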
- `"sortBy"`: Sort timeline events by the specified key value pair. - `"sortOrder"`: Sorts the order of timeline events by the value specified in the sortBy field. @@ -851,12 +959,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"clientToken"`: A token ensuring that the operation is called only once with the specified details. - `"impact"`: Defines the impact to the customers. Providing an impact overwrites the - impact provided by a response plan. Possible impacts: 1 - Critical impact, this - typically relates to full application failure that impacts many to all customers. 2 - - High impact, partial application failure with impact to many customers. 3 - Medium - impact, the application is providing reduced service to customers. 4 - Low impact, - customer might aren't impacted by the problem yet. 5 - No impact, customers aren't - currently impacted but urgent action is needed to avoid impact. + impact provided by a response plan. Supported impact codes 1 - Critical 2 - High + 3 - Medium 4 - Low 5 - No Impact - `"relatedItems"`: Add related items to the incident for other responders to use. Related items are Amazon Web Services resources, external links, or files uploaded to an Amazon S3 bucket. @@ -1046,11 +1150,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the specified details. - `"impact"`: Defines the impact of the incident to customers and applications. If you provide an impact for an incident, it overwrites the impact provided by the response plan. - Possible impacts: 1 - Critical impact, full application failure that impacts many to - all customers. 2 - High impact, partial application failure with impact to many - customers. 3 - Medium impact, the application is providing reduced service to customers. - 4 - Low impact, customer aren't impacted by the problem yet. 5 - No impact, customers - aren't currently impacted but urgent action is needed to avoid impact. + Supported impact codes 1 - Critical 2 - High 3 - Medium 4 - Low 5 - No + Impact - `"notificationTargets"`: The Amazon SNS targets that Incident Manager notifies when a client updates an incident. Using multiple SNS topics creates redundancy in the event that a Region is down during the incident. @@ -1216,8 +1317,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"incidentTemplateDedupeString"`: The string Incident Manager uses to prevent duplicate incidents from being created by the same incident in the same account. - `"incidentTemplateImpact"`: Defines the impact to the customers. Providing an impact - overwrites the impact provided by a response plan. Possible impacts: 5 - Severe impact - 4 - High impact 3 - Medium impact 2 - Low impact 1 - No impact + overwrites the impact provided by a response plan. Supported impact codes 1 - Critical + 2 - High 3 - Medium 4 - Low 5 - No Impact - `"incidentTemplateNotificationTargets"`: The Amazon SNS targets that are notified when updates are made to an incident. - `"incidentTemplateSummary"`: A brief summary of the incident. This typically contains @@ -1283,8 +1384,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys with the DynamoDB table as a related item. This update action overrides all existing references. If you want to keep existing references, you must specify them in the call. If you don't, this action removes any existing references and enters only new references. -- `"eventTime"`: The time that the event occurred. 
-- `"eventType"`: The type of event. You can update events of type Custom Event. +- `"eventTime"`: The timestamp for when the event occurred. +- `"eventType"`: The type of event. You can update events of type Custom Event and Note. """ function update_timeline_event( eventId, incidentRecordArn; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/ssm_sap.jl b/src/services/ssm_sap.jl index 150162eb25..f0c802361a 100644 --- a/src/services/ssm_sap.jl +++ b/src/services/ssm_sap.jl @@ -271,6 +271,7 @@ Lists all the applications registered with AWS Systems Manager for SAP. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: The filter of name, value, and operator. - `"MaxResults"`: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value. - `"NextToken"`: The token for the next page of results. @@ -355,6 +356,54 @@ function list_databases( ) end +""" + list_operation_events(operation_id) + list_operation_events(operation_id, params::Dict{String,<:Any}) + +Returns a list of operations events. Available parameters include OperationID, as well as +optional parameters MaxResults, NextToken, and Filters. + +# Arguments +- `operation_id`: The ID of the operation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Optionally specify filters to narrow the returned operation event items. + Valid filter names include status, resourceID, and resourceType. The valid operator for all + three filters is Equals. +- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve + the remaining results, make another call with the returned nextToken value. If you do not + specify a value for MaxResults, the request returns 50 items per page by default. +- `"NextToken"`: The token to use to retrieve the next page of results. This value is null + when there are no more results to return. +""" +function list_operation_events( + OperationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_sap( + "POST", + "/list-operation-events", + Dict{String,Any}("OperationId" => OperationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_operation_events( + OperationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_sap( + "POST", + "/list-operation-events", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("OperationId" => OperationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_operations(application_id) list_operations(application_id, params::Dict{String,<:Any}) @@ -489,8 +538,8 @@ function put_resource_permission( end """ - register_application(application_id, application_type, credentials, instances) - register_application(application_id, application_type, credentials, instances, params::Dict{String,<:Any}) + register_application(application_id, application_type, instances) + register_application(application_id, application_type, instances, params::Dict{String,<:Any}) Register an SAP application with AWS Systems Manager for SAP. You must meet the following requirements before registering. The SAP application you want to register with AWS Systems @@ -502,11 +551,12 @@ components. # Arguments - `application_id`: The ID of the application. 
- `application_type`: The type of the application. -- `credentials`: The credentials of the SAP application. - `instances`: The Amazon EC2 instances on which your SAP application is running. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Credentials"`: The credentials of the SAP application. +- `"DatabaseArn"`: The Amazon Resource Name of the SAP HANA database. - `"SapInstanceNumber"`: The SAP instance number of the application. - `"Sid"`: The System ID of the application. - `"Tags"`: The tags to be attached to the SAP application. @@ -514,7 +564,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys function register_application( ApplicationId, ApplicationType, - Credentials, Instances; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -524,7 +573,6 @@ function register_application( Dict{String,Any}( "ApplicationId" => ApplicationId, "ApplicationType" => ApplicationType, - "Credentials" => Credentials, "Instances" => Instances, ); aws_config=aws_config, @@ -534,7 +582,6 @@ end function register_application( ApplicationId, ApplicationType, - Credentials, Instances, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -548,7 +595,6 @@ function register_application( Dict{String,Any}( "ApplicationId" => ApplicationId, "ApplicationType" => ApplicationType, - "Credentials" => Credentials, "Instances" => Instances, ), params, @@ -559,6 +605,121 @@ function register_application( ) end +""" + start_application(application_id) + start_application(application_id, params::Dict{String,<:Any}) + +Request is an operation which starts an application. Parameter ApplicationId is required. + +# Arguments +- `application_id`: The ID of the application. + +""" +function start_application(ApplicationId; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_sap( + "POST", + "/start-application", + Dict{String,Any}("ApplicationId" => ApplicationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_application( + ApplicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_sap( + "POST", + "/start-application", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationId" => ApplicationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_application_refresh(application_id) + start_application_refresh(application_id, params::Dict{String,<:Any}) + +Refreshes a registered application. + +# Arguments +- `application_id`: The ID of the application. + +""" +function start_application_refresh( + ApplicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_sap( + "POST", + "/start-application-refresh", + Dict{String,Any}("ApplicationId" => ApplicationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_application_refresh( + ApplicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_sap( + "POST", + "/start-application-refresh", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationId" => ApplicationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_application(application_id) + stop_application(application_id, params::Dict{String,<:Any}) + +Request is an operation to stop an application. Parameter ApplicationId is required. 
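Editor's note: a minimal sketch tying together the new `start_application` and `list_operation_events` wrappers above. The application ID is a placeholder, and the `"OperationId"` response field is an assumption based on the service's API reference rather than anything shown in this patch.

```julia
using AWS
@service SSM_SAP

resp = SSM_SAP.start_application("HANA-app-01")              # placeholder application ID
# List the events emitted by that operation; "OperationId" is the assumed response field.
events = SSM_SAP.list_operation_events(resp["OperationId"])
```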
+Parameters StopConnectedEntity and IncludeEc2InstanceShutdown are optional. + +# Arguments +- `application_id`: The ID of the application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncludeEc2InstanceShutdown"`: Boolean. If included and if set to True, the + StopApplication operation will shut down the associated Amazon EC2 instance in addition to + the application. +- `"StopConnectedEntity"`: Specify the ConnectedEntityType. Accepted type is DBMS. If this + parameter is included, the connected DBMS (Database Management System) will be stopped. +""" +function stop_application(ApplicationId; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_sap( + "POST", + "/stop-application", + Dict{String,Any}("ApplicationId" => ApplicationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_application( + ApplicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_sap( + "POST", + "/stop-application", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationId" => ApplicationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -643,8 +804,11 @@ Updates the settings of an application registered with AWS Systems Manager for S # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Backint"`: Installation of AWS Backint Agent for SAP HANA. - `"CredentialsToAddOrUpdate"`: The credentials to be added or updated. - `"CredentialsToRemove"`: The credentials to be removed. +- `"DatabaseArn"`: The Amazon Resource Name of the SAP HANA database that replaces the + current SAP HANA connection with the SAP_ABAP application. """ function update_application_settings( ApplicationId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/sso_admin.jl b/src/services/sso_admin.jl index e6cbab6c70..cdc93008cc 100644 --- a/src/services/sso_admin.jl +++ b/src/services/sso_admin.jl @@ -12,8 +12,8 @@ Attaches the specified customer managed policy to the specified PermissionSet. # Arguments - `customer_managed_policy_reference`: Specifies the name and path of a customer managed - policy. You must have an IAM policy that matches the name and path in each AWS account - where you want to deploy your permission set. + policy. You must have an IAM policy that matches the name and path in each Amazon Web + Services account where you want to deploy your permission set. - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation will be executed. - `permission_set_arn`: The ARN of the PermissionSet. @@ -65,16 +65,17 @@ end attach_managed_policy_to_permission_set(instance_arn, managed_policy_arn, permission_set_arn) attach_managed_policy_to_permission_set(instance_arn, managed_policy_arn, permission_set_arn, params::Dict{String,<:Any}) -Attaches an AWS managed policy ARN to a permission set. If the permission set is already -referenced by one or more account assignments, you will need to call +Attaches an Amazon Web Services managed policy ARN to a permission set. If the permission +set is already referenced by one or more account assignments, you will need to call ProvisionPermissionSet after this operation. Calling ProvisionPermissionSet applies the corresponding IAM policy updates to all assigned accounts. 
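Editor's note: a minimal sketch of the AttachManagedPolicyToPermissionSet call just described. The instance and permission set ARNs are placeholders, and ReadOnlyAccess stands in for any Amazon Web Services managed policy.

```julia
using AWS
@service SSO_Admin

SSO_Admin.attach_managed_policy_to_permission_set(
    "arn:aws:sso:::instance/ssoins-example",                  # placeholder instance ARN
    "arn:aws:iam::aws:policy/ReadOnlyAccess",                 # managed policy to attach
    "arn:aws:sso:::permissionSet/ssoins-example/ps-example",  # placeholder permission set ARN
)
# If the permission set already backs account assignments, a follow-up
# ProvisionPermissionSet call pushes the IAM policy change to those accounts.
```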
# Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `managed_policy_arn`: The AWS managed policy ARN to be attached to a permission set. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `managed_policy_arn`: The Amazon Web Services managed policy ARN to be attached to a + permission set. - `permission_set_arn`: The ARN of the PermissionSet that the managed policy should be attached to. @@ -125,21 +126,21 @@ end create_account_assignment(instance_arn, permission_set_arn, principal_id, principal_type, target_id, target_type) create_account_assignment(instance_arn, permission_set_arn, principal_id, principal_type, target_id, target_type, params::Dict{String,<:Any}) -Assigns access to a principal for a specified AWS account using a specified permission set. - The term principal here refers to a user or group that is defined in IAM Identity Center. - As part of a successful CreateAccountAssignment call, the specified permission set will -automatically be provisioned to the account in the form of an IAM policy. That policy is -attached to the IAM role created in IAM Identity Center. If the permission set is -subsequently updated, the corresponding IAM policies attached to roles in your accounts -will not be updated automatically. In this case, you must call ProvisionPermissionSet to -make these updates. After a successful response, call +Assigns access to a principal for a specified Amazon Web Services account using a specified +permission set. The term principal here refers to a user or group that is defined in IAM +Identity Center. As part of a successful CreateAccountAssignment call, the specified +permission set will automatically be provisioned to the account in the form of an IAM +policy. That policy is attached to the IAM role created in IAM Identity Center. If the +permission set is subsequently updated, the corresponding IAM policies attached to roles in +your accounts will not be updated automatically. In this case, you must call +ProvisionPermissionSet to make these updates. After a successful response, call DescribeAccountAssignmentCreationStatus to describe the status of an assignment creation request. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. - `permission_set_arn`: The ARN of the permission set that the admin wants to grant the principal access to. - `principal_id`: An identifier for an object in IAM Identity Center, such as a user or @@ -147,8 +148,8 @@ request. information about PrincipalIds in IAM Identity Center, see the IAM Identity Center Identity Store API Reference. - `principal_type`: The entity type for which the assignment will be created. -- `target_id`: TargetID is an AWS account identifier, typically a 10-12 digit string (For - example, 123456789012). +- `target_id`: TargetID is an Amazon Web Services account identifier, (For example, + 123456789012). 
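Editor's note: a minimal sketch of CreateAccountAssignment with the argument order shown above. The ARNs, the group GUID, and the account ID are placeholders; the principal and target type strings are assumed enum values, not taken from this patch.

```julia
using AWS
@service SSO_Admin

SSO_Admin.create_account_assignment(
    "arn:aws:sso:::instance/ssoins-example",                  # placeholder instance ARN
    "arn:aws:sso:::permissionSet/ssoins-example/ps-example",  # placeholder permission set ARN
    "f81d4fae-7dec-11d0-a765-00a0c91e6bf6",                   # PrincipalId (identity store GUID)
    "GROUP",                                                  # PrincipalType (assumed value)
    "123456789012",                                           # TargetId: the target account
    "AWS_ACCOUNT",                                            # TargetType (assumed value)
)
# The request is asynchronous; DescribeAccountAssignmentCreationStatus reports its progress.
```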
- `target_type`: The entity type for which the assignment will be created. """ @@ -206,6 +207,182 @@ function create_account_assignment( ) end +""" + create_application(application_provider_arn, instance_arn, name) + create_application(application_provider_arn, instance_arn, name, params::Dict{String,<:Any}) + +Creates an application in IAM Identity Center for the given application provider. + +# Arguments +- `application_provider_arn`: The ARN of the application provider under which the operation + will run. +- `instance_arn`: The ARN of the instance of IAM Identity Center under which the operation + will run. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web + Services Service Namespaces in the Amazon Web Services General Reference. +- `name`: The name of the . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Specifies a unique, case-sensitive ID that you provide to ensure the + idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"Description"`: The description of the . +- `"PortalOptions"`: A structure that describes the options for the portal associated with + an application. +- `"Status"`: Specifies whether the application is enabled or disabled. +- `"Tags"`: Specifies tags to be attached to the application. +""" +function create_application( + ApplicationProviderArn, + InstanceArn, + Name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "CreateApplication", + Dict{String,Any}( + "ApplicationProviderArn" => ApplicationProviderArn, + "InstanceArn" => InstanceArn, + "Name" => Name, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_application( + ApplicationProviderArn, + InstanceArn, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "CreateApplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationProviderArn" => ApplicationProviderArn, + "InstanceArn" => InstanceArn, + "Name" => Name, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_application_assignment(application_arn, principal_id, principal_type) + create_application_assignment(application_arn, principal_id, principal_type, params::Dict{String,<:Any}) + +Grant application access to a user or group. + +# Arguments +- `application_arn`: The ARN of the application provider under which the operation will run. +- `principal_id`: An identifier for an object in IAM Identity Center, such as a user or + group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more + information about PrincipalIds in IAM Identity Center, see the IAM Identity Center Identity + Store API Reference. +- `principal_type`: The entity type for which the assignment will be created. 
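Editor's note: a minimal sketch chaining the two new calls above, creating an application and then assigning a user to it. The provider and instance ARNs are placeholders, and the `"ApplicationArn"` response field and `"USER"` principal type are assumptions from the service's API reference.

```julia
using AWS
@service SSO_Admin

app = SSO_Admin.create_application(
    "arn:aws:sso::aws:applicationProvider/custom",  # placeholder provider ARN
    "arn:aws:sso:::instance/ssoins-example",        # placeholder instance ARN
    "MyWorkforceApp",
)
SSO_Admin.create_application_assignment(
    app["ApplicationArn"],                          # assumed response field
    "f81d4fae-7dec-11d0-a765-00a0c91e6bf6",         # PrincipalId (identity store GUID)
    "USER",                                         # PrincipalType (assumed value)
)
```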
+ +""" +function create_application_assignment( + ApplicationArn, + PrincipalId, + PrincipalType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "CreateApplicationAssignment", + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_application_assignment( + ApplicationArn, + PrincipalId, + PrincipalType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "CreateApplicationAssignment", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_instance() + create_instance(params::Dict{String,<:Any}) + +Creates an instance of IAM Identity Center for a standalone Amazon Web Services account +that is not managed by Organizations or a member Amazon Web Services account in an +organization. You can create only one instance per account and across all Amazon Web +Services Regions. The CreateInstance request is rejected if the following apply: The +instance is created within the organization management account. An instance already +exists in the same account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Specifies a unique, case-sensitive ID that you provide to ensure the + idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"Name"`: The name of the instance of IAM Identity Center. +- `"Tags"`: Specifies tags to be attached to the instance of IAM Identity Center. +""" +function create_instance(; aws_config::AbstractAWSConfig=global_aws_config()) + return sso_admin( + "CreateInstance", + Dict{String,Any}("ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_instance( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "CreateInstance", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_instance_access_control_attribute_configuration(instance_access_control_attribute_configuration, instance_arn) create_instance_access_control_attribute_configuration(instance_access_control_attribute_configuration, instance_arn, params::Dict{String,<:Any}) @@ -274,12 +451,12 @@ end create_permission_set(instance_arn, name, params::Dict{String,<:Any}) Creates a permission set within a specified IAM Identity Center instance. To grant users -and groups access to AWS account resources, use CreateAccountAssignment . +and groups access to Amazon Web Services account resources, use CreateAccountAssignment . 
# Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. - `name`: The name of the PermissionSet. # Optional Parameters @@ -321,26 +498,106 @@ function create_permission_set( ) end +""" + create_trusted_token_issuer(instance_arn, name, trusted_token_issuer_configuration, trusted_token_issuer_type) + create_trusted_token_issuer(instance_arn, name, trusted_token_issuer_configuration, trusted_token_issuer_type, params::Dict{String,<:Any}) + +Creates a connection to a trusted token issuer in an instance of IAM Identity Center. A +trusted token issuer enables trusted identity propagation to be used with applications that +authenticate outside of Amazon Web Services. This trusted token issuer describes an +external identity provider (IdP) that can generate claims or assertions in the form of +access tokens for a user. Applications enabled for IAM Identity Center can use these tokens +for authentication. + +# Arguments +- `instance_arn`: Specifies the ARN of the instance of IAM Identity Center to contain the + new trusted token issuer configuration. +- `name`: Specifies the name of the new trusted token issuer configuration. +- `trusted_token_issuer_configuration`: Specifies settings that apply to the new trusted + token issuer configuration. The settings that are available depend on what + TrustedTokenIssuerType you specify. +- `trusted_token_issuer_type`: Specifies the type of the new trusted token issuer. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Specifies a unique, case-sensitive ID that you provide to ensure the + idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value.. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"Tags"`: Specifies tags to be attached to the new trusted token issuer configuration. 
+""" +function create_trusted_token_issuer( + InstanceArn, + Name, + TrustedTokenIssuerConfiguration, + TrustedTokenIssuerType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "CreateTrustedTokenIssuer", + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "Name" => Name, + "TrustedTokenIssuerConfiguration" => TrustedTokenIssuerConfiguration, + "TrustedTokenIssuerType" => TrustedTokenIssuerType, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_trusted_token_issuer( + InstanceArn, + Name, + TrustedTokenIssuerConfiguration, + TrustedTokenIssuerType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "CreateTrustedTokenIssuer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "Name" => Name, + "TrustedTokenIssuerConfiguration" => TrustedTokenIssuerConfiguration, + "TrustedTokenIssuerType" => TrustedTokenIssuerType, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_account_assignment(instance_arn, permission_set_arn, principal_id, principal_type, target_id, target_type) delete_account_assignment(instance_arn, permission_set_arn, principal_id, principal_type, target_id, target_type, params::Dict{String,<:Any}) -Deletes a principal's access from a specified AWS account using a specified permission set. - After a successful response, call DescribeAccountAssignmentCreationStatus to describe the -status of an assignment deletion request. +Deletes a principal's access from a specified Amazon Web Services account using a specified +permission set. After a successful response, call DescribeAccountAssignmentDeletionStatus +to describe the status of an assignment deletion request. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. - `permission_set_arn`: The ARN of the permission set that will be used to remove access. - `principal_id`: An identifier for an object in IAM Identity Center, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in IAM Identity Center, see the IAM Identity Center Identity Store API Reference. - `principal_type`: The entity type for which the assignment will be deleted. -- `target_id`: TargetID is an AWS account identifier, typically a 10-12 digit string (For - example, 123456789012). +- `target_id`: TargetID is an Amazon Web Services account identifier, (For example, + 123456789012). - `target_type`: The entity type for which the assignment will be deleted. """ @@ -399,46 +656,36 @@ function delete_account_assignment( end """ - delete_inline_policy_from_permission_set(instance_arn, permission_set_arn) - delete_inline_policy_from_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + delete_application(application_arn) + delete_application(application_arn, params::Dict{String,<:Any}) -Deletes the inline policy from a specified permission set. +Deletes the association with the application. 
The connected service resource still exists. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the permission set that will be used to remove access. +- `application_arn`: Specifies the ARN of the application. For more information about ARNs, + see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon + Web Services General Reference. """ -function delete_inline_policy_from_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function delete_application( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DeleteInlinePolicyFromPermissionSet", - Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn - ); + "DeleteApplication", + Dict{String,Any}("ApplicationArn" => ApplicationArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_inline_policy_from_permission_set( - InstanceArn, - PermissionSetArn, +function delete_application( + ApplicationArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DeleteInlinePolicyFromPermissionSet", + "DeleteApplication", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn - ), - params, - ), + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -446,39 +693,40 @@ function delete_inline_policy_from_permission_set( end """ - delete_instance_access_control_attribute_configuration(instance_arn) - delete_instance_access_control_attribute_configuration(instance_arn, params::Dict{String,<:Any}) + delete_application_access_scope(application_arn, scope) + delete_application_access_scope(application_arn, scope, params::Dict{String,<:Any}) -Disables the attributes-based access control (ABAC) feature for the specified IAM Identity -Center instance and deletes all of the attribute mappings that have been configured. Once -deleted, any attributes that are received from an identity source and any custom attributes -you have previously configured will not be passed. For more information about ABAC, see -Attribute-Based Access Control in the IAM Identity Center User Guide. +Deletes an IAM Identity Center access scope from an application. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. +- `application_arn`: Specifies the ARN of the application with the access scope to delete. +- `scope`: Specifies the name of the access scope to remove from the application. 
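Editor's note: a minimal sketch of the new DeleteApplication wrapper. The application ARN is a placeholder; as stated above, only the IAM Identity Center association is removed.

```julia
using AWS
@service SSO_Admin

# Removes only the IAM Identity Center association; the connected service resource remains.
SSO_Admin.delete_application(
    "arn:aws:sso::111122223333:application/ssoins-example/apl-example",  # placeholder ARN
)
```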
""" -function delete_instance_access_control_attribute_configuration( - InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function delete_application_access_scope( + ApplicationArn, Scope; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DeleteInstanceAccessControlAttributeConfiguration", - Dict{String,Any}("InstanceArn" => InstanceArn); + "DeleteApplicationAccessScope", + Dict{String,Any}("ApplicationArn" => ApplicationArn, "Scope" => Scope); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_instance_access_control_attribute_configuration( - InstanceArn, +function delete_application_access_scope( + ApplicationArn, + Scope, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DeleteInstanceAccessControlAttributeConfiguration", + "DeleteApplicationAccessScope", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + mergewith( + _merge, + Dict{String,Any}("ApplicationArn" => ApplicationArn, "Scope" => Scope), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -486,43 +734,54 @@ function delete_instance_access_control_attribute_configuration( end """ - delete_permission_set(instance_arn, permission_set_arn) - delete_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + delete_application_assignment(application_arn, principal_id, principal_type) + delete_application_assignment(application_arn, principal_id, principal_type, params::Dict{String,<:Any}) -Deletes the specified permission set. +Revoke application access to an application by deleting application assignments for a user +or group. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the permission set that should be deleted. +- `application_arn`: Specifies the ARN of the application. +- `principal_id`: An identifier for an object in IAM Identity Center, such as a user or + group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more + information about PrincipalIds in IAM Identity Center, see the IAM Identity Center Identity + Store API Reference. +- `principal_type`: The entity type for which the assignment will be deleted. 
""" -function delete_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function delete_application_assignment( + ApplicationArn, + PrincipalId, + PrincipalType; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DeletePermissionSet", + "DeleteApplicationAssignment", Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "ApplicationArn" => ApplicationArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_permission_set( - InstanceArn, - PermissionSetArn, +function delete_application_assignment( + ApplicationArn, + PrincipalId, + PrincipalType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DeletePermissionSet", + "DeleteApplicationAssignment", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "ApplicationArn" => ApplicationArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, ), params, ), @@ -533,42 +792,47 @@ function delete_permission_set( end """ - delete_permissions_boundary_from_permission_set(instance_arn, permission_set_arn) - delete_permissions_boundary_from_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + delete_application_authentication_method(application_arn, authentication_method_type) + delete_application_authentication_method(application_arn, authentication_method_type, params::Dict{String,<:Any}) -Deletes the permissions boundary from a specified PermissionSet. +Deletes an authentication method from an application. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. -- `permission_set_arn`: The ARN of the PermissionSet. +- `application_arn`: Specifies the ARN of the application with the authentication method to + delete. +- `authentication_method_type`: Specifies the authentication method type to delete from the + application. 
""" -function delete_permissions_boundary_from_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function delete_application_authentication_method( + ApplicationArn, + AuthenticationMethodType; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DeletePermissionsBoundaryFromPermissionSet", + "DeleteApplicationAuthenticationMethod", Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "ApplicationArn" => ApplicationArn, + "AuthenticationMethodType" => AuthenticationMethodType, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function delete_permissions_boundary_from_permission_set( - InstanceArn, - PermissionSetArn, +function delete_application_authentication_method( + ApplicationArn, + AuthenticationMethodType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DeletePermissionsBoundaryFromPermissionSet", + "DeleteApplicationAuthenticationMethod", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "ApplicationArn" => ApplicationArn, + "AuthenticationMethodType" => AuthenticationMethodType, ), params, ), @@ -579,49 +843,39 @@ function delete_permissions_boundary_from_permission_set( end """ - describe_account_assignment_creation_status(account_assignment_creation_request_id, instance_arn) - describe_account_assignment_creation_status(account_assignment_creation_request_id, instance_arn, params::Dict{String,<:Any}) + delete_application_grant(application_arn, grant_type) + delete_application_grant(application_arn, grant_type, params::Dict{String,<:Any}) -Describes the status of the assignment creation request. +Deletes a grant from an application. # Arguments -- `account_assignment_creation_request_id`: The identifier that is used to track the - request operation progress. -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. +- `application_arn`: Specifies the ARN of the application with the grant to delete. +- `grant_type`: Specifies the type of grant to delete from the application. 
""" -function describe_account_assignment_creation_status( - AccountAssignmentCreationRequestId, - InstanceArn; - aws_config::AbstractAWSConfig=global_aws_config(), +function delete_application_grant( + ApplicationArn, GrantType; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DescribeAccountAssignmentCreationStatus", - Dict{String,Any}( - "AccountAssignmentCreationRequestId" => AccountAssignmentCreationRequestId, - "InstanceArn" => InstanceArn, - ); + "DeleteApplicationGrant", + Dict{String,Any}("ApplicationArn" => ApplicationArn, "GrantType" => GrantType); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_account_assignment_creation_status( - AccountAssignmentCreationRequestId, - InstanceArn, +function delete_application_grant( + ApplicationArn, + GrantType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DescribeAccountAssignmentCreationStatus", + "DeleteApplicationGrant", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "AccountAssignmentCreationRequestId" => - AccountAssignmentCreationRequestId, - "InstanceArn" => InstanceArn, + "ApplicationArn" => ApplicationArn, "GrantType" => GrantType ), params, ), @@ -632,49 +886,43 @@ function describe_account_assignment_creation_status( end """ - describe_account_assignment_deletion_status(account_assignment_deletion_request_id, instance_arn) - describe_account_assignment_deletion_status(account_assignment_deletion_request_id, instance_arn, params::Dict{String,<:Any}) + delete_inline_policy_from_permission_set(instance_arn, permission_set_arn) + delete_inline_policy_from_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) -Describes the status of the assignment deletion request. +Deletes the inline policy from a specified permission set. # Arguments -- `account_assignment_deletion_request_id`: The identifier that is used to track the - request operation progress. - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the permission set that will be used to remove access. 
""" -function describe_account_assignment_deletion_status( - AccountAssignmentDeletionRequestId, - InstanceArn; - aws_config::AbstractAWSConfig=global_aws_config(), +function delete_inline_policy_from_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DescribeAccountAssignmentDeletionStatus", + "DeleteInlinePolicyFromPermissionSet", Dict{String,Any}( - "AccountAssignmentDeletionRequestId" => AccountAssignmentDeletionRequestId, - "InstanceArn" => InstanceArn, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_account_assignment_deletion_status( - AccountAssignmentDeletionRequestId, +function delete_inline_policy_from_permission_set( InstanceArn, + PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DescribeAccountAssignmentDeletionStatus", + "DeleteInlinePolicyFromPermissionSet", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "AccountAssignmentDeletionRequestId" => - AccountAssignmentDeletionRequestId, - "InstanceArn" => InstanceArn, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ), params, ), @@ -685,37 +933,33 @@ function describe_account_assignment_deletion_status( end """ - describe_instance_access_control_attribute_configuration(instance_arn) - describe_instance_access_control_attribute_configuration(instance_arn, params::Dict{String,<:Any}) + delete_instance(instance_arn) + delete_instance(instance_arn, params::Dict{String,<:Any}) -Returns the list of IAM Identity Center identity store attributes that have been configured -to work with attributes-based access control (ABAC) for the specified IAM Identity Center -instance. This will not return attributes configured and sent by an external identity -provider. For more information about ABAC, see Attribute-Based Access Control in the IAM -Identity Center User Guide. +Deletes the instance of IAM Identity Center. Only the account that owns the instance can +call this API. Neither the delegated administrator nor member account can delete the +organization instance, but those roles can delete their own instance. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. +- `instance_arn`: The ARN of the instance of IAM Identity Center under which the operation + will run. 
""" -function describe_instance_access_control_attribute_configuration( - InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() -) +function delete_instance(InstanceArn; aws_config::AbstractAWSConfig=global_aws_config()) return sso_admin( - "DescribeInstanceAccessControlAttributeConfiguration", + "DeleteInstance", Dict{String,Any}("InstanceArn" => InstanceArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_instance_access_control_attribute_configuration( +function delete_instance( InstanceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DescribeInstanceAccessControlAttributeConfiguration", + "DeleteInstance", Dict{String,Any}( mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) ); @@ -725,46 +969,39 @@ function describe_instance_access_control_attribute_configuration( end """ - describe_permission_set(instance_arn, permission_set_arn) - describe_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + delete_instance_access_control_attribute_configuration(instance_arn) + delete_instance_access_control_attribute_configuration(instance_arn, params::Dict{String,<:Any}) -Gets the details of the permission set. +Disables the attributes-based access control (ABAC) feature for the specified IAM Identity +Center instance and deletes all of the attribute mappings that have been configured. Once +deleted, any attributes that are received from an identity source and any custom attributes +you have previously configured will not be passed. For more information about ABAC, see +Attribute-Based Access Control in the IAM Identity Center User Guide. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the permission set. + will be executed. 
""" -function describe_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function delete_instance_access_control_attribute_configuration( + InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DescribePermissionSet", - Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn - ); + "DeleteInstanceAccessControlAttributeConfiguration", + Dict{String,Any}("InstanceArn" => InstanceArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_permission_set( +function delete_instance_access_control_attribute_configuration( InstanceArn, - PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DescribePermissionSet", + "DeleteInstanceAccessControlAttributeConfiguration", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn - ), - params, - ), + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -772,48 +1009,43 @@ function describe_permission_set( end """ - describe_permission_set_provisioning_status(instance_arn, provision_permission_set_request_id) - describe_permission_set_provisioning_status(instance_arn, provision_permission_set_request_id, params::Dict{String,<:Any}) + delete_permission_set(instance_arn, permission_set_arn) + delete_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) -Describes the status for the given permission set provisioning request. +Deletes the specified permission set. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `provision_permission_set_request_id`: The identifier that is provided by the - ProvisionPermissionSet call to retrieve the current status of the provisioning workflow. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the permission set that should be deleted. 
""" -function describe_permission_set_provisioning_status( - InstanceArn, - ProvisionPermissionSetRequestId; - aws_config::AbstractAWSConfig=global_aws_config(), +function delete_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DescribePermissionSetProvisioningStatus", + "DeletePermissionSet", Dict{String,Any}( - "InstanceArn" => InstanceArn, - "ProvisionPermissionSetRequestId" => ProvisionPermissionSetRequestId, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function describe_permission_set_provisioning_status( +function delete_permission_set( InstanceArn, - ProvisionPermissionSetRequestId, + PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DescribePermissionSetProvisioningStatus", + "DeletePermissionSet", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "InstanceArn" => InstanceArn, - "ProvisionPermissionSetRequestId" => ProvisionPermissionSetRequestId, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ), params, ), @@ -824,53 +1056,42 @@ function describe_permission_set_provisioning_status( end """ - detach_customer_managed_policy_reference_from_permission_set(customer_managed_policy_reference, instance_arn, permission_set_arn) - detach_customer_managed_policy_reference_from_permission_set(customer_managed_policy_reference, instance_arn, permission_set_arn, params::Dict{String,<:Any}) + delete_permissions_boundary_from_permission_set(instance_arn, permission_set_arn) + delete_permissions_boundary_from_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) -Detaches the specified customer managed policy from the specified PermissionSet. +Deletes the permissions boundary from a specified PermissionSet. # Arguments -- `customer_managed_policy_reference`: Specifies the name and path of a customer managed - policy. You must have an IAM policy that matches the name and path in each AWS account - where you want to deploy your permission set. - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation will be executed. - `permission_set_arn`: The ARN of the PermissionSet. 
""" -function detach_customer_managed_policy_reference_from_permission_set( - CustomerManagedPolicyReference, - InstanceArn, - PermissionSetArn; - aws_config::AbstractAWSConfig=global_aws_config(), +function delete_permissions_boundary_from_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DetachCustomerManagedPolicyReferenceFromPermissionSet", + "DeletePermissionsBoundaryFromPermissionSet", Dict{String,Any}( - "CustomerManagedPolicyReference" => CustomerManagedPolicyReference, - "InstanceArn" => InstanceArn, - "PermissionSetArn" => PermissionSetArn, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function detach_customer_managed_policy_reference_from_permission_set( - CustomerManagedPolicyReference, +function delete_permissions_boundary_from_permission_set( InstanceArn, PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DetachCustomerManagedPolicyReferenceFromPermissionSet", + "DeletePermissionsBoundaryFromPermissionSet", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "CustomerManagedPolicyReference" => CustomerManagedPolicyReference, - "InstanceArn" => InstanceArn, - "PermissionSetArn" => PermissionSetArn, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ), params, ), @@ -881,54 +1102,39 @@ function detach_customer_managed_policy_reference_from_permission_set( end """ - detach_managed_policy_from_permission_set(instance_arn, managed_policy_arn, permission_set_arn) - detach_managed_policy_from_permission_set(instance_arn, managed_policy_arn, permission_set_arn, params::Dict{String,<:Any}) + delete_trusted_token_issuer(trusted_token_issuer_arn) + delete_trusted_token_issuer(trusted_token_issuer_arn, params::Dict{String,<:Any}) -Detaches the attached AWS managed policy ARN from the specified permission set. +Deletes a trusted token issuer configuration from an instance of IAM Identity Center. +Deleting this trusted token issuer configuration will cause users to lose access to any +applications that are configured to use the trusted token issuer. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `managed_policy_arn`: The AWS managed policy ARN to be detached from a permission set. -- `permission_set_arn`: The ARN of the PermissionSet from which the policy should be - detached. +- `trusted_token_issuer_arn`: Specifies the ARN of the trusted token issuer configuration + to delete. 
""" -function detach_managed_policy_from_permission_set( - InstanceArn, - ManagedPolicyArn, - PermissionSetArn; - aws_config::AbstractAWSConfig=global_aws_config(), +function delete_trusted_token_issuer( + TrustedTokenIssuerArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "DetachManagedPolicyFromPermissionSet", - Dict{String,Any}( - "InstanceArn" => InstanceArn, - "ManagedPolicyArn" => ManagedPolicyArn, - "PermissionSetArn" => PermissionSetArn, - ); + "DeleteTrustedTokenIssuer", + Dict{String,Any}("TrustedTokenIssuerArn" => TrustedTokenIssuerArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function detach_managed_policy_from_permission_set( - InstanceArn, - ManagedPolicyArn, - PermissionSetArn, +function delete_trusted_token_issuer( + TrustedTokenIssuerArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "DetachManagedPolicyFromPermissionSet", + "DeleteTrustedTokenIssuer", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "InstanceArn" => InstanceArn, - "ManagedPolicyArn" => ManagedPolicyArn, - "PermissionSetArn" => PermissionSetArn, - ), + Dict{String,Any}("TrustedTokenIssuerArn" => TrustedTokenIssuerArn), params, ), ); @@ -938,43 +1144,49 @@ function detach_managed_policy_from_permission_set( end """ - get_inline_policy_for_permission_set(instance_arn, permission_set_arn) - get_inline_policy_for_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + describe_account_assignment_creation_status(account_assignment_creation_request_id, instance_arn) + describe_account_assignment_creation_status(account_assignment_creation_request_id, instance_arn, params::Dict{String,<:Any}) -Obtains the inline policy assigned to the permission set. +Describes the status of the assignment creation request. # Arguments +- `account_assignment_creation_request_id`: The identifier that is used to track the + request operation progress. - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the permission set. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. 
""" -function get_inline_policy_for_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function describe_account_assignment_creation_status( + AccountAssignmentCreationRequestId, + InstanceArn; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "GetInlinePolicyForPermissionSet", + "DescribeAccountAssignmentCreationStatus", Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "AccountAssignmentCreationRequestId" => AccountAssignmentCreationRequestId, + "InstanceArn" => InstanceArn, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_inline_policy_for_permission_set( +function describe_account_assignment_creation_status( + AccountAssignmentCreationRequestId, InstanceArn, - PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "GetInlinePolicyForPermissionSet", + "DescribeAccountAssignmentCreationStatus", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "AccountAssignmentCreationRequestId" => + AccountAssignmentCreationRequestId, + "InstanceArn" => InstanceArn, ), params, ), @@ -985,42 +1197,49 @@ function get_inline_policy_for_permission_set( end """ - get_permissions_boundary_for_permission_set(instance_arn, permission_set_arn) - get_permissions_boundary_for_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + describe_account_assignment_deletion_status(account_assignment_deletion_request_id, instance_arn) + describe_account_assignment_deletion_status(account_assignment_deletion_request_id, instance_arn, params::Dict{String,<:Any}) -Obtains the permissions boundary for a specified PermissionSet. +Describes the status of the assignment deletion request. # Arguments +- `account_assignment_deletion_request_id`: The identifier that is used to track the + request operation progress. - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. -- `permission_set_arn`: The ARN of the PermissionSet. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. 
""" -function get_permissions_boundary_for_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function describe_account_assignment_deletion_status( + AccountAssignmentDeletionRequestId, + InstanceArn; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "GetPermissionsBoundaryForPermissionSet", + "DescribeAccountAssignmentDeletionStatus", Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "AccountAssignmentDeletionRequestId" => AccountAssignmentDeletionRequestId, + "InstanceArn" => InstanceArn, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_permissions_boundary_for_permission_set( +function describe_account_assignment_deletion_status( + AccountAssignmentDeletionRequestId, InstanceArn, - PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "GetPermissionsBoundaryForPermissionSet", + "DescribeAccountAssignmentDeletionStatus", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + "AccountAssignmentDeletionRequestId" => + AccountAssignmentDeletionRequestId, + "InstanceArn" => InstanceArn, ), params, ), @@ -1031,43 +1250,36 @@ function get_permissions_boundary_for_permission_set( end """ - list_account_assignment_creation_status(instance_arn) - list_account_assignment_creation_status(instance_arn, params::Dict{String,<:Any}) + describe_application(application_arn) + describe_application(application_arn, params::Dict{String,<:Any}) -Lists the status of the AWS account assignment creation requests for a specified IAM -Identity Center instance. +Retrieves the details of an application associated with an instance of IAM Identity Center. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. +- `application_arn`: Specifies the ARN of the application. For more information about ARNs, + see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon + Web Services General Reference. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: Filters results based on the passed attribute value. -- `"MaxResults"`: The maximum number of results to display for the assignment. -- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use - the output of previous API calls to make subsequent calls. 
""" -function list_account_assignment_creation_status( - InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function describe_application( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ListAccountAssignmentCreationStatus", - Dict{String,Any}("InstanceArn" => InstanceArn); + "DescribeApplication", + Dict{String,Any}("ApplicationArn" => ApplicationArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_account_assignment_creation_status( - InstanceArn, +function describe_application( + ApplicationArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListAccountAssignmentCreationStatus", + "DescribeApplication", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1075,41 +1287,1321 @@ function list_account_assignment_creation_status( end """ - list_account_assignment_deletion_status(instance_arn) - list_account_assignment_deletion_status(instance_arn, params::Dict{String,<:Any}) + describe_application_assignment(application_arn, principal_id, principal_type) + describe_application_assignment(application_arn, principal_id, principal_type, params::Dict{String,<:Any}) -Lists the status of the AWS account assignment deletion requests for a specified IAM -Identity Center instance. +Retrieves a direct assignment of a user or group to an application. If the user doesn’t +have a direct assignment to the application, the user may still have access to the +application through a group. Therefore, don’t use this API to test access to an +application for a user. Instead use ListApplicationAssignmentsForPrincipal. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. +- `application_arn`: Specifies the ARN of the application. For more information about ARNs, + see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon + Web Services General Reference. +- `principal_id`: An identifier for an object in IAM Identity Center, such as a user or + group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more + information about PrincipalIds in IAM Identity Center, see the IAM Identity Center Identity + Store API Reference. +- `principal_type`: The entity type for which the assignment will be created. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: Filters results based on the passed attribute value. -- `"MaxResults"`: The maximum number of results to display for the assignment. -- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use - the output of previous API calls to make subsequent calls. 
""" -function list_account_assignment_deletion_status( - InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function describe_application_assignment( + ApplicationArn, + PrincipalId, + PrincipalType; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListAccountAssignmentDeletionStatus", - Dict{String,Any}("InstanceArn" => InstanceArn); + "DescribeApplicationAssignment", + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_application_assignment( + ApplicationArn, + PrincipalId, + PrincipalType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribeApplicationAssignment", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_application_provider(application_provider_arn) + describe_application_provider(application_provider_arn, params::Dict{String,<:Any}) + +Retrieves details about a provider that can be used to connect an Amazon Web Services +managed application or customer managed application to IAM Identity Center. + +# Arguments +- `application_provider_arn`: Specifies the ARN of the application provider for which you + want details. + +""" +function describe_application_provider( + ApplicationProviderArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "DescribeApplicationProvider", + Dict{String,Any}("ApplicationProviderArn" => ApplicationProviderArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_application_provider( + ApplicationProviderArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribeApplicationProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ApplicationProviderArn" => ApplicationProviderArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_instance(instance_arn) + describe_instance(instance_arn, params::Dict{String,<:Any}) + +Returns the details of an instance of IAM Identity Center. The status can be one of the +following: CREATE_IN_PROGRESS - The instance is in the process of being created. When +the instance is ready for use, DescribeInstance returns the status of ACTIVE. While the +instance is in the CREATE_IN_PROGRESS state, you can call only DescribeInstance and +DeleteInstance operations. DELETE_IN_PROGRESS - The instance is being deleted. Returns +AccessDeniedException after the delete operation completes. ACTIVE - The instance is +active. + +# Arguments +- `instance_arn`: The ARN of the instance of IAM Identity Center under which the operation + will run. 
+ +""" +function describe_instance(InstanceArn; aws_config::AbstractAWSConfig=global_aws_config()) + return sso_admin( + "DescribeInstance", + Dict{String,Any}("InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_instance( + InstanceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribeInstance", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_instance_access_control_attribute_configuration(instance_arn) + describe_instance_access_control_attribute_configuration(instance_arn, params::Dict{String,<:Any}) + +Returns the list of IAM Identity Center identity store attributes that have been configured +to work with attributes-based access control (ABAC) for the specified IAM Identity Center +instance. This will not return attributes configured and sent by an external identity +provider. For more information about ABAC, see Attribute-Based Access Control in the IAM +Identity Center User Guide. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. + +""" +function describe_instance_access_control_attribute_configuration( + InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "DescribeInstanceAccessControlAttributeConfiguration", + Dict{String,Any}("InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_instance_access_control_attribute_configuration( + InstanceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribeInstanceAccessControlAttributeConfiguration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_permission_set(instance_arn, permission_set_arn) + describe_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + +Gets the details of the permission set. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the permission set. 
+ +""" +function describe_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "DescribePermissionSet", + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_permission_set( + InstanceArn, + PermissionSetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribePermissionSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_permission_set_provisioning_status(instance_arn, provision_permission_set_request_id) + describe_permission_set_provisioning_status(instance_arn, provision_permission_set_request_id, params::Dict{String,<:Any}) + +Describes the status for the given permission set provisioning request. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `provision_permission_set_request_id`: The identifier that is provided by the + ProvisionPermissionSet call to retrieve the current status of the provisioning workflow. + +""" +function describe_permission_set_provisioning_status( + InstanceArn, + ProvisionPermissionSetRequestId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribePermissionSetProvisioningStatus", + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "ProvisionPermissionSetRequestId" => ProvisionPermissionSetRequestId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_permission_set_provisioning_status( + InstanceArn, + ProvisionPermissionSetRequestId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribePermissionSetProvisioningStatus", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "ProvisionPermissionSetRequestId" => ProvisionPermissionSetRequestId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_trusted_token_issuer(trusted_token_issuer_arn) + describe_trusted_token_issuer(trusted_token_issuer_arn, params::Dict{String,<:Any}) + +Retrieves details about a trusted token issuer configuration stored in an instance of IAM +Identity Center. Details include the name of the trusted token issuer, the issuer URL, and +the path of the source attribute and the destination attribute for a trusted token issuer +configuration. + +# Arguments +- `trusted_token_issuer_arn`: Specifies the ARN of the trusted token issuer configuration + that you want details about. 
+ +""" +function describe_trusted_token_issuer( + TrustedTokenIssuerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "DescribeTrustedTokenIssuer", + Dict{String,Any}("TrustedTokenIssuerArn" => TrustedTokenIssuerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_trusted_token_issuer( + TrustedTokenIssuerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DescribeTrustedTokenIssuer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("TrustedTokenIssuerArn" => TrustedTokenIssuerArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + detach_customer_managed_policy_reference_from_permission_set(customer_managed_policy_reference, instance_arn, permission_set_arn) + detach_customer_managed_policy_reference_from_permission_set(customer_managed_policy_reference, instance_arn, permission_set_arn, params::Dict{String,<:Any}) + +Detaches the specified customer managed policy from the specified PermissionSet. + +# Arguments +- `customer_managed_policy_reference`: Specifies the name and path of a customer managed + policy. You must have an IAM policy that matches the name and path in each Amazon Web + Services account where you want to deploy your permission set. +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. +- `permission_set_arn`: The ARN of the PermissionSet. + +""" +function detach_customer_managed_policy_reference_from_permission_set( + CustomerManagedPolicyReference, + InstanceArn, + PermissionSetArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DetachCustomerManagedPolicyReferenceFromPermissionSet", + Dict{String,Any}( + "CustomerManagedPolicyReference" => CustomerManagedPolicyReference, + "InstanceArn" => InstanceArn, + "PermissionSetArn" => PermissionSetArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function detach_customer_managed_policy_reference_from_permission_set( + CustomerManagedPolicyReference, + InstanceArn, + PermissionSetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DetachCustomerManagedPolicyReferenceFromPermissionSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CustomerManagedPolicyReference" => CustomerManagedPolicyReference, + "InstanceArn" => InstanceArn, + "PermissionSetArn" => PermissionSetArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + detach_managed_policy_from_permission_set(instance_arn, managed_policy_arn, permission_set_arn) + detach_managed_policy_from_permission_set(instance_arn, managed_policy_arn, permission_set_arn, params::Dict{String,<:Any}) + +Detaches the attached Amazon Web Services managed policy ARN from the specified permission +set. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `managed_policy_arn`: The Amazon Web Services managed policy ARN to be detached from a + permission set. +- `permission_set_arn`: The ARN of the PermissionSet from which the policy should be + detached. 
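A minimal call sketch for detach_managed_policy_from_permission_set; the instance and permission
set ARNs are placeholders, and ReadOnlyAccess stands in for whichever managed policy was
previously attached:

    detach_managed_policy_from_permission_set(
        "arn:aws:sso:::instance/ssoins-1234567890abcdef",
        "arn:aws:iam::aws:policy/ReadOnlyAccess",
        "arn:aws:sso:::permissionSet/ssoins-1234567890abcdef/ps-1234567890abcdef",
    )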
+ +""" +function detach_managed_policy_from_permission_set( + InstanceArn, + ManagedPolicyArn, + PermissionSetArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DetachManagedPolicyFromPermissionSet", + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "ManagedPolicyArn" => ManagedPolicyArn, + "PermissionSetArn" => PermissionSetArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function detach_managed_policy_from_permission_set( + InstanceArn, + ManagedPolicyArn, + PermissionSetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "DetachManagedPolicyFromPermissionSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "ManagedPolicyArn" => ManagedPolicyArn, + "PermissionSetArn" => PermissionSetArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_application_access_scope(application_arn, scope) + get_application_access_scope(application_arn, scope, params::Dict{String,<:Any}) + +Retrieves the authorized targets for an IAM Identity Center access scope for an application. + +# Arguments +- `application_arn`: Specifies the ARN of the application with the access scope that you + want to retrieve. +- `scope`: Specifies the name of the access scope for which you want the authorized targets. + +""" +function get_application_access_scope( + ApplicationArn, Scope; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "GetApplicationAccessScope", + Dict{String,Any}("ApplicationArn" => ApplicationArn, "Scope" => Scope); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_application_access_scope( + ApplicationArn, + Scope, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "GetApplicationAccessScope", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ApplicationArn" => ApplicationArn, "Scope" => Scope), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_application_assignment_configuration(application_arn) + get_application_assignment_configuration(application_arn, params::Dict{String,<:Any}) + +Retrieves the configuration of PutApplicationAssignmentConfiguration. + +# Arguments +- `application_arn`: Specifies the ARN of the application. For more information about ARNs, + see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon + Web Services General Reference. 
+ +""" +function get_application_assignment_configuration( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "GetApplicationAssignmentConfiguration", + Dict{String,Any}("ApplicationArn" => ApplicationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_application_assignment_configuration( + ApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "GetApplicationAssignmentConfiguration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_application_authentication_method(application_arn, authentication_method_type) + get_application_authentication_method(application_arn, authentication_method_type, params::Dict{String,<:Any}) + +Retrieves details about an authentication method used by an application. + +# Arguments +- `application_arn`: Specifies the ARN of the application. +- `authentication_method_type`: Specifies the type of authentication method for which you + want details. + +""" +function get_application_authentication_method( + ApplicationArn, + AuthenticationMethodType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "GetApplicationAuthenticationMethod", + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "AuthenticationMethodType" => AuthenticationMethodType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_application_authentication_method( + ApplicationArn, + AuthenticationMethodType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "GetApplicationAuthenticationMethod", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "AuthenticationMethodType" => AuthenticationMethodType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_application_grant(application_arn, grant_type) + get_application_grant(application_arn, grant_type, params::Dict{String,<:Any}) + +Retrieves details about an application grant. + +# Arguments +- `application_arn`: Specifies the ARN of the application that contains the grant. +- `grant_type`: Specifies the type of grant. + +""" +function get_application_grant( + ApplicationArn, GrantType; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "GetApplicationGrant", + Dict{String,Any}("ApplicationArn" => ApplicationArn, "GrantType" => GrantType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_application_grant( + ApplicationArn, + GrantType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "GetApplicationGrant", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, "GrantType" => GrantType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_inline_policy_for_permission_set(instance_arn, permission_set_arn) + get_inline_policy_for_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + +Obtains the inline policy assigned to the permission set. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. 
For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the permission set. + +""" +function get_inline_policy_for_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "GetInlinePolicyForPermissionSet", + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_inline_policy_for_permission_set( + InstanceArn, + PermissionSetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "GetInlinePolicyForPermissionSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_permissions_boundary_for_permission_set(instance_arn, permission_set_arn) + get_permissions_boundary_for_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + +Obtains the permissions boundary for a specified PermissionSet. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. +- `permission_set_arn`: The ARN of the PermissionSet. + +""" +function get_permissions_boundary_for_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "GetPermissionsBoundaryForPermissionSet", + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_permissions_boundary_for_permission_set( + InstanceArn, + PermissionSetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "GetPermissionsBoundaryForPermissionSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_account_assignment_creation_status(instance_arn) + list_account_assignment_creation_status(instance_arn, params::Dict{String,<:Any}) + +Lists the status of the Amazon Web Services account assignment creation requests for a +specified IAM Identity Center instance. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: Filters results based on the passed attribute value. +- `"MaxResults"`: The maximum number of results to display for the assignment. +- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use + the output of previous API calls to make subsequent calls. 
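A minimal call sketch for list_account_assignment_creation_status showing the params overload;
the instance ARN is a placeholder and MaxResults is one of the optional keys listed above:

    list_account_assignment_creation_status(
        "arn:aws:sso:::instance/ssoins-1234567890abcdef",
        Dict{String,Any}("MaxResults" => 10),
    )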
+""" +function list_account_assignment_creation_status( + InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListAccountAssignmentCreationStatus", + Dict{String,Any}("InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_account_assignment_creation_status( + InstanceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListAccountAssignmentCreationStatus", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_account_assignment_deletion_status(instance_arn) + list_account_assignment_deletion_status(instance_arn, params::Dict{String,<:Any}) + +Lists the status of the Amazon Web Services account assignment deletion requests for a +specified IAM Identity Center instance. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: Filters results based on the passed attribute value. +- `"MaxResults"`: The maximum number of results to display for the assignment. +- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use + the output of previous API calls to make subsequent calls. +""" +function list_account_assignment_deletion_status( + InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListAccountAssignmentDeletionStatus", + Dict{String,Any}("InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_account_assignment_deletion_status( + InstanceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListAccountAssignmentDeletionStatus", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_account_assignments(account_id, instance_arn, permission_set_arn) + list_account_assignments(account_id, instance_arn, permission_set_arn, params::Dict{String,<:Any}) + +Lists the assignee of the specified Amazon Web Services account with the specified +permission set. + +# Arguments +- `account_id`: The identifier of the Amazon Web Services account from which to list the + assignments. +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the permission set from which to list assignments. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to display for the assignment. +- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use + the output of previous API calls to make subsequent calls. 
+""" +function list_account_assignments( + AccountId, + InstanceArn, + PermissionSetArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListAccountAssignments", + Dict{String,Any}( + "AccountId" => AccountId, + "InstanceArn" => InstanceArn, + "PermissionSetArn" => PermissionSetArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_account_assignments( + AccountId, + InstanceArn, + PermissionSetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListAccountAssignments", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AccountId" => AccountId, + "InstanceArn" => InstanceArn, + "PermissionSetArn" => PermissionSetArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_account_assignments_for_principal(instance_arn, principal_id, principal_type) + list_account_assignments_for_principal(instance_arn, principal_id, principal_type, params::Dict{String,<:Any}) + +Retrieves a list of the IAM Identity Center associated Amazon Web Services accounts that +the principal has access to. + +# Arguments +- `instance_arn`: Specifies the ARN of the instance of IAM Identity Center that contains + the principal. +- `principal_id`: Specifies the principal for which you want to retrieve the list of + account assignments. +- `principal_type`: Specifies the type of the principal. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: Specifies an Amazon Web Services account ID number. Results are filtered to + only those that match this ID number. +- `"MaxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. 
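A minimal call sketch for list_account_assignments_for_principal; the ARN and principal ID are
placeholders and "GROUP" is assumed as the principal type. A NextToken value from a previous
response can be added to the same Dict to request the next page:

    list_account_assignments_for_principal(
        "arn:aws:sso:::instance/ssoins-1234567890abcdef",
        "f81d4fae-7dec-11d0-a765-00a0c91e6bf6",
        "GROUP",
        Dict{String,Any}("MaxResults" => 50),
    )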
+""" +function list_account_assignments_for_principal( + InstanceArn, + PrincipalId, + PrincipalType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListAccountAssignmentsForPrincipal", + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_account_assignments_for_principal( + InstanceArn, + PrincipalId, + PrincipalType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListAccountAssignmentsForPrincipal", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_accounts_for_provisioned_permission_set(instance_arn, permission_set_arn) + list_accounts_for_provisioned_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + +Lists all the Amazon Web Services accounts where the specified permission set is +provisioned. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the PermissionSet from which the associated Amazon Web + Services accounts will be listed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to display for the PermissionSet. +- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use + the output of previous API calls to make subsequent calls. +- `"ProvisioningStatus"`: The permission set provisioning status for an Amazon Web Services + account. +""" +function list_accounts_for_provisioned_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListAccountsForProvisionedPermissionSet", + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_accounts_for_provisioned_permission_set( + InstanceArn, + PermissionSetArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListAccountsForProvisionedPermissionSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_application_access_scopes(application_arn) + list_application_access_scopes(application_arn, params::Dict{String,<:Any}) + +Lists the access scopes and authorized targets associated with an application. + +# Arguments +- `application_arn`: Specifies the ARN of the application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). 
Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. +""" +function list_application_access_scopes( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListApplicationAccessScopes", + Dict{String,Any}("ApplicationArn" => ApplicationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_access_scopes( + ApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListApplicationAccessScopes", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_application_assignments(application_arn) + list_application_assignments(application_arn, params::Dict{String,<:Any}) + +Lists Amazon Web Services account users that are assigned to an application. + +# Arguments +- `application_arn`: Specifies the ARN of the application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. 
+""" +function list_application_assignments( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListApplicationAssignments", + Dict{String,Any}("ApplicationArn" => ApplicationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_assignments( + ApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListApplicationAssignments", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_application_assignments_for_principal(instance_arn, principal_id, principal_type) + list_application_assignments_for_principal(instance_arn, principal_id, principal_type, params::Dict{String,<:Any}) + +Lists the applications to which a specified principal is assigned. + +# Arguments +- `instance_arn`: Specifies the instance of IAM Identity Center that contains principal and + applications. +- `principal_id`: Specifies the unique identifier of the principal for which you want to + retrieve its assignments. +- `principal_type`: Specifies the type of the principal for which you want to retrieve its + assignments. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: Filters the output to include only assignments associated with the + application that has the specified ARN. +- `"MaxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. 
+""" +function list_application_assignments_for_principal( + InstanceArn, + PrincipalId, + PrincipalType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListApplicationAssignmentsForPrincipal", + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_assignments_for_principal( + InstanceArn, + PrincipalId, + PrincipalType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListApplicationAssignmentsForPrincipal", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "PrincipalId" => PrincipalId, + "PrincipalType" => PrincipalType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_application_authentication_methods(application_arn) + list_application_authentication_methods(application_arn, params::Dict{String,<:Any}) + +Lists all of the authentication methods supported by the specified application. + +# Arguments +- `application_arn`: Specifies the ARN of the application with the authentication methods + you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. +""" +function list_application_authentication_methods( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListApplicationAuthenticationMethods", + Dict{String,Any}("ApplicationArn" => ApplicationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_authentication_methods( + ApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListApplicationAuthenticationMethods", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_application_grants(application_arn) + list_application_grants(application_arn, params::Dict{String,<:Any}) + +List the grants associated with an application. + +# Arguments +- `application_arn`: Specifies the ARN of the application whose grants you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. 
+""" +function list_application_grants( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListApplicationGrants", + Dict{String,Any}("ApplicationArn" => ApplicationArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_account_assignment_deletion_status( +function list_application_grants( + ApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListApplicationGrants", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_application_providers() + list_application_providers(params::Dict{String,<:Any}) + +Lists the application providers configured in the IAM Identity Center identity store. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. +""" +function list_application_providers(; aws_config::AbstractAWSConfig=global_aws_config()) + return sso_admin( + "ListApplicationProviders"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_application_providers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListApplicationProviders", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_applications(instance_arn) + list_applications(instance_arn, params::Dict{String,<:Any}) + +Lists all applications associated with the instance of IAM Identity Center. When listing +applications for an instance in the management account, member accounts must use the +applicationAccount parameter to filter the list to only applications created from that +account. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center application under which the operation + will run. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web + Services Service Namespaces in the Amazon Web Services General Reference. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: Filters response results. +- `"MaxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. 
Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. +""" +function list_applications(InstanceArn; aws_config::AbstractAWSConfig=global_aws_config()) + return sso_admin( + "ListApplications", + Dict{String,Any}("InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_applications( InstanceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListAccountAssignmentDeletionStatus", + "ListApplications", Dict{String,Any}( mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) ); @@ -1119,57 +2611,47 @@ function list_account_assignment_deletion_status( end """ - list_account_assignments(account_id, instance_arn, permission_set_arn) - list_account_assignments(account_id, instance_arn, permission_set_arn, params::Dict{String,<:Any}) + list_customer_managed_policy_references_in_permission_set(instance_arn, permission_set_arn) + list_customer_managed_policy_references_in_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) -Lists the assignee of the specified AWS account with the specified permission set. +Lists all customer managed policies attached to a specified PermissionSet. # Arguments -- `account_id`: The identifier of the AWS account from which to list the assignments. - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the permission set from which to list assignments. + will be executed. +- `permission_set_arn`: The ARN of the PermissionSet. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to display for the assignment. +- `"MaxResults"`: The maximum number of results to display for the list call. - `"NextToken"`: The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. 
""" -function list_account_assignments( - AccountId, - InstanceArn, - PermissionSetArn; - aws_config::AbstractAWSConfig=global_aws_config(), +function list_customer_managed_policy_references_in_permission_set( + InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ListAccountAssignments", + "ListCustomerManagedPolicyReferencesInPermissionSet", Dict{String,Any}( - "AccountId" => AccountId, - "InstanceArn" => InstanceArn, - "PermissionSetArn" => PermissionSetArn, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_account_assignments( - AccountId, +function list_customer_managed_policy_references_in_permission_set( InstanceArn, PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListAccountAssignments", + "ListCustomerManagedPolicyReferencesInPermissionSet", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "AccountId" => AccountId, - "InstanceArn" => InstanceArn, - "PermissionSetArn" => PermissionSetArn, + "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ), params, ), @@ -1180,30 +2662,54 @@ function list_account_assignments( end """ - list_accounts_for_provisioned_permission_set(instance_arn, permission_set_arn) - list_accounts_for_provisioned_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + list_instances() + list_instances(params::Dict{String,<:Any}) + +Lists the details of the organization and account instances of IAM Identity Center that +were created in or visible to the account calling this API. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to display for the instance. +- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use + the output of previous API calls to make subsequent calls. +""" +function list_instances(; aws_config::AbstractAWSConfig=global_aws_config()) + return sso_admin( + "ListInstances"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_instances( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListInstances", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_managed_policies_in_permission_set(instance_arn, permission_set_arn) + list_managed_policies_in_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) -Lists all the AWS accounts where the specified permission set is provisioned. +Lists the Amazon Web Services managed policy that is attached to a specified permission set. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the PermissionSet from which the associated AWS accounts - will be listed. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the PermissionSet whose managed policies will be listed. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"MaxResults"`: The maximum number of results to display for the PermissionSet. - `"NextToken"`: The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. -- `"ProvisioningStatus"`: The permission set provisioning status for an AWS account. """ -function list_accounts_for_provisioned_permission_set( +function list_managed_policies_in_permission_set( InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ListAccountsForProvisionedPermissionSet", + "ListManagedPoliciesInPermissionSet", Dict{String,Any}( "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn ); @@ -1211,14 +2717,14 @@ function list_accounts_for_provisioned_permission_set( feature_set=SERVICE_FEATURE_SET, ) end -function list_accounts_for_provisioned_permission_set( +function list_managed_policies_in_permission_set( InstanceArn, PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListAccountsForProvisionedPermissionSet", + "ListManagedPoliciesInPermissionSet", Dict{String,Any}( mergewith( _merge, @@ -1234,50 +2740,85 @@ function list_accounts_for_provisioned_permission_set( end """ - list_customer_managed_policy_references_in_permission_set(instance_arn, permission_set_arn) - list_customer_managed_policy_references_in_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + list_permission_set_provisioning_status(instance_arn) + list_permission_set_provisioning_status(instance_arn, params::Dict{String,<:Any}) -Lists all customer managed policies attached to a specified PermissionSet. +Lists the status of the permission set provisioning requests for a specified IAM Identity +Center instance. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. -- `permission_set_arn`: The ARN of the PermissionSet. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to display for the list call. +- `"Filter"`: Filters results based on the passed attribute value. +- `"MaxResults"`: The maximum number of results to display for the assignment. - `"NextToken"`: The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. 
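+ +# Example +A minimal sketch that lists only in-progress provisioning operations; the instance ARN is a +placeholder and the Filter shape (a Status field) is an assumption based on the AWS API +reference. +```julia +resp = list_permission_set_provisioning_status( + "arn:aws:sso:::instance/ssoins-example",  # placeholder instance ARN + Dict{String,Any}("Filter" => Dict("Status" => "IN_PROGRESS")), +) +```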
""" -function list_customer_managed_policy_references_in_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function list_permission_set_provisioning_status( + InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ListCustomerManagedPolicyReferencesInPermissionSet", + "ListPermissionSetProvisioningStatus", + Dict{String,Any}("InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_permission_set_provisioning_status( + InstanceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListPermissionSetProvisioningStatus", Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_customer_managed_policy_references_in_permission_set( + +""" + list_permission_sets(instance_arn) + list_permission_sets(instance_arn, params::Dict{String,<:Any}) + +Lists the PermissionSets in an IAM Identity Center instance. + +# Arguments +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to display for the assignment. +- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use + the output of previous API calls to make subsequent calls. +""" +function list_permission_sets( + InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListPermissionSets", + Dict{String,Any}("InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_permission_sets( InstanceArn, - PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListCustomerManagedPolicyReferencesInPermissionSet", + "ListPermissionSets", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn - ), - params, - ), + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1285,76 +2826,141 @@ function list_customer_managed_policy_references_in_permission_set( end """ - list_instances() - list_instances(params::Dict{String,<:Any}) + list_permission_sets_provisioned_to_account(account_id, instance_arn) + list_permission_sets_provisioned_to_account(account_id, instance_arn, params::Dict{String,<:Any}) + +Lists all the permission sets that are provisioned to a specified Amazon Web Services +account. -Lists the IAM Identity Center instances that the caller has access to. +# Arguments +- `account_id`: The identifier of the Amazon Web Services account from which to list the + assignments. +- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to display for the instance. +- `"MaxResults"`: The maximum number of results to display for the assignment. - `"NextToken"`: The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. +- `"ProvisioningStatus"`: The status object for the permission set provisioning operation. """ -function list_instances(; aws_config::AbstractAWSConfig=global_aws_config()) +function list_permission_sets_provisioned_to_account( + AccountId, InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +) return sso_admin( - "ListInstances"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "ListPermissionSetsProvisionedToAccount", + Dict{String,Any}("AccountId" => AccountId, "InstanceArn" => InstanceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end -function list_instances( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +function list_permission_sets_provisioned_to_account( + AccountId, + InstanceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListInstances", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "ListPermissionSetsProvisionedToAccount", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AccountId" => AccountId, "InstanceArn" => InstanceArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end """ - list_managed_policies_in_permission_set(instance_arn, permission_set_arn) - list_managed_policies_in_permission_set(instance_arn, permission_set_arn, params::Dict{String,<:Any}) + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Lists the AWS managed policy that is attached to a specified permission set. +Lists the tags that are attached to a specified resource. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the PermissionSet whose managed policies will be listed. +- `resource_arn`: The ARN of the resource with the tags to be listed. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to display for the PermissionSet. +- `"InstanceArn"`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. - `"NextToken"`: The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. 
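+ +# Example +Note that InstanceArn is now passed through params rather than as a positional argument. A +minimal sketch with placeholder ARNs; the Tags response field is assumed from the AWS API +reference. +```julia +resp = list_tags_for_resource( + "arn:aws:sso:::permissionSet/ssoins-example/ps-example",  # placeholder resource ARN + Dict{String,Any}("InstanceArn" => "arn:aws:sso:::instance/ssoins-example"), +) +tags = get(resp, "Tags", []) +```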
""" -function list_managed_policies_in_permission_set( - InstanceArn, PermissionSetArn; aws_config::AbstractAWSConfig=global_aws_config() +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "ListTagsForResource", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_trusted_token_issuers(instance_arn) + list_trusted_token_issuers(instance_arn, params::Dict{String,<:Any}) + +Lists all the trusted token issuers configured in an instance of IAM Identity Center. + +# Arguments +- `instance_arn`: Specifies the ARN of the instance of IAM Identity Center with the trusted + token issuer configurations that you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. +- `"NextToken"`: Specifies that you want to receive the next page of results. Valid only if + you received a NextToken response in the previous request. If you did, it indicates that + more output is available. Set this parameter to the value provided by the previous call's + NextToken response to request the next page of results. 
+""" +function list_trusted_token_issuers( + InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ListManagedPoliciesInPermissionSet", - Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn - ); + "ListTrustedTokenIssuers", + Dict{String,Any}("InstanceArn" => InstanceArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_managed_policies_in_permission_set( +function list_trusted_token_issuers( InstanceArn, - PermissionSetArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListManagedPoliciesInPermissionSet", + "ListTrustedTokenIssuers", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "InstanceArn" => InstanceArn, "PermissionSetArn" => PermissionSetArn - ), - params, - ), + mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1362,43 +2968,59 @@ function list_managed_policies_in_permission_set( end """ - list_permission_set_provisioning_status(instance_arn) - list_permission_set_provisioning_status(instance_arn, params::Dict{String,<:Any}) + provision_permission_set(instance_arn, permission_set_arn, target_type) + provision_permission_set(instance_arn, permission_set_arn, target_type, params::Dict{String,<:Any}) -Lists the status of the permission set provisioning requests for a specified IAM Identity -Center instance. +The process by which a specified permission set is provisioned to the specified target. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. +- `permission_set_arn`: The ARN of the permission set. +- `target_type`: The entity type for which the assignment will be created. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: Filters results based on the passed attribute value. -- `"MaxResults"`: The maximum number of results to display for the assignment. -- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use - the output of previous API calls to make subsequent calls. +- `"TargetId"`: TargetID is an Amazon Web Services account identifier, (For example, + 123456789012). 
""" -function list_permission_set_provisioning_status( - InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function provision_permission_set( + InstanceArn, + PermissionSetArn, + TargetType; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListPermissionSetProvisioningStatus", - Dict{String,Any}("InstanceArn" => InstanceArn); + "ProvisionPermissionSet", + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "PermissionSetArn" => PermissionSetArn, + "TargetType" => TargetType, + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_permission_set_provisioning_status( +function provision_permission_set( InstanceArn, + PermissionSetArn, + TargetType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListPermissionSetProvisioningStatus", + "ProvisionPermissionSet", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + mergewith( + _merge, + Dict{String,Any}( + "InstanceArn" => InstanceArn, + "PermissionSetArn" => PermissionSetArn, + "TargetType" => TargetType, + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1406,41 +3028,47 @@ function list_permission_set_provisioning_status( end """ - list_permission_sets(instance_arn) - list_permission_sets(instance_arn, params::Dict{String,<:Any}) + put_application_access_scope(application_arn, scope) + put_application_access_scope(application_arn, scope, params::Dict{String,<:Any}) -Lists the PermissionSets in an IAM Identity Center instance. +Adds or updates the list of authorized targets for an IAM Identity Center access scope for +an application. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. +- `application_arn`: Specifies the ARN of the application with the access scope with the + targets to add or update. +- `scope`: Specifies the name of the access scope to be associated with the specified + targets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to display for the assignment. -- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use - the output of previous API calls to make subsequent calls. +- `"AuthorizedTargets"`: Specifies an array list of ARNs that represent the authorized + targets for this access scope. 
""" -function list_permission_sets( - InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function put_application_access_scope( + ApplicationArn, Scope; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ListPermissionSets", - Dict{String,Any}("InstanceArn" => InstanceArn); + "PutApplicationAccessScope", + Dict{String,Any}("ApplicationArn" => ApplicationArn, "Scope" => Scope); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_permission_sets( - InstanceArn, +function put_application_access_scope( + ApplicationArn, + Scope, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListPermissionSets", + "PutApplicationAccessScope", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("InstanceArn" => InstanceArn), params) + mergewith( + _merge, + Dict{String,Any}("ApplicationArn" => ApplicationArn, "Scope" => Scope), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1448,46 +3076,51 @@ function list_permission_sets( end """ - list_permission_sets_provisioned_to_account(account_id, instance_arn) - list_permission_sets_provisioned_to_account(account_id, instance_arn, params::Dict{String,<:Any}) + put_application_assignment_configuration(application_arn, assignment_required) + put_application_assignment_configuration(application_arn, assignment_required, params::Dict{String,<:Any}) -Lists all the permission sets that are provisioned to a specified AWS account. +Configure how users gain access to an application. If AssignmentsRequired is true (default +value), users don’t have access to the application unless an assignment is created using +the CreateApplicationAssignment API. If false, all users have access to the application. If +an assignment is created using CreateApplicationAssignment., the user retains access if +AssignmentsRequired is set to true. # Arguments -- `account_id`: The identifier of the AWS account from which to list the assignments. -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. +- `application_arn`: Specifies the ARN of the application. For more information about ARNs, + see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon + Web Services General Reference. +- `assignment_required`: If AssignmentsRequired is true (default value), users don’t have + access to the application unless an assignment is created using the + CreateApplicationAssignment API. If false, all users have access to the application. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximum number of results to display for the assignment. -- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use - the output of previous API calls to make subsequent calls. -- `"ProvisioningStatus"`: The status object for the permission set provisioning operation. 
""" -function list_permission_sets_provisioned_to_account( - AccountId, InstanceArn; aws_config::AbstractAWSConfig=global_aws_config() +function put_application_assignment_configuration( + ApplicationArn, AssignmentRequired; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ListPermissionSetsProvisionedToAccount", - Dict{String,Any}("AccountId" => AccountId, "InstanceArn" => InstanceArn); + "PutApplicationAssignmentConfiguration", + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, "AssignmentRequired" => AssignmentRequired + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_permission_sets_provisioned_to_account( - AccountId, - InstanceArn, +function put_application_assignment_configuration( + ApplicationArn, + AssignmentRequired, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListPermissionSetsProvisionedToAccount", + "PutApplicationAssignmentConfiguration", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("AccountId" => AccountId, "InstanceArn" => InstanceArn), + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "AssignmentRequired" => AssignmentRequired, + ), params, ), ); @@ -1497,45 +3130,54 @@ function list_permission_sets_provisioned_to_account( end """ - list_tags_for_resource(instance_arn, resource_arn) - list_tags_for_resource(instance_arn, resource_arn, params::Dict{String,<:Any}) + put_application_authentication_method(application_arn, authentication_method, authentication_method_type) + put_application_authentication_method(application_arn, authentication_method, authentication_method_type, params::Dict{String,<:Any}) -Lists the tags that are attached to a specified resource. +Adds or updates an authentication method for an application. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `resource_arn`: The ARN of the resource with the tags to be listed. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"NextToken"`: The pagination token for the list API. Initially the value is null. Use - the output of previous API calls to make subsequent calls. -""" -function list_tags_for_resource( - InstanceArn, ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +- `application_arn`: Specifies the ARN of the application with the authentication method to + add or update. +- `authentication_method`: Specifies a structure that describes the authentication method + to add or update. The structure type you provide is determined by the + AuthenticationMethodType parameter. +- `authentication_method_type`: Specifies the type of the authentication method that you + want to add or update. 
+ +""" +function put_application_authentication_method( + ApplicationArn, + AuthenticationMethod, + AuthenticationMethodType; + aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListTagsForResource", - Dict{String,Any}("InstanceArn" => InstanceArn, "ResourceArn" => ResourceArn); + "PutApplicationAuthenticationMethod", + Dict{String,Any}( + "ApplicationArn" => ApplicationArn, + "AuthenticationMethod" => AuthenticationMethod, + "AuthenticationMethodType" => AuthenticationMethodType, + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function list_tags_for_resource( - InstanceArn, - ResourceArn, +function put_application_authentication_method( + ApplicationArn, + AuthenticationMethod, + AuthenticationMethodType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ListTagsForResource", + "PutApplicationAuthenticationMethod", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "InstanceArn" => InstanceArn, "ResourceArn" => ResourceArn + "ApplicationArn" => ApplicationArn, + "AuthenticationMethod" => AuthenticationMethod, + "AuthenticationMethodType" => AuthenticationMethodType, ), params, ), @@ -1546,56 +3188,45 @@ function list_tags_for_resource( end """ - provision_permission_set(instance_arn, permission_set_arn, target_type) - provision_permission_set(instance_arn, permission_set_arn, target_type, params::Dict{String,<:Any}) + put_application_grant(application_arn, grant, grant_type) + put_application_grant(application_arn, grant, grant_type, params::Dict{String,<:Any}) -The process by which a specified permission set is provisioned to the specified target. +Adds a grant to an application. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. -- `permission_set_arn`: The ARN of the permission set. -- `target_type`: The entity type for which the assignment will be created. +- `application_arn`: Specifies the ARN of the application to update. +- `grant`: Specifies a structure that describes the grant to update. +- `grant_type`: Specifies the type of grant to update. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"TargetId"`: TargetID is an AWS account identifier, typically a 10-12 digit string (For - example, 123456789012). 
""" -function provision_permission_set( - InstanceArn, - PermissionSetArn, - TargetType; - aws_config::AbstractAWSConfig=global_aws_config(), +function put_application_grant( + ApplicationArn, Grant, GrantType; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( - "ProvisionPermissionSet", + "PutApplicationGrant", Dict{String,Any}( - "InstanceArn" => InstanceArn, - "PermissionSetArn" => PermissionSetArn, - "TargetType" => TargetType, + "ApplicationArn" => ApplicationArn, "Grant" => Grant, "GrantType" => GrantType ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function provision_permission_set( - InstanceArn, - PermissionSetArn, - TargetType, +function put_application_grant( + ApplicationArn, + Grant, + GrantType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sso_admin( - "ProvisionPermissionSet", + "PutApplicationGrant", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "InstanceArn" => InstanceArn, - "PermissionSetArn" => PermissionSetArn, - "TargetType" => TargetType, + "ApplicationArn" => ApplicationArn, + "Grant" => Grant, + "GrantType" => GrantType, ), params, ), @@ -1616,8 +3247,8 @@ this action to apply the corresponding IAM policy updates to all assigned accoun # Arguments - `inline_policy`: The inline policy to attach to a PermissionSet. - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. - `permission_set_arn`: The ARN of the permission set. """ @@ -1667,8 +3298,8 @@ end put_permissions_boundary_to_permission_set(instance_arn, permission_set_arn, permissions_boundary) put_permissions_boundary_to_permission_set(instance_arn, permission_set_arn, permissions_boundary, params::Dict{String,<:Any}) -Attaches an AWS managed or customer managed policy to the specified PermissionSet as a -permissions boundary. +Attaches an Amazon Web Services managed or customer managed policy to the specified +PermissionSet as a permissions boundary. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation @@ -1721,33 +3352,30 @@ function put_permissions_boundary_to_permission_set( end """ - tag_resource(instance_arn, resource_arn, tags) - tag_resource(instance_arn, resource_arn, tags, params::Dict{String,<:Any}) + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) Associates a set of tags with a specified resource. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. - `resource_arn`: The ARN of the resource with the tags to be listed. - `tags`: A set of key-value pairs that are used to manage the resource. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceArn"`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. 
""" -function tag_resource( - InstanceArn, ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config() -) +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) return sso_admin( "TagResource", - Dict{String,Any}( - "InstanceArn" => InstanceArn, "ResourceArn" => ResourceArn, "Tags" => Tags - ); + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function tag_resource( - InstanceArn, ResourceArn, Tags, params::AbstractDict{String}; @@ -1758,11 +3386,7 @@ function tag_resource( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "InstanceArn" => InstanceArn, - "ResourceArn" => ResourceArn, - "Tags" => Tags, - ), + Dict{String,Any}("ResourceArn" => ResourceArn, "Tags" => Tags), params, ), ); @@ -1772,33 +3396,32 @@ function tag_resource( end """ - untag_resource(instance_arn, resource_arn, tag_keys) - untag_resource(instance_arn, resource_arn, tag_keys, params::Dict{String,<:Any}) + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) Disassociates a set of tags from a specified resource. # Arguments -- `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. - `resource_arn`: The ARN of the resource with the tags to be listed. - `tag_keys`: The keys of tags that are attached to the resource. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceArn"`: The ARN of the IAM Identity Center instance under which the operation + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. """ function untag_resource( - InstanceArn, ResourceArn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() + ResourceArn, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() ) return sso_admin( "UntagResource", - Dict{String,Any}( - "InstanceArn" => InstanceArn, "ResourceArn" => ResourceArn, "TagKeys" => TagKeys - ); + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function untag_resource( - InstanceArn, ResourceArn, TagKeys, params::AbstractDict{String}; @@ -1809,11 +3432,95 @@ function untag_resource( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "InstanceArn" => InstanceArn, - "ResourceArn" => ResourceArn, - "TagKeys" => TagKeys, - ), + Dict{String,Any}("ResourceArn" => ResourceArn, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_application(application_arn) + update_application(application_arn, params::Dict{String,<:Any}) + +Updates application properties. + +# Arguments +- `application_arn`: Specifies the ARN of the application. For more information about ARNs, + see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon + Web Services General Reference. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: The description of the . +- `"Name"`: Specifies the updated name for the application. 
+- `"PortalOptions"`: A structure that describes the options for the portal associated with + an application. +- `"Status"`: Specifies whether the application is enabled or disabled. +""" +function update_application( + ApplicationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "UpdateApplication", + Dict{String,Any}("ApplicationArn" => ApplicationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_application( + ApplicationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "UpdateApplication", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationArn" => ApplicationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_instance(instance_arn, name) + update_instance(instance_arn, name, params::Dict{String,<:Any}) + +Update the details for the instance of IAM Identity Center that is owned by the Amazon Web +Services account. + +# Arguments +- `instance_arn`: The ARN of the instance of IAM Identity Center under which the operation + will run. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web + Services Service Namespaces in the Amazon Web Services General Reference. +- `name`: Updates the instance name. + +""" +function update_instance( + InstanceArn, Name; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "UpdateInstance", + Dict{String,Any}("InstanceArn" => InstanceArn, "Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_instance( + InstanceArn, + Name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "UpdateInstance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InstanceArn" => InstanceArn, "Name" => Name), params, ), ); @@ -1889,8 +3596,8 @@ Updates an existing permission set. # Arguments - `instance_arn`: The ARN of the IAM Identity Center instance under which the operation - will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - Service Namespaces in the AWS General Reference. + will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and + Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. - `permission_set_arn`: The ARN of the permission set. # Optional Parameters @@ -1934,3 +3641,53 @@ function update_permission_set( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_trusted_token_issuer(trusted_token_issuer_arn) + update_trusted_token_issuer(trusted_token_issuer_arn, params::Dict{String,<:Any}) + +Updates the name of the trusted token issuer, or the path of a source attribute or +destination attribute for a trusted token issuer configuration. Updating this trusted +token issuer configuration might cause users to lose access to any applications that are +configured to use the trusted token issuer. + +# Arguments +- `trusted_token_issuer_arn`: Specifies the ARN of the trusted token issuer configuration + that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Name"`: Specifies the updated name to be applied to the trusted token issuer + configuration. +- `"TrustedTokenIssuerConfiguration"`: Specifies a structure with settings to apply to the + specified trusted token issuer. 
The settings that you can provide are determined by the + type of the trusted token issuer that you are updating. +""" +function update_trusted_token_issuer( + TrustedTokenIssuerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_admin( + "UpdateTrustedTokenIssuer", + Dict{String,Any}("TrustedTokenIssuerArn" => TrustedTokenIssuerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_trusted_token_issuer( + TrustedTokenIssuerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_admin( + "UpdateTrustedTokenIssuer", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("TrustedTokenIssuerArn" => TrustedTokenIssuerArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/sso_oidc.jl b/src/services/sso_oidc.jl index 9fcae3fd37..9b8a15b740 100644 --- a/src/services/sso_oidc.jl +++ b/src/services/sso_oidc.jl @@ -8,35 +8,43 @@ using AWS.UUIDs create_token(client_id, client_secret, grant_type) create_token(client_id, client_secret, grant_type, params::Dict{String,<:Any}) -Creates and returns an access token for the authorized client. The access token issued will -be used to fetch short-term credentials for the assigned roles in the AWS account. +Creates and returns access and refresh tokens for clients that are authenticated using +client secrets. The access token can be used to fetch short-term credentials for the +assigned AWS accounts or to access application APIs using bearer authentication. # Arguments -- `client_id`: The unique identifier string for each client. This value should come from - the persisted result of the RegisterClient API. +- `client_id`: The unique identifier string for the client or application. This value comes + from the result of the RegisterClient API. - `client_secret`: A secret string generated for the client. This value should come from the persisted result of the RegisterClient API. -- `grant_type`: Supports grant types for the authorization code, refresh token, and device - code request. For device code requests, specify the following value: - urn:ietf:params:oauth:grant-type:device_code For information about how to obtain the - device code, see the StartDeviceAuthorization topic. +- `grant_type`: Supports the following OAuth grant types: Device Code and Refresh Token. + Specify either of the following values, depending on the grant type that you want: * Device + Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token For + information about how to obtain the device code, see the StartDeviceAuthorization topic. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"code"`: The authorization code received from the authorization service. This parameter - is required to perform an authorization grant request to get access to a token. -- `"deviceCode"`: Used only when calling this API for the device code grant type. This - short-term code is used to identify this authentication attempt. This should come from an - in-memory reference to the result of the StartDeviceAuthorization API. -- `"redirectUri"`: The location of the application that will receive the authorization - code. Users authorize the service to send the request to this location. -- `"refreshToken"`: Currently, refreshToken is not yet implemented and is not supported. 
+- `"code"`: Used only when calling this API for the Authorization Code grant type. The + short-term code is used to identify this authorization request. This grant type is + currently unsupported for the CreateToken API. +- `"codeVerifier"`: Used only when calling this API for the Authorization Code grant type. + This value is generated by the client and presented to validate the original code challenge + value the client passed at authorization time. +- `"deviceCode"`: Used only when calling this API for the Device Code grant type. This + short-term code is used to identify this authorization request. This comes from the result + of the StartDeviceAuthorization API. +- `"redirectUri"`: Used only when calling this API for the Authorization Code grant type. + This value specifies the location of the client or application that has registered to + receive the authorization code. +- `"refreshToken"`: Used only when calling this API for the Refresh Token grant type. This + token is used to refresh short-term tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center - OIDC API Reference. The token used to obtain an access token in the event that the access - token is invalid or expired. -- `"scope"`: The list of scopes that is defined by the client. Upon authorization, this - list is used to restrict permissions when granting an access token. + OIDC API Reference. +- `"scope"`: The list of scopes for which authorization is requested. The access token that + is issued is limited to the scopes that are granted. If this value is not specified, IAM + Identity Center authorizes all scopes that are configured for the client during the call to + RegisterClient. """ function create_token( clientId, clientSecret, grantType; aws_config::AbstractAWSConfig=global_aws_config() @@ -77,6 +85,94 @@ function create_token( ) end +""" + create_token_with_iam(client_id, grant_type) + create_token_with_iam(client_id, grant_type, params::Dict{String,<:Any}) + +Creates and returns access and refresh tokens for clients and applications that are +authenticated using IAM entities. The access token can be used to fetch short-term +credentials for the assigned Amazon Web Services accounts or to access application APIs +using bearer authentication. + +# Arguments +- `client_id`: The unique identifier string for the client or application. This value is an + application ARN that has OAuth grants configured. +- `grant_type`: Supports the following OAuth grant types: Authorization Code, Refresh + Token, JWT Bearer, and Token Exchange. Specify one of the following values, depending on + the grant type that you want: * Authorization Code - authorization_code * Refresh Token - + refresh_token * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange + - urn:ietf:params:oauth:grant-type:token-exchange + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assertion"`: Used only when calling this API for the JWT Bearer grant type. This value + specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize a trusted + token issuer, configure the JWT Bearer GrantOptions for the application. +- `"code"`: Used only when calling this API for the Authorization Code grant type. This + short-term code is used to identify this authorization request. 
The code is obtained + through a redirect from IAM Identity Center to a redirect URI persisted in the + Authorization Code GrantOptions for the application. +- `"codeVerifier"`: Used only when calling this API for the Authorization Code grant type. + This value is generated by the client and presented to validate the original code challenge + value the client passed at authorization time. +- `"redirectUri"`: Used only when calling this API for the Authorization Code grant type. + This value specifies the location of the client or application that has registered to + receive the authorization code. +- `"refreshToken"`: Used only when calling this API for the Refresh Token grant type. This + token is used to refresh short-term tokens, such as the access token, that might expire. + For more information about the features and limitations of the current IAM Identity Center + OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center + OIDC API Reference. +- `"requestedTokenType"`: Used only when calling this API for the Token Exchange grant + type. This value specifies the type of token that the requester can receive. The following + values are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * + Refresh Token - urn:ietf:params:oauth:token-type:refresh_token +- `"scope"`: The list of scopes for which authorization is requested. The access token that + is issued is limited to the scopes that are granted. If the value is not specified, IAM + Identity Center authorizes all scopes configured for the application, including the + following default scopes: openid, aws, sts:identity_context. +- `"subjectToken"`: Used only when calling this API for the Token Exchange grant type. This + value specifies the subject of the exchange. The value of the subject token must be an + access token issued by IAM Identity Center to a different client or application. The access + token must have authorized scopes that indicate the requested application as a target + audience. +- `"subjectTokenType"`: Used only when calling this API for the Token Exchange grant type. + This value specifies the type of token that is passed as the subject of the exchange. The + following value is supported: * Access Token - + urn:ietf:params:oauth:token-type:access_token +""" +function create_token_with_iam( + clientId, grantType; aws_config::AbstractAWSConfig=global_aws_config() +) + return sso_oidc( + "POST", + "/token?aws_iam=t", + Dict{String,Any}("clientId" => clientId, "grantType" => grantType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_token_with_iam( + clientId, + grantType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sso_oidc( + "POST", + "/token?aws_iam=t", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("clientId" => clientId, "grantType" => grantType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ register_client(client_name, client_type) register_client(client_name, client_type, params::Dict{String,<:Any}) @@ -92,6 +188,17 @@ requests. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"entitledApplicationArn"`: This IAM Identity Center application ARN is used to define + administrator-managed configuration for public client access to resources. 
At + authorization, the scopes, grants, and redirect URIs available to this client will be + restricted by this application resource. +- `"grantTypes"`: The list of OAuth 2.0 grant types that are defined by the client. This + list is used to restrict the token granting flows available to the client. +- `"issuerUrl"`: The IAM Identity Center Issuer URL associated with an instance of IAM + Identity Center. This value is needed for user access to resources through the client. +- `"redirectUris"`: The list of redirect URIs that are defined by the client. At completion + of authorization, this list is used to restrict what locations the user agent can be + redirected back to. - `"scopes"`: The list of scopes that are defined by the client. Upon authorization, this list is used to restrict permissions when granting an access token. """ @@ -140,8 +247,8 @@ authorization service. operation. - `client_secret`: A secret string that is generated for the client. This value should come from the persisted result of the RegisterClient API operation. -- `start_url`: The URL for the AWS access portal. For more information, see Using the AWS - access portal in the IAM Identity Center User Guide. +- `start_url`: The URL for the Amazon Web Services access portal. For more information, see + Using the Amazon Web Services access portal in the IAM Identity Center User Guide. """ function start_device_authorization( diff --git a/src/services/storage_gateway.jl b/src/services/storage_gateway.jl index 973e0f61a6..2c229cef71 100644 --- a/src/services/storage_gateway.jl +++ b/src/services/storage_gateway.jl @@ -33,17 +33,16 @@ gateway. Reference. Valid Values: See Storage Gateway endpoints and quotas in the Amazon Web Services General Reference. - `gateway_timezone`: A value that indicates the time zone you want to set for the gateway. - The time zone is of the format \"GMT-hr:mm\" or \"GMT+hr:mm\". For example, GMT-4:00 - indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of - GMT. The time zone is used, for example, for scheduling snapshots and your gateway's - maintenance schedule. + The time zone is of the format \"GMT\", \"GMT-hr:mm\", or \"GMT+hr:mm\". For example, GMT + indicates Greenwich Mean Time without any offset. GMT-4:00 indicates the time is 4 hours + behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is used, for + example, for scheduling snapshots and your gateway's maintenance schedule. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"GatewayType"`: A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. - The default value is CACHED. Valid Values: STORED | CACHED | VTL | VTL_SNOW | FILE_S3 | - FILE_FSX_SMB + The default value is CACHED. Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB - `"MediumChangerType"`: The value that indicates the type of medium changer to use for tape gateway. This field is optional. Valid Values: STK-L700 | AWS-Gateway-VTL | IBM-03584L32-0402 @@ -2149,9 +2148,9 @@ end describe_gateway_information(gateway_arn) describe_gateway_information(gateway_arn, params::Dict{String,<:Any}) -Returns metadata about a gateway such as its name, network interfaces, configured time -zone, and the state (whether the gateway is running or not). To specify which gateway to -describe, use the Amazon Resource Name (ARN) of the gateway in your request.
+Returns metadata about a gateway such as its name, network interfaces, time zone, status, +and software version. To specify which gateway to describe, use the Amazon Resource Name +(ARN) of the gateway in your request. # Arguments - `gateway_arn`: @@ -2186,8 +2185,9 @@ end describe_maintenance_start_time(gateway_arn) describe_maintenance_start_time(gateway_arn, params::Dict{String,<:Any}) -Returns your gateway's weekly maintenance start time including the day and time of the -week. Note that values are in terms of the gateway's time zone. +Returns your gateway's maintenance window schedule information, with values for monthly or +weekly cadence, specific day and time to begin maintenance, and which types of updates to +apply. Time values returned are for the gateway's time zone. # Arguments - `gateway_arn`: @@ -2491,9 +2491,14 @@ end describe_tapes(gateway_arn) describe_tapes(gateway_arn, params::Dict{String,<:Any}) -Returns a description of the specified Amazon Resource Name (ARN) of virtual tapes. If a -TapeARN is not specified, returns a description of all virtual tapes associated with the -specified gateway. This operation is only supported in the tape gateway type. +Returns a description of virtual tapes that correspond to the specified Amazon Resource +Names (ARNs). If TapeARN is not specified, returns a description of the virtual tapes +associated with the specified gateway. This operation is only supported for the tape +gateway type. The operation supports pagination. By default, the operation returns a +maximum of up to 100 tapes. You can optionally specify the Limit field in the body to limit +the number of tapes in the response. If the number of tapes returned in the response is +truncated, the response includes a Marker field. You can use this Marker value in your +subsequent request to retrieve the next set of tapes. # Arguments - `gateway_arn`: @@ -2788,7 +2793,13 @@ end join_domain(domain_name, gateway_arn, password, user_name, params::Dict{String,<:Any}) Adds a file gateway to an Active Directory domain. This operation is only supported for -file gateways that support the SMB file protocol. +file gateways that support the SMB file protocol. Joining a domain creates an Active +Directory computer account in the default organizational unit, using the gateway's Gateway +ID as the account name (for example, SGW-1234ADE). If your Active Directory environment +requires that you pre-stage accounts to facilitate the join domain process, you will need +to create this account ahead of time. To create the gateway's computer account in an +organizational unit other than the default, you must specify the organizational unit when +joining the domain. # Arguments - `domain_name`: The name of the domain that you want the gateway to join. @@ -2895,8 +2906,8 @@ end list_file_shares(params::Dict{String,<:Any}) Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares -that belong to the calling user account. This operation is only supported for S3 File -Gateways. +that belong to the calling Amazon Web Services account. This operation is only supported +for S3 File Gateways. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3253,14 +3264,14 @@ end notify_when_uploaded(file_share_arn, params::Dict{String,<:Any}) Sends you notification through CloudWatch Events when all files written to your file share -have been uploaded to S3. Amazon S3. 
Storage Gateway can send a notification through Amazon +have been uploaded to Amazon S3. Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways. For more information, see -Getting file upload notification in the Storage Gateway User Guide. +Getting file upload notification in the Amazon S3 File Gateway User Guide. # Arguments - `file_share_arn`: @@ -3302,23 +3313,23 @@ import files into the S3 File Gateway cache storage. It only updates the cached to reflect changes in the inventory of the objects in the S3 bucket. This operation is only supported in the S3 File Gateway types. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see -Getting notified about file operations in the Storage Gateway User Guide. This operation is -Only supported for S3 File Gateways. When this API is called, it only initiates the refresh -operation. When the API call completes and returns a success code, it doesn't necessarily -mean that the file refresh has completed. You should use the refresh-complete notification -to determine that the operation has completed before you check for new files on the gateway -file share. You can subscribe to be notified through a CloudWatch event when your -RefreshCache operation completes. Throttle limit: This API is asynchronous, so the gateway -will accept no more than two refreshes at any time. We recommend using the refresh-complete -CloudWatch event notification before issuing additional requests. For more information, see -Getting notified about file operations in the Storage Gateway User Guide. Wait at least -60 seconds between consecutive RefreshCache API requests. RefreshCache does not evict -cache entries if invoked consecutively within 60 seconds of a previous RefreshCache -request. If you invoke the RefreshCache API when two requests are already being -processed, any new request will cause an InvalidGatewayRequestException error because too -many requests were sent to the server. The S3 bucket name does not need to be included -when entering the list of folders in the FolderList parameter. For more information, see -Getting notified about file operations in the Storage Gateway User Guide. +Getting notified about file operations in the Amazon S3 File Gateway User Guide. This +operation is only supported for S3 File Gateways. When this API is called, it only +initiates the refresh operation. When the API call completes and returns a success code, it +doesn't necessarily mean that the file refresh has completed. You should use the +refresh-complete notification to determine that the operation has completed before you +check for new files on the gateway file share. You can subscribe to be notified through a +CloudWatch event when your RefreshCache operation completes. Throttle limit: This API is +asynchronous, so the gateway will accept no more than two refreshes at any time. We +recommend using the refresh-complete CloudWatch event notification before issuing +additional requests.
For more information, see Getting notified about file operations in +the Amazon S3 File Gateway User Guide. Wait at least 60 seconds between consecutive +RefreshCache API requests. If you invoke the RefreshCache API when two requests are +already being processed, any new request will cause an InvalidGatewayRequestException error +because too many requests were sent to the server. The S3 bucket name does not need to +be included when entering the list of folders in the FolderList parameter. For more +information, see Getting notified about file operations in the Amazon S3 File Gateway User +Guide. # Arguments - `file_share_arn`: The Amazon Resource Name (ARN) of the file share you want to refresh. @@ -3328,7 +3339,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"FolderList"`: A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that the file share has access - to is refreshed. + to is refreshed. Do not include / when specifying folder names. For example, you would + specify samplefolder rather than samplefolder/. - `"Recursive"`: A value that specifies whether to recursively refresh folders in the cache. The refresh includes folders that were in the cache the last time the gateway listed the folder's contents. If this value set to true, each folder that is listed in FolderList @@ -3639,19 +3651,20 @@ end shutdown_gateway(gateway_arn) shutdown_gateway(gateway_arn, params::Dict{String,<:Any}) -Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name -(ARN) of the gateway in the body of your request. The operation shuts down the gateway -service component running in the gateway's virtual machine (VM) and not the host VM. If -you want to shut down the VM, it is recommended that you first shut down the gateway -component in the VM to avoid unpredictable conditions. After the gateway is shutdown, you -cannot call any other API except StartGateway, DescribeGatewayInformation, and -ListGateways. For more information, see ActivateGateway. Your applications cannot read from -or write to the gateway's storage volumes, and there are no snapshots taken. When you make -a shutdown request, you will get a 200 OK success response immediately. However, it might -take some time for the gateway to shut down. You can call the DescribeGatewayInformation -API to check the status. For more information, see ActivateGateway. If do not intend to -use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay -software charges associated with the gateway. +Shuts down a Tape Gateway or Volume Gateway. To specify which gateway to shut down, use the +Amazon Resource Name (ARN) of the gateway in the body of your request. This API action +cannot be used to shut down S3 File Gateway or FSx File Gateway. The operation shuts down +the gateway service component running in the gateway's virtual machine (VM) and not the +host VM. If you want to shut down the VM, it is recommended that you first shut down the +gateway component in the VM to avoid unpredictable conditions. After the gateway is +shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, +and ListGateways. For more information, see ActivateGateway. Your applications cannot read +from or write to the gateway's storage volumes, and there are no snapshots taken. 
When you +make a shutdown request, you will get a 200 OK success response immediately. However, it +might take some time for the gateway to shut down. You can call the +DescribeGatewayInformation API to check the status. For more information, see +ActivateGateway. If you do not intend to use the gateway again, you must delete the gateway +(using DeleteGateway) to no longer pay software charges associated with the gateway. # Arguments - `gateway_arn`: @@ -3869,8 +3882,9 @@ end Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This -operation is supported only for volume, tape and S3 file gateways. FSx file gateways do not -support bandwidth rate limits. +operation is supported for volume, tape, and S3 file gateways. S3 file gateways support +bandwidth rate limits for upload only. FSx file gateways do not support bandwidth rate +limits. # Arguments - `bandwidth_rate_limit_intervals`: An array containing bandwidth rate limit schedule @@ -4038,11 +4052,11 @@ end update_gateway_information(gateway_arn) update_gateway_information(gateway_arn, params::Dict{String,<:Any}) -Updates a gateway's metadata, which includes the gateway's name and time zone. To specify -which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request. - For gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID -rather than the gateway name. However, changing the name of the gateway has no effect on -the gateway's ARN. +Updates a gateway's metadata, which includes the gateway's name, time zone, and metadata +cache size. To specify which gateway to update, use the Amazon Resource Name (ARN) of the +gateway in your request. For gateways activated after September 2, 2015, the gateway's ARN +contains the gateway ID rather than the gateway name. However, changing the name of the +gateway has no effect on the gateway's ARN. # Arguments - `gateway_arn`: @@ -4052,7 +4066,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CloudWatchLogGroupARN"`: The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway. For more information, see What is Amazon CloudWatch Logs? -- `"GatewayCapacity"`: Specifies the size of the gateway's metadata cache. +- `"GatewayCapacity"`: Specifies the size of the gateway's metadata cache. This setting + impacts gateway performance and hardware recommendations. For more information, see + Performance guidance for gateways with multiple file shares in the Amazon S3 File Gateway + User Guide. - `"GatewayName"`: - `"GatewayTimezone"`: A value that indicates the time zone of the gateway. """ @@ -4125,61 +4142,60 @@ function update_gateway_software_now( end """ - update_maintenance_start_time(gateway_arn, hour_of_day, minute_of_hour) - update_maintenance_start_time(gateway_arn, hour_of_day, minute_of_hour, params::Dict{String,<:Any}) + update_maintenance_start_time(gateway_arn) + update_maintenance_start_time(gateway_arn, params::Dict{String,<:Any}) -Updates a gateway's weekly maintenance start time information, including day and time of -the week. The maintenance time is the time in your gateway's time zone.
+Updates a gateway's maintenance window schedule, with settings for monthly or weekly +cadence, specific day and time to begin maintenance, and which types of updates to apply. +Time configuration uses the gateway's time zone. You can pass values for a complete +maintenance schedule, or update policy, or both. Previous values will persist for whichever +setting you choose not to modify. If an incomplete or invalid maintenance schedule is +passed, the entire request will be rejected with an error and no changes will occur. A +complete maintenance schedule must include values for both MinuteOfHour and HourOfDay, and +either DayOfMonth or DayOfWeek. We recommend keeping maintenance updates turned on, except +in specific use cases where the brief disruptions caused by updating the gateway could +critically impact your deployment. # Arguments - `gateway_arn`: -- `hour_of_day`: The hour component of the maintenance start time represented as hh, where - hh is the hour (00 to 23). The hour of the day is in the time zone of the gateway. -- `minute_of_hour`: The minute component of the maintenance start time represented as mm, - where mm is the minute (00 to 59). The minute of the hour is in the time zone of the - gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DayOfMonth"`: The day of the month component of the maintenance start time represented - as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 - represents the last day of the month. + as an ordinal number from 1 to 28, where 1 represents the first day of the month. It is not + possible to set the maintenance schedule to start on days 29 through 31. - `"DayOfWeek"`: The day of the week component of the maintenance start time week - represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday. + represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 represents + Saturday. +- `"HourOfDay"`: The hour component of the maintenance start time represented as hh, where + hh is the hour (00 to 23). The hour of the day is in the time zone of the gateway. +- `"MinuteOfHour"`: The minute component of the maintenance start time represented as mm, + where mm is the minute (00 to 59). The minute of the hour is in the time zone of the + gateway. +- `"SoftwareUpdatePreferences"`: A set of variables indicating the software update + preferences for the gateway. Includes AutomaticUpdatePolicy field with the following + inputs: ALL_VERSIONS - Enables regular gateway maintenance updates. + EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates. 
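
As an illustrative sketch only (the gateway ARN and schedule values below are hypothetical placeholders, not values taken from this change), a weekly Sunday 02:00 window with regular updates enabled could be requested by passing the schedule fields through `params`:

    update_maintenance_start_time(
        "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B",  # hypothetical GatewayARN
        Dict(
            "DayOfWeek" => 0,        # Sunday, per the 0-6 convention described above
            "HourOfDay" => 2,
            "MinuteOfHour" => 0,
            "SoftwareUpdatePreferences" => Dict("AutomaticUpdatePolicy" => "ALL_VERSIONS"),
        ),
    )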
""" function update_maintenance_start_time( - GatewayARN, HourOfDay, MinuteOfHour; aws_config::AbstractAWSConfig=global_aws_config() + GatewayARN; aws_config::AbstractAWSConfig=global_aws_config() ) return storage_gateway( "UpdateMaintenanceStartTime", - Dict{String,Any}( - "GatewayARN" => GatewayARN, - "HourOfDay" => HourOfDay, - "MinuteOfHour" => MinuteOfHour, - ); + Dict{String,Any}("GatewayARN" => GatewayARN); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function update_maintenance_start_time( GatewayARN, - HourOfDay, - MinuteOfHour, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return storage_gateway( "UpdateMaintenanceStartTime", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "GatewayARN" => GatewayARN, - "HourOfDay" => HourOfDay, - "MinuteOfHour" => MinuteOfHour, - ), - params, - ), + mergewith(_merge, Dict{String,Any}("GatewayARN" => GatewayARN), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -4472,22 +4488,28 @@ end update_smbsecurity_strategy(gateway_arn, smbsecurity_strategy) update_smbsecurity_strategy(gateway_arn, smbsecurity_strategy, params::Dict{String,<:Any}) -Updates the SMB security strategy on a file gateway. This action is only supported in file -gateways. This API is called Security level in the User Guide. A higher security level can -affect performance of the gateway. +Updates the SMB security strategy level for an Amazon S3 file gateway. This action is only +supported for Amazon S3 file gateways. For information about configuring this setting +using the Amazon Web Services console, see Setting a security level for your gateway in the +Amazon S3 File Gateway User Guide. A higher security strategy level can affect performance +of the gateway. # Arguments - `gateway_arn`: -- `smbsecurity_strategy`: Specifies the type of security strategy. ClientSpecified: if you - use this option, requests are established based on what is negotiated by the client. This - option is recommended when you want to maximize compatibility across different clients in - your environment. Supported only in S3 File Gateway. MandatorySigning: if you use this - option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing - enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 - or newer. MandatoryEncryption: if you use this option, file gateway only allows connections - from SMBv3 clients that have encryption enabled. This option is highly recommended for - environments that handle sensitive data. This option works with SMB clients on Microsoft - Windows 8, Windows Server 2012 or newer. +- `smbsecurity_strategy`: Specifies the type of security strategy. ClientSpecified: If you + choose this option, requests are established based on what is negotiated by the client. + This option is recommended when you want to maximize compatibility across different clients + in your environment. Supported only for S3 File Gateway. MandatorySigning: If you choose + this option, File Gateway only allows connections from SMBv2 or SMBv3 clients that have + signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows + Server 2008 or newer. MandatoryEncryption: If you choose this option, File Gateway only + allows connections from SMBv3 clients that have encryption enabled. This option is + recommended for environments that handle sensitive data. 
This option works with SMB clients + on Microsoft Windows 8, Windows Server 2012 or newer. MandatoryEncryptionNoAes128: If you + choose this option, File Gateway only allows connections from SMBv3 clients that use + 256-bit AES encryption algorithms. 128-bit algorithms are not allowed. This option is + recommended for environments that handle sensitive data. It works with SMB clients on + Microsoft Windows 8, Windows Server 2012, or later. """ function update_smbsecurity_strategy( diff --git a/src/services/sts.jl b/src/services/sts.jl index ddf5c0f02b..f914a67d19 100644 --- a/src/services/sts.jl +++ b/src/services/sts.jl @@ -143,6 +143,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide. +- `"ProvidedContexts"`: A list of previously acquired trusted context assertions in the + format of a JSON array. The trusted context assertion is signed and encrypted by Amazon Web + Services STS. The following is an example of a ProvidedContext value that includes a single + trusted context assertion and the ARN of the context provider from which the trusted + context assertion was generated. + [{\"ProviderArn\":\"arn:aws:iam::aws:contextProvider/IdentityCenter\",\"ContextAssertion + \":\"trusted-context-assertion\"}] - `"SerialNumber"`: The identification number of the MFA device that is associated with the user who is making the AssumeRole call. Specify this value if the trust policy of the role being assumed includes a condition that requires MFA authentication. The value is either @@ -500,7 +507,8 @@ use web identity federation to get access to content in Amazon S3. - `web_identity_token`: The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application - makes an AssumeRoleWithWebIdentity call. + makes an AssumeRoleWithWebIdentity call. Only tokens with RSA algorithms (RS256) are + supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/supplychain.jl b/src/services/supplychain.jl new file mode 100644 index 0000000000..10565faf15 --- /dev/null +++ b/src/services/supplychain.jl @@ -0,0 +1,158 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: supplychain +using AWS.Compat +using AWS.UUIDs + +""" + create_bill_of_materials_import_job(instance_id, s3uri) + create_bill_of_materials_import_job(instance_id, s3uri, params::Dict{String,<:Any}) + +CreateBillOfMaterialsImportJob creates an import job for the Product Bill Of Materials +(BOM) entity. For information on the product_bom entity, see the AWS Supply Chain User +Guide. The CSV file must be located in an Amazon S3 location accessible to AWS Supply +Chain. It is recommended to use the same Amazon S3 bucket created during your AWS Supply +Chain instance creation. + +# Arguments +- `instance_id`: The AWS Supply Chain instance identifier. +- `s3uri`: The S3 URI of the CSV file to be imported. The bucket must grant permissions for + AWS Supply Chain to read the file. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"clientToken"`: An idempotency token. +""" +function create_bill_of_materials_import_job( + instanceId, s3uri; aws_config::AbstractAWSConfig=global_aws_config() +) + return supplychain( + "POST", + "/api/configuration/instances/$(instanceId)/bill-of-materials-import-jobs", + Dict{String,Any}("s3uri" => s3uri, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_bill_of_materials_import_job( + instanceId, + s3uri, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return supplychain( + "POST", + "/api/configuration/instances/$(instanceId)/bill-of-materials-import-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("s3uri" => s3uri, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_bill_of_materials_import_job(instance_id, job_id) + get_bill_of_materials_import_job(instance_id, job_id, params::Dict{String,<:Any}) + +Get status and details of a BillOfMaterialsImportJob. + +# Arguments +- `instance_id`: The AWS Supply Chain instance identifier. +- `job_id`: The BillOfMaterialsImportJob identifier. + +""" +function get_bill_of_materials_import_job( + instanceId, jobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return supplychain( + "GET", + "/api/configuration/instances/$(instanceId)/bill-of-materials-import-jobs/$(jobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_bill_of_materials_import_job( + instanceId, + jobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return supplychain( + "GET", + "/api/configuration/instances/$(instanceId)/bill-of-materials-import-jobs/$(jobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + send_data_integration_event(data, event_group_id, event_type, instance_id) + send_data_integration_event(data, event_group_id, event_type, instance_id, params::Dict{String,<:Any}) + +Send transactional data events with real-time data for analysis or monitoring. + +# Arguments +- `data`: The data payload of the event. +- `event_group_id`: Event identifier (for example, orderId for InboundOrder) used for data + sharing or partitioning. +- `event_type`: The data event type. +- `instance_id`: The AWS Supply Chain instance identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: The idempotent client token. +- `"eventTimestamp"`: The event timestamp (in epoch seconds). 
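
A minimal usage sketch (the event type, instance ID, and payload are hypothetical placeholders; `order_json` is assumed to hold the event payload as a JSON string, for example produced with JSON3.write):

    send_data_integration_event(
        order_json,                                # JSON payload prepared by the caller
        "order-1",                                 # event group ID, e.g. the order ID
        "scn.data.inboundorder",                   # hypothetical event type
        "11111111-2222-3333-4444-555555555555",    # hypothetical instance ID
    )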
+""" +function send_data_integration_event( + data, + eventGroupId, + eventType, + instanceId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return supplychain( + "POST", + "/api-data/data-integration/instance/$(instanceId)/data-integration-events", + Dict{String,Any}( + "data" => data, + "eventGroupId" => eventGroupId, + "eventType" => eventType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function send_data_integration_event( + data, + eventGroupId, + eventType, + instanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return supplychain( + "POST", + "/api-data/data-integration/instance/$(instanceId)/data-integration-events", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "data" => data, + "eventGroupId" => eventGroupId, + "eventType" => eventType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/swf.jl b/src/services/swf.jl index d67653e51a..29aa1ebd15 100644 --- a/src/services/swf.jl +++ b/src/services/swf.jl @@ -236,13 +236,12 @@ function count_pending_decision_tasks( end """ - deprecate_activity_type(activity_type, domain) - deprecate_activity_type(activity_type, domain, params::Dict{String,<:Any}) + delete_activity_type(activity_type, domain) + delete_activity_type(activity_type, domain, params::Dict{String,<:Any}) -Deprecates the specified activity type. After an activity type has been deprecated, you -cannot create new tasks of that activity type. Tasks of this type that were scheduled -before the type was deprecated continue to run. This operation is eventually consistent. -The results are best effort and may not exactly reflect recent updates and changes. +Deletes the specified activity type. Note: Prior to deletion, activity types must first be +deprecated. After an activity type has been deleted, you cannot schedule new activities +of that type. Activities that started before the type was deleted will continue to run. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: Use a Resource element with the domain name to limit the action to only specified domains. Use an Action element to allow or deny permission to call this @@ -255,6 +254,113 @@ fails. The associated event attribute's cause parameter is set to OPERATION_NOT_ For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide. +# Arguments +- `activity_type`: The activity type to delete. +- `domain`: The name of the domain in which the activity type is registered. 
+ +""" +function delete_activity_type( + activityType, domain; aws_config::AbstractAWSConfig=global_aws_config() +) + return swf( + "DeleteActivityType", + Dict{String,Any}("activityType" => activityType, "domain" => domain); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_activity_type( + activityType, + domain, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return swf( + "DeleteActivityType", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("activityType" => activityType, "domain" => domain), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_workflow_type(domain, workflow_type) + delete_workflow_type(domain, workflow_type, params::Dict{String,<:Any}) + +Deletes the specified workflow type. Note: Prior to deletion, workflow types must first be +deprecated. After a workflow type has been deleted, you cannot create new executions of +that type. Executions that started before the type was deleted will continue to run. +Access Control You can use IAM policies to control this action's access to Amazon SWF +resources as follows: Use a Resource element with the domain name to limit the action to +only specified domains. Use an Action element to allow or deny permission to call this +action. Constrain the following parameters by using a Condition element with the +appropriate keys. workflowType.name: String constraint. The key is +swf:workflowType.name. workflowType.version: String constraint. The key is +swf:workflowType.version. If the caller doesn't have sufficient permissions to invoke +the action, or the parameter values fall outside the specified constraints, the action +fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. +For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF +Workflows in the Amazon SWF Developer Guide. + +# Arguments +- `domain`: The name of the domain in which the workflow type is registered. +- `workflow_type`: The workflow type to delete. + +""" +function delete_workflow_type( + domain, workflowType; aws_config::AbstractAWSConfig=global_aws_config() +) + return swf( + "DeleteWorkflowType", + Dict{String,Any}("domain" => domain, "workflowType" => workflowType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_workflow_type( + domain, + workflowType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return swf( + "DeleteWorkflowType", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("domain" => domain, "workflowType" => workflowType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + deprecate_activity_type(activity_type, domain) + deprecate_activity_type(activity_type, domain, params::Dict{String,<:Any}) + +Deprecates the specified activity type. After an activity type has been deprecated, you +cannot create new tasks of that activity type. Tasks of this type that were scheduled +before the type was deprecated continue to run. Access Control You can use IAM policies +to control this action's access to Amazon SWF resources as follows: Use a Resource +element with the domain name to limit the action to only specified domains. Use an Action +element to allow or deny permission to call this action. Constrain the following +parameters by using a Condition element with the appropriate keys. 
activityType.name: +String constraint. The key is swf:activityType.name. activityType.version: String +constraint. The key is swf:activityType.version. If the caller doesn't have sufficient +permissions to invoke the action, or the parameter values fall outside the specified +constraints, the action fails. The associated event attribute's cause parameter is set to +OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage +Access to Amazon SWF Workflows in the Amazon SWF Developer Guide. + # Arguments - `activity_type`: The activity type to deprecate. - `domain`: The name of the domain in which the activity type is registered. @@ -1776,6 +1882,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"decisions"`: The list of decisions (possibly empty) made by the decider while processing this decision task. See the docs for the Decision structure for details. - `"executionContext"`: User defined context to add to workflow execution. +- `"taskList"`: The task list to use for the future decision tasks of this workflow + execution. This list overrides the original task list you specified while starting the + workflow execution. +- `"taskListScheduleToStartTimeout"`: Specifies a timeout (in seconds) for the task list + override. When this parameter is missing, the task list override is permanent. This + parameter makes it possible to temporarily override the task list. If a decision task + scheduled on the override task list is not started within the timeout, the decision task + will time out. Amazon SWF will revert the override and schedule a new decision task to the + original task list. If a decision task scheduled on the override task list is started + within the timeout, but not completed within the start-to-close timeout, Amazon SWF will + also revert the override and schedule a new decision task to the original task list. """ function respond_decision_task_completed( taskToken; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/taxsettings.jl b/src/services/taxsettings.jl new file mode 100644 index 0000000000..d40a1b7101 --- /dev/null +++ b/src/services/taxsettings.jl @@ -0,0 +1,380 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: taxsettings +using AWS.Compat +using AWS.UUIDs + +""" + batch_delete_tax_registration(account_ids) + batch_delete_tax_registration(account_ids, params::Dict{String,<:Any}) + +Deletes tax registration for multiple accounts in batch. This can be used to delete tax +registrations for up to five accounts in one batch. This API operation can't be used to +delete your tax registration in Brazil. Use the Payment preferences page in the Billing and +Cost Management console instead. + +# Arguments +- `account_ids`: List of unique account identifiers. 
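
For illustration only (the account IDs shown are hypothetical placeholders):

    batch_delete_tax_registration(["111122223333", "444455556666"])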
+ +""" +function batch_delete_tax_registration( + accountIds; aws_config::AbstractAWSConfig=global_aws_config() +) + return taxsettings( + "POST", + "/BatchDeleteTaxRegistration", + Dict{String,Any}("accountIds" => accountIds); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_delete_tax_registration( + accountIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return taxsettings( + "POST", + "/BatchDeleteTaxRegistration", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("accountIds" => accountIds), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_put_tax_registration(account_ids, tax_registration_entry) + batch_put_tax_registration(account_ids, tax_registration_entry, params::Dict{String,<:Any}) + +Adds or updates tax registration for multiple accounts in batch. This can be used to add or +update tax registrations for up to five accounts in one batch. You can't set a TRN if +there's a pending TRN. You'll need to delete the pending TRN first. To call this API +operation for specific countries, see the following country-specific requirements. +Bangladesh You must specify the tax registration certificate document in the +taxRegistrationDocuments field of the VerificationDetails object. Brazil You must +complete the tax registration process in the Payment preferences page in the Billing and +Cost Management console. After your TRN and billing address are verified, you can call this +API operation. For Amazon Web Services accounts created through Organizations, you can +call this API operation when you don't have a billing address. Georgia The valid +personType values are Physical Person and Business. Kenya You must specify the +personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If +the personType is Physical Person, you must specify the tax registration certificate +document in the taxRegistrationDocuments field of the VerificationDetails object. +Malaysia If you use this operation to set a tax registration number (TRN) in Malaysia, +only resellers with a valid sales and service tax (SST) number are required to provide tax +registration information. By using this API operation to set a TRN in Malaysia, Amazon +Web Services will regard you as self-declaring that you're an authorized business reseller +registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. +Amazon Web Services reserves the right to seek additional information and/or take other +actions to support your self-declaration as appropriate. If you're not a reseller of +Amazon Web Services, we don't recommend that you use this operation to set the TRN in +Malaysia. Only use this API operation to upload the TRNs for accounts through which +you're reselling Amazon Web Services. Amazon Web Services is currently registered under +the following service tax codes. You must include at least one of the service tax codes in +the service tax code strings to declare yourself as an authorized registered business +reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or +coaching service - 9907071685 IT service - 9907101676 Digital services and electronic +medium - 9907121690 Nepal The sector valid values are Business and Individual. +Saudi Arabia For address, you must specify addressLine3. South Korea You must +specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. 
Use Korean +characters for legalName. You must specify the businessRepresentativeName, +itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the +additionalTaxInformation object. Use Korean characters for these fields. You must specify +the tax registration certificate document in the taxRegistrationDocuments field of the +VerificationDetails object. For the address object, use Korean characters for +addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must +specify the registrationType in the spainAdditionalInfo field of the +additionalTaxInformation object. If the registrationType is Local, you must specify the +tax registration certificate document in the taxRegistrationDocuments field of the +VerificationDetails object. Turkey You must specify the sector in the +taxRegistrationEntry object. If your sector is Business, Individual, or Government: +Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) +Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In +the Tax Settings page of the Billing console, Government appears as Public institutions + If your sector is Business and you're subject to KDV tax, you must specify your industry +in the industries field. For address, you must specify districtOrCounty. Ukraine +The sector valid values are Business and Individual. + +# Arguments +- `account_ids`: List of unique account identifiers. +- `tax_registration_entry`: Your TRN information that will be stored to the accounts + mentioned in putEntries. + +""" +function batch_put_tax_registration( + accountIds, taxRegistrationEntry; aws_config::AbstractAWSConfig=global_aws_config() +) + return taxsettings( + "POST", + "/BatchPutTaxRegistration", + Dict{String,Any}( + "accountIds" => accountIds, "taxRegistrationEntry" => taxRegistrationEntry + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_put_tax_registration( + accountIds, + taxRegistrationEntry, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return taxsettings( + "POST", + "/BatchPutTaxRegistration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "accountIds" => accountIds, + "taxRegistrationEntry" => taxRegistrationEntry, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_tax_registration() + delete_tax_registration(params::Dict{String,<:Any}) + +Deletes tax registration for a single account. This API operation can't be used to delete +your tax registration in Brazil. Use the Payment preferences page in the Billing and Cost +Management console instead. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountId"`: Unique account identifier for the TRN information that needs to be + deleted. If this isn't passed, the account ID corresponding to the credentials of the API + caller will be used for this parameter. 
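
As a brief sketch, the operation can target either the calling account or, via the optional key, a specific account (the ID shown is a hypothetical placeholder):

    delete_tax_registration()                                     # calling account
    delete_tax_registration(Dict("accountId" => "111122223333"))  # hypothetical member account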
+""" +function delete_tax_registration(; aws_config::AbstractAWSConfig=global_aws_config()) + return taxsettings( + "POST", + "/DeleteTaxRegistration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_tax_registration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return taxsettings( + "POST", + "/DeleteTaxRegistration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_tax_registration() + get_tax_registration(params::Dict{String,<:Any}) + +Retrieves tax registration for a single account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountId"`: Your unique account identifier. +""" +function get_tax_registration(; aws_config::AbstractAWSConfig=global_aws_config()) + return taxsettings( + "POST", + "/GetTaxRegistration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_tax_registration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return taxsettings( + "POST", + "/GetTaxRegistration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_tax_registration_document(destination_s3_location, tax_document_metadata) + get_tax_registration_document(destination_s3_location, tax_document_metadata, params::Dict{String,<:Any}) + +Downloads your tax documents to the Amazon S3 bucket that you specify in your request. + +# Arguments +- `destination_s3_location`: The Amazon S3 bucket that you specify to download your tax + documents to. +- `tax_document_metadata`: The metadata for your tax document. + +""" +function get_tax_registration_document( + destinationS3Location, + taxDocumentMetadata; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return taxsettings( + "POST", + "/GetTaxRegistrationDocument", + Dict{String,Any}( + "destinationS3Location" => destinationS3Location, + "taxDocumentMetadata" => taxDocumentMetadata, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_tax_registration_document( + destinationS3Location, + taxDocumentMetadata, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return taxsettings( + "POST", + "/GetTaxRegistrationDocument", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "destinationS3Location" => destinationS3Location, + "taxDocumentMetadata" => taxDocumentMetadata, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tax_registrations() + list_tax_registrations(params::Dict{String,<:Any}) + +Retrieves the tax registration of accounts listed in a consolidated billing family. This +can be used to retrieve up to 100 accounts' tax registrations in one call (default 50). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Number of accountDetails results you want in one response. +- `"nextToken"`: The token to retrieve the next set of results. 
+""" +function list_tax_registrations(; aws_config::AbstractAWSConfig=global_aws_config()) + return taxsettings( + "POST", + "/ListTaxRegistrations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tax_registrations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return taxsettings( + "POST", + "/ListTaxRegistrations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + put_tax_registration(tax_registration_entry) + put_tax_registration(tax_registration_entry, params::Dict{String,<:Any}) + +Adds or updates tax registration for a single account. You can't set a TRN if there's a +pending TRN. You'll need to delete the pending TRN first. To call this API operation for +specific countries, see the following country-specific requirements. Bangladesh You +must specify the tax registration certificate document in the taxRegistrationDocuments +field of the VerificationDetails object. Brazil You must complete the tax +registration process in the Payment preferences page in the Billing and Cost Management +console. After your TRN and billing address are verified, you can call this API operation. + For Amazon Web Services accounts created through Organizations, you can call this API +operation when you don't have a billing address. Georgia The valid personType values +are Physical Person and Business. Kenya You must specify the personType in the +kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is +Physical Person, you must specify the tax registration certificate document in the +taxRegistrationDocuments field of the VerificationDetails object. Malaysia If you use +this operation to set a tax registration number (TRN) in Malaysia, only resellers with a +valid sales and service tax (SST) number are required to provide tax registration +information. By using this API operation to set a TRN in Malaysia, Amazon Web Services +will regard you as self-declaring that you're an authorized business reseller registered +with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. Amazon Web +Services reserves the right to seek additional information and/or take other actions to +support your self-declaration as appropriate. If you're not a reseller of Amazon Web +Services, we don't recommend that you use this operation to set the TRN in Malaysia. Only +use this API operation to upload the TRNs for accounts through which you're reselling +Amazon Web Services. Amazon Web Services is currently registered under the following +service tax codes. You must include at least one of the service tax codes in the service +tax code strings to declare yourself as an authorized registered business reseller. Taxable +service and service tax codes: Consultancy - 9907061674 Training or coaching service - +9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 +Nepal The sector valid values are Business and Individual. Saudi Arabia For +address, you must specify addressLine3. South Korea You must specify the +certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters +for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and +lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation +object. Use Korean characters for these fields. 
You must specify the tax registration +certificate document in the taxRegistrationDocuments field of the VerificationDetails +object. For the address object, use Korean characters for addressLine1, addressLine2 +city, postalCode, and stateOrRegion. Spain You must specify the registrationType in +the spainAdditionalInfo field of the additionalTaxInformation object. If the +registrationType is Local, you must specify the tax registration certificate document in +the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must +specify the sector in the taxRegistrationEntry object. If your sector is Business, +Individual, or Government: Specify the taxOffice. If your sector is Individual, don't +enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't +enter this value. Note: In the Tax Settings page of the Billing console, Government +appears as Public institutions If your sector is Business and you're subject to KDV +tax, you must specify your industry in the industries field. For address, you must +specify districtOrCounty. Ukraine The sector valid values are Business and +Individual. + +# Arguments +- `tax_registration_entry`: Your TRN information that will be stored to the account + mentioned in accountId. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountId"`: Your unique account identifier. +""" +function put_tax_registration( + taxRegistrationEntry; aws_config::AbstractAWSConfig=global_aws_config() +) + return taxsettings( + "POST", + "/PutTaxRegistration", + Dict{String,Any}("taxRegistrationEntry" => taxRegistrationEntry); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_tax_registration( + taxRegistrationEntry, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return taxsettings( + "POST", + "/PutTaxRegistration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("taxRegistrationEntry" => taxRegistrationEntry), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/textract.jl b/src/services/textract.jl index 9d8b42f71f..0582ee8bee 100644 --- a/src/services/textract.jl +++ b/src/services/textract.jl @@ -39,13 +39,13 @@ StartDocumentAnalysis. For more information, see Document Text Analysis. - `feature_types`: A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. Add SIGNATURES to return the locations of detected signatures. - To perform both forms and table analysis, add TABLES and FORMS to FeatureTypes. To detect - signatures within form data and table data, add SIGNATURES to either TABLES or FORMS. All - lines and words detected in the document are included in the response (including text that - isn't related to the value of FeatureTypes). + Add LAYOUT to the list to return information about the layout of the document. All lines + and words detected in the document are included in the response (including text that isn't + related to the value of FeatureTypes). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AdaptersConfig"`: Specifies the adapter to be used when analyzing a document. - `"HumanLoopConfig"`: Sets the configuration for the human in the loop workflow for analyzing documents. 
- `"QueriesConfig"`: Contains Queries and the alias for those Queries, as determined by the @@ -154,6 +154,217 @@ function analyze_id( ) end +""" + create_adapter(adapter_name, feature_types) + create_adapter(adapter_name, feature_types, params::Dict{String,<:Any}) + +Creates an adapter, which can be fine-tuned for enhanced performance on user provided +documents. Takes an AdapterName and FeatureType. Currently the only supported feature type +is QUERIES. You can also provide a Description, Tags, and a ClientRequestToken. You can +choose whether or not the adapter should be AutoUpdated with the AutoUpdate argument. By +default, AutoUpdate is set to DISABLED. + +# Arguments +- `adapter_name`: The name to be assigned to the adapter being created. +- `feature_types`: The type of feature that the adapter is being trained on. Currrenly, + supported feature types are: QUERIES + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AutoUpdate"`: Controls whether or not the adapter should automatically update. +- `"ClientRequestToken"`: Idempotent token is used to recognize the request. If the same + token is used with multiple CreateAdapter requests, the same session is returned. This + token is employed to avoid unintentionally creating the same session multiple times. +- `"Description"`: The description to be assigned to the adapter being created. +- `"Tags"`: A list of tags to be added to the adapter. +""" +function create_adapter( + AdapterName, FeatureTypes; aws_config::AbstractAWSConfig=global_aws_config() +) + return textract( + "CreateAdapter", + Dict{String,Any}( + "AdapterName" => AdapterName, + "FeatureTypes" => FeatureTypes, + "ClientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_adapter( + AdapterName, + FeatureTypes, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "CreateAdapter", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AdapterName" => AdapterName, + "FeatureTypes" => FeatureTypes, + "ClientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_adapter_version(adapter_id, dataset_config, output_config) + create_adapter_version(adapter_id, dataset_config, output_config, params::Dict{String,<:Any}) + +Creates a new version of an adapter. Operates on a provided AdapterId and a specified +dataset provided via the DatasetConfig argument. Requires that you specify an Amazon S3 +bucket with the OutputConfig argument. You can provide an optional KMSKeyId, an optional +ClientRequestToken, and optional tags. + +# Arguments +- `adapter_id`: A string containing a unique ID for the adapter that will receive a new + version. +- `dataset_config`: Specifies a dataset used to train a new adapter version. Takes a + ManifestS3Object as the value. +- `output_config`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: Idempotent token is used to recognize the request. If the same + token is used with multiple CreateAdapterVersion requests, the same session is returned. + This token is employed to avoid unintentionally creating the same session multiple times. +- `"KMSKeyId"`: The identifier for your AWS Key Management Service key (AWS KMS key). Used + to encrypt your documents. 
+- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the adapter version. +""" +function create_adapter_version( + AdapterId, + DatasetConfig, + OutputConfig; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "CreateAdapterVersion", + Dict{String,Any}( + "AdapterId" => AdapterId, + "DatasetConfig" => DatasetConfig, + "OutputConfig" => OutputConfig, + "ClientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_adapter_version( + AdapterId, + DatasetConfig, + OutputConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "CreateAdapterVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AdapterId" => AdapterId, + "DatasetConfig" => DatasetConfig, + "OutputConfig" => OutputConfig, + "ClientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_adapter(adapter_id) + delete_adapter(adapter_id, params::Dict{String,<:Any}) + +Deletes an Amazon Textract adapter. Takes an AdapterId and deletes the adapter specified by +the ID. + +# Arguments +- `adapter_id`: A string containing a unique ID for the adapter to be deleted. + +""" +function delete_adapter(AdapterId; aws_config::AbstractAWSConfig=global_aws_config()) + return textract( + "DeleteAdapter", + Dict{String,Any}("AdapterId" => AdapterId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_adapter( + AdapterId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "DeleteAdapter", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AdapterId" => AdapterId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_adapter_version(adapter_id, adapter_version) + delete_adapter_version(adapter_id, adapter_version, params::Dict{String,<:Any}) + +Deletes an Amazon Textract adapter version. Requires that you specify both an AdapterId and +a AdapterVersion. Deletes the adapter version specified by the AdapterId and the +AdapterVersion. + +# Arguments +- `adapter_id`: A string containing a unique ID for the adapter version that will be + deleted. +- `adapter_version`: Specifies the adapter version to be deleted. 
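+
+# Example
+A minimal, illustrative call. The adapter ID and version below are placeholders rather
+than values taken from this definition, and the request uses whatever credentials
+`global_aws_config()` resolves to.
+
+```julia
+# Remove a single version of a previously created adapter (placeholder identifiers).
+delete_adapter_version("placeholder-adapter-id", "1")
+```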
+ +""" +function delete_adapter_version( + AdapterId, AdapterVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return textract( + "DeleteAdapterVersion", + Dict{String,Any}("AdapterId" => AdapterId, "AdapterVersion" => AdapterVersion); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_adapter_version( + AdapterId, + AdapterVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "DeleteAdapterVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AdapterId" => AdapterId, "AdapterVersion" => AdapterVersion + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ detect_document_text(document) detect_document_text(document, params::Dict{String,<:Any}) @@ -198,6 +409,87 @@ function detect_document_text( ) end +""" + get_adapter(adapter_id) + get_adapter(adapter_id, params::Dict{String,<:Any}) + +Gets configuration information for an adapter specified by an AdapterId, returning +information on AdapterName, Description, CreationTime, AutoUpdate status, and FeatureTypes. + +# Arguments +- `adapter_id`: A string containing a unique ID for the adapter. + +""" +function get_adapter(AdapterId; aws_config::AbstractAWSConfig=global_aws_config()) + return textract( + "GetAdapter", + Dict{String,Any}("AdapterId" => AdapterId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_adapter( + AdapterId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "GetAdapter", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AdapterId" => AdapterId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_adapter_version(adapter_id, adapter_version) + get_adapter_version(adapter_id, adapter_version, params::Dict{String,<:Any}) + +Gets configuration information for the specified adapter version, including: AdapterId, +AdapterVersion, FeatureTypes, Status, StatusMessage, DatasetConfig, KMSKeyId, OutputConfig, +Tags and EvaluationMetrics. + +# Arguments +- `adapter_id`: A string specifying a unique ID for the adapter version you want to + retrieve information for. +- `adapter_version`: A string specifying the adapter version you want to retrieve + information for. + +""" +function get_adapter_version( + AdapterId, AdapterVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return textract( + "GetAdapterVersion", + Dict{String,Any}("AdapterId" => AdapterId, "AdapterVersion" => AdapterVersion); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_adapter_version( + AdapterId, + AdapterVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "GetAdapterVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AdapterId" => AdapterId, "AdapterVersion" => AdapterVersion + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_document_analysis(job_id) get_document_analysis(job_id, params::Dict{String,<:Any}) @@ -463,6 +755,104 @@ function get_lending_analysis_summary( ) end +""" + list_adapter_versions() + list_adapter_versions(params::Dict{String,<:Any}) + +List all version of an adapter that meet the specified filtration criteria. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AdapterId"`: A string containing a unique ID for the adapter to match for when listing + adapter versions. +- `"AfterCreationTime"`: Specifies the lower bound for the ListAdapterVersions operation. + Ensures ListAdapterVersions returns only adapter versions created after the specified + creation time. +- `"BeforeCreationTime"`: Specifies the upper bound for the ListAdapterVersions operation. + Ensures ListAdapterVersions returns only adapter versions created after the specified + creation time. +- `"MaxResults"`: The maximum number of results to return when listing adapter versions. +- `"NextToken"`: Identifies the next page of results to return when listing adapter + versions. +""" +function list_adapter_versions(; aws_config::AbstractAWSConfig=global_aws_config()) + return textract( + "ListAdapterVersions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_adapter_versions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return textract( + "ListAdapterVersions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_adapters() + list_adapters(params::Dict{String,<:Any}) + +Lists all adapters that match the specified filtration criteria. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AfterCreationTime"`: Specifies the lower bound for the ListAdapters operation. Ensures + ListAdapters returns only adapters created after the specified creation time. +- `"BeforeCreationTime"`: Specifies the upper bound for the ListAdapters operation. Ensures + ListAdapters returns only adapters created before the specified creation time. +- `"MaxResults"`: The maximum number of results to return when listing adapters. +- `"NextToken"`: Identifies the next page of results to return when listing adapters. +""" +function list_adapters(; aws_config::AbstractAWSConfig=global_aws_config()) + return textract("ListAdapters"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_adapters( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return textract( + "ListAdapters", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists all tags for an Amazon Textract resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) that specifies the resource to list tags + for. + +""" +function list_tags_for_resource( + ResourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return textract( + "ListTagsForResource", + Dict{String,Any}("ResourceARN" => ResourceARN); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceARN" => ResourceARN), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_document_analysis(document_location, feature_types) start_document_analysis(document_location, feature_types, params::Dict{String,<:Any}) @@ -489,6 +879,7 @@ StartDocumentAnalysis. For more information, see Document Text Analysis. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AdaptersConfig"`: Specifies the adapter to be used when analyzing a document. - `"ClientRequestToken"`: The idempotent token that you use to identify the start request. If you use the same token with multiple StartDocumentAnalysis requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidentally started @@ -739,3 +1130,123 @@ function start_lending_analysis( feature_set=SERVICE_FEATURE_SET, ) end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds one or more tags to the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) that specifies the resource to be tagged. +- `tags`: A set of tags (key-value pairs) that you want to assign to the resource. + +""" +function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return textract( + "TagResource", + Dict{String,Any}("ResourceARN" => ResourceARN, "Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceARN, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceARN" => ResourceARN, "Tags" => Tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes any tags with the specified keys from the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) that specifies the resource to be untagged. +- `tag_keys`: Specifies the tags to be removed from the resource specified by the + ResourceARN. + +""" +function untag_resource( + ResourceARN, TagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return textract( + "UntagResource", + Dict{String,Any}("ResourceARN" => ResourceARN, "TagKeys" => TagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceARN, + TagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ResourceARN" => ResourceARN, "TagKeys" => TagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_adapter(adapter_id) + update_adapter(adapter_id, params::Dict{String,<:Any}) + +Update the configuration for an adapter. FeatureTypes configurations cannot be updated. At +least one new parameter must be specified as an argument. + +# Arguments +- `adapter_id`: A string containing a unique ID for the adapter that will be updated. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AdapterName"`: The new name to be applied to the adapter. +- `"AutoUpdate"`: The new auto-update status to be applied to the adapter. +- `"Description"`: The new description to be applied to the adapter. 
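+
+# Example
+An illustrative sketch of updating an adapter through the optional `params` dictionary.
+The adapter ID and new name are placeholders, and the `"DISABLED"` value follows the
+wording of the CreateAdapter description above rather than a verified enum.
+
+```julia
+# Rename an adapter and switch off automatic updates (placeholder values).
+update_adapter(
+    "placeholder-adapter-id",
+    Dict("AdapterName" => "invoices-adapter-v2", "AutoUpdate" => "DISABLED"),
+)
+```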
+""" +function update_adapter(AdapterId; aws_config::AbstractAWSConfig=global_aws_config()) + return textract( + "UpdateAdapter", + Dict{String,Any}("AdapterId" => AdapterId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_adapter( + AdapterId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return textract( + "UpdateAdapter", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("AdapterId" => AdapterId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/timestream_influxdb.jl b/src/services/timestream_influxdb.jl new file mode 100644 index 0000000000..9766e407c9 --- /dev/null +++ b/src/services/timestream_influxdb.jl @@ -0,0 +1,459 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: timestream_influxdb +using AWS.Compat +using AWS.UUIDs + +""" + create_db_instance(allocated_storage, db_instance_type, name, password, vpc_security_group_ids, vpc_subnet_ids) + create_db_instance(allocated_storage, db_instance_type, name, password, vpc_security_group_ids, vpc_subnet_ids, params::Dict{String,<:Any}) + +Creates a new Timestream for InfluxDB DB instance. + +# Arguments +- `allocated_storage`: The amount of storage to allocate for your DB storage type in GiB + (gibibytes). +- `db_instance_type`: The Timestream for InfluxDB DB instance type to run InfluxDB on. +- `name`: The name that uniquely identifies the DB instance when interacting with the + Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix + included in the endpoint. DB instance names must be unique per customer and per region. +- `password`: The password of the initial admin user created in InfluxDB. This password + will allow you to access the InfluxDB UI to perform various administrative tasks and also + use the InfluxDB CLI to create an operator token. These attributes will be stored in a + Secret created in AWS SecretManager in your account. +- `vpc_security_group_ids`: A list of VPC security group IDs to associate with the DB + instance. +- `vpc_subnet_ids`: A list of VPC subnet IDs to associate with the DB instance. Provide at + least two VPC subnet IDs in different availability zones when deploying with a Multi-AZ + standby. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"bucket"`: The name of the initial InfluxDB bucket. All InfluxDB data is stored in a + bucket. A bucket combines the concept of a database and a retention period (the duration of + time that each data point persists). A bucket belongs to an organization. +- `"dbParameterGroupIdentifier"`: The id of the DB parameter group to assign to your DB + instance. DB parameter groups specify how the database is configured. For example, DB + parameter groups can specify the limit for query concurrency. +- `"dbStorageType"`: The Timestream for InfluxDB DB storage type to read and write InfluxDB + data. You can choose between 3 different types of provisioned Influx IOPS included storage + according to your workloads requirements: Influx IO Included 3000 IOPS Influx IO + Included 12000 IOPS Influx IO Included 16000 IOPS +- `"deploymentType"`: Specifies whether the DB instance will be deployed as a standalone + instance or with a Multi-AZ standby for high availability. +- `"logDeliveryConfiguration"`: Configuration for sending InfluxDB engine logs to a + specified S3 bucket. 
+- `"organization"`: The name of the initial organization for the initial admin user in + InfluxDB. An InfluxDB organization is a workspace for a group of users. +- `"publiclyAccessible"`: Configures the DB instance with a public IP to facilitate access. +- `"tags"`: A list of key-value pairs to associate with the DB instance. +- `"username"`: The username of the initial admin user created in InfluxDB. Must start with + a letter and can't end with a hyphen or contain two consecutive hyphens. For example, + my-user1. This username will allow you to access the InfluxDB UI to perform various + administrative tasks and also use the InfluxDB CLI to create an operator token. These + attributes will be stored in a Secret created in Amazon Secrets Manager in your account. +""" +function create_db_instance( + allocatedStorage, + dbInstanceType, + name, + password, + vpcSecurityGroupIds, + vpcSubnetIds; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "CreateDbInstance", + Dict{String,Any}( + "allocatedStorage" => allocatedStorage, + "dbInstanceType" => dbInstanceType, + "name" => name, + "password" => password, + "vpcSecurityGroupIds" => vpcSecurityGroupIds, + "vpcSubnetIds" => vpcSubnetIds, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_db_instance( + allocatedStorage, + dbInstanceType, + name, + password, + vpcSecurityGroupIds, + vpcSubnetIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "CreateDbInstance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "allocatedStorage" => allocatedStorage, + "dbInstanceType" => dbInstanceType, + "name" => name, + "password" => password, + "vpcSecurityGroupIds" => vpcSecurityGroupIds, + "vpcSubnetIds" => vpcSubnetIds, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_db_parameter_group(name) + create_db_parameter_group(name, params::Dict{String,<:Any}) + +Creates a new Timestream for InfluxDB DB parameter group to associate with DB instances. + +# Arguments +- `name`: The name of the DB parameter group. The name must be unique per customer and per + region. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the DB parameter group. +- `"parameters"`: A list of the parameters that comprise the DB parameter group. +- `"tags"`: A list of key-value pairs to associate with the DB parameter group. +""" +function create_db_parameter_group(name; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_influxdb( + "CreateDbParameterGroup", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_db_parameter_group( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_influxdb( + "CreateDbParameterGroup", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_db_instance(identifier) + delete_db_instance(identifier, params::Dict{String,<:Any}) + +Deletes a Timestream for InfluxDB DB instance. + +# Arguments +- `identifier`: The id of the DB instance. 
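+
+# Example
+A minimal sketch; the identifier is a placeholder for the id returned when the DB
+instance was created or listed, and the call relies on the default `global_aws_config()`.
+
+```julia
+# Delete a Timestream for InfluxDB DB instance by its id (placeholder value).
+delete_db_instance("placeholder-db-instance-id")
+```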
+ +""" +function delete_db_instance(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_influxdb( + "DeleteDbInstance", + Dict{String,Any}("identifier" => identifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_db_instance( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "DeleteDbInstance", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("identifier" => identifier), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_db_instance(identifier) + get_db_instance(identifier, params::Dict{String,<:Any}) + +Returns a Timestream for InfluxDB DB instance. + +# Arguments +- `identifier`: The id of the DB instance. + +""" +function get_db_instance(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_influxdb( + "GetDbInstance", + Dict{String,Any}("identifier" => identifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_db_instance( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "GetDbInstance", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("identifier" => identifier), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_db_parameter_group(identifier) + get_db_parameter_group(identifier, params::Dict{String,<:Any}) + +Returns a Timestream for InfluxDB DB parameter group. + +# Arguments +- `identifier`: The id of the DB parameter group. + +""" +function get_db_parameter_group( + identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_influxdb( + "GetDbParameterGroup", + Dict{String,Any}("identifier" => identifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_db_parameter_group( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "GetDbParameterGroup", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("identifier" => identifier), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_db_instances() + list_db_instances(params::Dict{String,<:Any}) + +Returns a list of Timestream for InfluxDB DB instances. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of items to return in the output. If the total number + of items available is more than the value specified, a NextToken is provided in the output. + To resume pagination, provide the NextToken value as argument of a subsequent API + invocation. +- `"nextToken"`: The pagination token. To resume pagination, provide the NextToken value as + argument of a subsequent API invocation. 
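+
+# Example
+An illustrative pagination sketch. It assumes the call returns a parsed, `Dict`-like
+body whose continuation token is exposed under `"nextToken"` (a name inferred from the
+request parameter above); the page size is arbitrary.
+
+```julia
+# Fetch the first page of DB instances, 20 per page.
+resp = list_db_instances(Dict("maxResults" => 20))
+# Request the next page only when the service reports more results.
+if haskey(resp, "nextToken")
+    resp = list_db_instances(Dict("maxResults" => 20, "nextToken" => resp["nextToken"]))
+end
+```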
+""" +function list_db_instances(; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_influxdb( + "ListDbInstances"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_db_instances( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_influxdb( + "ListDbInstances", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_db_parameter_groups() + list_db_parameter_groups(params::Dict{String,<:Any}) + +Returns a list of Timestream for InfluxDB DB parameter groups. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of items to return in the output. If the total number + of items available is more than the value specified, a NextToken is provided in the output. + To resume pagination, provide the NextToken value as argument of a subsequent API + invocation. +- `"nextToken"`: The pagination token. To resume pagination, provide the NextToken value as + argument of a subsequent API invocation. +""" +function list_db_parameter_groups(; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_influxdb( + "ListDbParameterGroups"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_db_parameter_groups( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_influxdb( + "ListDbParameterGroups", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +A list of tags applied to the resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the tagged resource. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_influxdb( + "ListTagsForResource", + Dict{String,Any}("resourceArn" => resourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("resourceArn" => resourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Tags are composed of a Key/Value pairs. You can use tags to categorize and track your +Timestream for InfluxDB resources. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the tagged resource. +- `tags`: A list of tags used to categorize and track resources. 
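+
+# Example
+An illustrative call. The ARN is a placeholder, and the tag payload is assumed to be a
+plain string-to-string map (matching the key-value pairs described above); consult the
+service model if the request shape differs.
+
+```julia
+# Attach an environment tag to a DB instance (placeholder ARN).
+tag_resource(
+    "arn:aws:timestream-influxdb:us-east-1:111122223333:db-instance/placeholder-id",
+    Dict("Environment" => "dev"),
+)
+```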
+ +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_influxdb( + "TagResource", + Dict{String,Any}("resourceArn" => resourceArn, "tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceArn" => resourceArn, "tags" => tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes the tag from the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the tagged resource. +- `tag_keys`: The keys used to identify the tags. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_influxdb( + "UntagResource", + Dict{String,Any}("resourceArn" => resourceArn, "tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceArn" => resourceArn, "tagKeys" => tagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_db_instance(identifier) + update_db_instance(identifier, params::Dict{String,<:Any}) + +Updates a Timestream for InfluxDB DB instance. + +# Arguments +- `identifier`: The id of the DB instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"dbParameterGroupIdentifier"`: The id of the DB parameter group to assign to your DB + instance. DB parameter groups specify how the database is configured. For example, DB + parameter groups can specify the limit for query concurrency. +- `"logDeliveryConfiguration"`: Configuration for sending InfluxDB engine logs to send to + specified S3 bucket. +""" +function update_db_instance(identifier; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_influxdb( + "UpdateDbInstance", + Dict{String,Any}("identifier" => identifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_db_instance( + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return timestream_influxdb( + "UpdateDbInstance", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("identifier" => identifier), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/timestream_query.jl b/src/services/timestream_query.jl index aabae6f55b..1ed4086d20 100644 --- a/src/services/timestream_query.jl +++ b/src/services/timestream_query.jl @@ -176,6 +176,31 @@ function delete_scheduled_query( ) end +""" + describe_account_settings() + describe_account_settings(params::Dict{String,<:Any}) + +Describes the settings for your account that include the query pricing model and the +configured maximum TCUs the service can use for your query workload. You're charged only +for the duration of compute units used for your workloads. 
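+
+# Example
+A minimal sketch. It assumes a parsed, `Dict`-like response and that the settings are
+reported under `"QueryPricingModel"` and `"MaxQueryTCU"` (names inferred from the
+UpdateAccountSettings parameters later in this file).
+
+```julia
+# Inspect the account-level query pricing model and TCU ceiling.
+settings = describe_account_settings()
+pricing = get(settings, "QueryPricingModel", "unknown")
+max_tcu = get(settings, "MaxQueryTCU", nothing)
+```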
+ +""" +function describe_account_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_query( + "DescribeAccountSettings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_account_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_query( + "DescribeAccountSettings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_endpoints() describe_endpoints(params::Dict{String,<:Any}) @@ -369,8 +394,8 @@ end prepare_query(query_string, params::Dict{String,<:Any}) A synchronous operation that allows you to submit a query with parameters to be stored by -Timestream for later running. Timestream only supports using this operation with the -PrepareQueryRequestValidateOnly set to true. +Timestream for later running. Timestream only supports using this operation with +ValidateOnly set to true. # Arguments - `query_string`: The Timestream query string that you want to use as a prepared statement. @@ -577,6 +602,42 @@ function untag_resource( ) end +""" + update_account_settings() + update_account_settings(params::Dict{String,<:Any}) + +Transitions your account to use TCUs for query pricing and modifies the maximum query +compute units that you've configured. If you reduce the value of MaxQueryTCU to a desired +configuration, the new value can take up to 24 hours to be effective. After you've +transitioned your account to use TCUs for query pricing, you can't transition to using +bytes scanned for query pricing. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxQueryTCU"`: The maximum number of compute units the service will use at any point in + time to serve your queries. To run queries, you must set a minimum capacity of 4 TCU. You + can set the maximum number of TCU in multiples of 4, for example, 4, 8, 16, 32, and so on. + The maximum value supported for MaxQueryTCU is 1000. To request an increase to this soft + limit, contact Amazon Web Services Support. For information about the default quota for + maxQueryTCU, see Default quotas. +- `"QueryPricingModel"`: The pricing model for queries in an account. +""" +function update_account_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return timestream_query( + "UpdateAccountSettings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_account_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return timestream_query( + "UpdateAccountSettings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_scheduled_query(scheduled_query_arn, state) update_scheduled_query(scheduled_query_arn, state, params::Dict{String,<:Any}) diff --git a/src/services/transcribe.jl b/src/services/transcribe.jl index cbf0b09a64..0a1fbc1bc3 100644 --- a/src/services/transcribe.jl +++ b/src/services/transcribe.jl @@ -517,6 +517,47 @@ function delete_language_model( ) end +""" + delete_medical_scribe_job(medical_scribe_job_name) + delete_medical_scribe_job(medical_scribe_job_name, params::Dict{String,<:Any}) + +Deletes a Medical Scribe job. To use this operation, specify the name of the job you want +to delete using MedicalScribeJobName. Job names are case sensitive. + +# Arguments +- `medical_scribe_job_name`: The name of the Medical Scribe job you want to delete. Job + names are case sensitive. 
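+
+# Example
+An illustrative call; the job name is a placeholder and, as noted above, job names are
+case sensitive.
+
+```julia
+# Remove a finished HealthScribe job by name (placeholder value).
+delete_medical_scribe_job("my-medical-scribe-job")
+```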
+ +""" +function delete_medical_scribe_job( + MedicalScribeJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return transcribe( + "DeleteMedicalScribeJob", + Dict{String,Any}("MedicalScribeJobName" => MedicalScribeJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_medical_scribe_job( + MedicalScribeJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return transcribe( + "DeleteMedicalScribeJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("MedicalScribeJobName" => MedicalScribeJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_medical_transcription_job(medical_transcription_job_name) delete_medical_transcription_job(medical_transcription_job_name, params::Dict{String,<:Any}) @@ -844,6 +885,50 @@ function get_call_analytics_job( ) end +""" + get_medical_scribe_job(medical_scribe_job_name) + get_medical_scribe_job(medical_scribe_job_name, params::Dict{String,<:Any}) + +Provides information about the specified Medical Scribe job. To view the status of the +specified medical transcription job, check the MedicalScribeJobStatus field. If the status +is COMPLETED, the job is finished. You can find the results at the location specified in +MedicalScribeOutput. If the status is FAILED, FailureReason provides details on why your +Medical Scribe job failed. To get a list of your Medical Scribe jobs, use the operation. + +# Arguments +- `medical_scribe_job_name`: The name of the Medical Scribe job you want information about. + Job names are case sensitive. + +""" +function get_medical_scribe_job( + MedicalScribeJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return transcribe( + "GetMedicalScribeJob", + Dict{String,Any}("MedicalScribeJobName" => MedicalScribeJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_medical_scribe_job( + MedicalScribeJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return transcribe( + "GetMedicalScribeJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("MedicalScribeJobName" => MedicalScribeJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_medical_transcription_job(medical_transcription_job_name) get_medical_transcription_job(medical_transcription_job_name, params::Dict{String,<:Any}) @@ -1068,7 +1153,7 @@ operation. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of Call Analytics categories to return in each page of results. If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NextToken"`: If your ListCallAnalyticsCategories request returns more results than can be displayed, NextToken is displayed in the response with an associated string. To get the next page of results, copy this string and repeat your request, including NextToken with @@ -1106,13 +1191,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys string. The search is not case sensitive. - `"MaxResults"`: The maximum number of Call Analytics jobs to return in each page of results. 
If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NextToken"`: If your ListCallAnalyticsJobs request returns more results than can be displayed, NextToken is displayed in the response with an associated string. To get the next page of results, copy this string and repeat your request, including NextToken with the value of the copied string. Repeat as needed to view all your results. - `"Status"`: Returns only Call Analytics jobs with the specified status. Jobs are ordered - by creation date, with the newest job first. If you don't include Status, all Call + by creation date, with the newest job first. If you do not include Status, all Call Analytics jobs are returned. """ function list_call_analytics_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1143,7 +1228,7 @@ specific custom language model, use the operation. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of custom language models to return in each page of results. If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NameContains"`: Returns only the custom language models that contain the specified string. The search is not case sensitive. - `"NextToken"`: If your ListLanguageModels request returns more results than can be @@ -1151,7 +1236,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys next page of results, copy this string and repeat your request, including NextToken with the value of the copied string. Repeat as needed to view all your results. - `"StatusEquals"`: Returns only custom language models with the specified status. Language - models are ordered by creation date, with the newest model first. If you don't include + models are ordered by creation date, with the newest model first. If you do not include StatusEquals, all custom language models are returned. """ function list_language_models(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1167,6 +1252,45 @@ function list_language_models( ) end +""" + list_medical_scribe_jobs() + list_medical_scribe_jobs(params::Dict{String,<:Any}) + +Provides a list of Medical Scribe jobs that match the specified criteria. If no criteria +are specified, all Medical Scribe jobs are returned. To get detailed information about a +specific Medical Scribe job, use the operation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"JobNameContains"`: Returns only the Medical Scribe jobs that contain the specified + string. The search is not case sensitive. +- `"MaxResults"`: The maximum number of Medical Scribe jobs to return in each page of + results. If there are fewer results than the value that you specify, only the actual + results are returned. If you do not specify a value, a default of 5 is used. +- `"NextToken"`: If your ListMedicalScribeJobs request returns more results than can be + displayed, NextToken is displayed in the response with an associated string. To get the + next page of results, copy this string and repeat your request, including NextToken with + the value of the copied string. 
Repeat as needed to view all your results. +- `"Status"`: Returns only Medical Scribe jobs with the specified status. Jobs are ordered + by creation date, with the newest job first. If you do not include Status, all Medical + Scribe jobs are returned. +""" +function list_medical_scribe_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return transcribe( + "ListMedicalScribeJobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_medical_scribe_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return transcribe( + "ListMedicalScribeJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_medical_transcription_jobs() list_medical_transcription_jobs(params::Dict{String,<:Any}) @@ -1181,13 +1305,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specified string. The search is not case sensitive. - `"MaxResults"`: The maximum number of medical transcription jobs to return in each page of results. If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NextToken"`: If your ListMedicalTranscriptionJobs request returns more results than can be displayed, NextToken is displayed in the response with an associated string. To get the next page of results, copy this string and repeat your request, including NextToken with the value of the copied string. Repeat as needed to view all your results. - `"Status"`: Returns only medical transcription jobs with the specified status. Jobs are - ordered by creation date, with the newest job first. If you don't include Status, all + ordered by creation date, with the newest job first. If you do not include Status, all medical transcription jobs are returned. """ function list_medical_transcription_jobs(; @@ -1222,7 +1346,7 @@ information about a specific custom medical vocabulary, use the operation. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of custom medical vocabularies to return in each page of results. If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NameContains"`: Returns only the custom medical vocabularies that contain the specified string. The search is not case sensitive. - `"NextToken"`: If your ListMedicalVocabularies request returns more results than can be @@ -1231,7 +1355,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the value of the copied string. Repeat as needed to view all your results. - `"StateEquals"`: Returns only custom medical vocabularies with the specified state. Custom vocabularies are ordered by creation date, with the newest vocabulary first. If you - don't include StateEquals, all custom medical vocabularies are returned. + do not include StateEquals, all custom medical vocabularies are returned. """ function list_medical_vocabularies(; aws_config::AbstractAWSConfig=global_aws_config()) return transcribe( @@ -1304,14 +1428,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys string. The search is not case sensitive. 
- `"MaxResults"`: The maximum number of transcription jobs to return in each page of results. If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NextToken"`: If your ListTranscriptionJobs request returns more results than can be displayed, NextToken is displayed in the response with an associated string. To get the next page of results, copy this string and repeat your request, including NextToken with the value of the copied string. Repeat as needed to view all your results. - `"Status"`: Returns only transcription jobs with the specified status. Jobs are ordered - by creation date, with the newest job first. If you don't include Status, all transcription - jobs are returned. + by creation date, with the newest job first. If you do not include Status, all + transcription jobs are returned. """ function list_transcription_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) return transcribe( @@ -1341,7 +1465,7 @@ specific custom vocabulary, use the operation. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of custom vocabularies to return in each page of results. If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NameContains"`: Returns only the custom vocabularies that contain the specified string. The search is not case sensitive. - `"NextToken"`: If your ListVocabularies request returns more results than can be @@ -1349,7 +1473,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys next page of results, copy this string and repeat your request, including NextToken with the value of the copied string. Repeat as needed to view all your results. - `"StateEquals"`: Returns only custom vocabularies with the specified state. Vocabularies - are ordered by creation date, with the newest vocabulary first. If you don't include + are ordered by creation date, with the newest vocabulary first. If you do not include StateEquals, all custom medical vocabularies are returned. """ function list_vocabularies(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1377,7 +1501,7 @@ about a specific custom vocabulary filter, use the operation. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of custom vocabulary filters to return in each page of results. If there are fewer results than the value that you specify, only the actual - results are returned. If you don't specify a value, a default of 5 is used. + results are returned. If you do not specify a value, a default of 5 is used. - `"NameContains"`: Returns only the custom vocabulary filters that contain the specified string. The search is not case sensitive. - `"NextToken"`: If your ListVocabularyFilters request returns more results than can be @@ -1463,7 +1587,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. 
If you - don't specify an encryption key, your output is encrypted with the default Amazon S3 key + do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3). If you specify a KMS key to encrypt your output, you must also specify an output location using the OutputLocation parameter. Note that the role making the request must have permission to use the specified KMS key. @@ -1474,9 +1598,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys file name (option 3), the name of your output file has a default value that matches the name you specified for your transcription job using the CallAnalyticsJobName parameter. You can specify a KMS key to encrypt your output using the OutputEncryptionKMSKeyId parameter. - If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for - server-side encryption. If you don't specify OutputLocation, your transcript is placed in a - service-managed Amazon S3 bucket and you are provided with a URI to access your transcript. + If you do not specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for + server-side encryption. If you do not specify OutputLocation, your transcript is placed in + a service-managed Amazon S3 bucket and you are provided with a URI to access your + transcript. - `"Settings"`: Specify additional optional settings in your request, including content redaction; allows you to apply custom language models, vocabulary filters, and custom vocabularies to your Call Analytics job. @@ -1513,6 +1638,135 @@ function start_call_analytics_job( ) end + +""" + start_medical_scribe_job(data_access_role_arn, media, medical_scribe_job_name, output_bucket_name, settings) + start_medical_scribe_job(data_access_role_arn, media, medical_scribe_job_name, output_bucket_name, settings, params::Dict{String,<:Any}) + +Transcribes patient-clinician conversations and generates clinical notes. Amazon Web +Services HealthScribe automatically provides rich conversation transcripts, identifies +speaker roles, classifies dialogues, extracts medical terms, and generates preliminary +clinical notes. To learn more about these features, refer to Amazon Web Services +HealthScribe. To make a StartMedicalScribeJob request, you must first upload your media +file into an Amazon S3 bucket; you can then specify the Amazon S3 location of the file +using the Media parameter. You must include the following parameters in your +StartMedicalScribeJob request: DataAccessRoleArn: The ARN of an IAM role with +these minimum permissions: read permission on input file Amazon S3 bucket specified in +Media, write permission on the Amazon S3 bucket specified in OutputBucketName, and full +permissions on the KMS key specified in OutputEncryptionKMSKeyId (if set). The role should +also allow transcribe.amazonaws.com to assume it. Media (MediaFileUri): The Amazon S3 +location of your media file. MedicalScribeJobName: A custom name you create for your +Medical Scribe job that is unique within your Amazon Web Services account. +OutputBucketName: The Amazon S3 bucket where you want your output files stored. +Settings: A MedicalScribeSettings object that must set exactly one of ShowSpeakerLabels or +ChannelIdentification to true. If ShowSpeakerLabels is true, MaxSpeakerLabels must also be +set. ChannelDefinitions: A MedicalScribeChannelDefinitions array should be set if and +only if the ChannelIdentification value of Settings is set to true.
+ +# Arguments +- `data_access_role_arn`: The Amazon Resource Name (ARN) of an IAM role that has + permissions to access the Amazon S3 bucket that contains your input files, write to the + output bucket, and use your KMS key if supplied. If the role that you specify doesn’t + have the appropriate permissions your request fails. IAM role ARNs have the format + arn:partition:iam::account:role/role-name-with-path. For example: + arn:aws:iam::111122223333:role/Admin. For more information, see IAM ARNs. +- `media`: +- `medical_scribe_job_name`: A unique name, chosen by you, for your Medical Scribe job. + This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web + Services account. If you try to create a new job with the same name as an existing job, you + get a ConflictException error. +- `output_bucket_name`: The name of the Amazon S3 bucket where you want your Medical Scribe + output stored. Do not include the S3:// prefix of the specified bucket. Note that the role + specified in the DataAccessRoleArn request parameter must have permission to use the + specified location. You can change Amazon S3 permissions using the Amazon Web Services + Management Console. See also Permissions Required for IAM User Roles. +- `settings`: Makes it possible to control how your Medical Scribe job is processed using a + MedicalScribeSettings object. Specify ChannelIdentification if ChannelDefinitions are set. + Enabled ShowSpeakerLabels if ChannelIdentification and ChannelDefinitions are not set. One + and only one of ChannelIdentification and ShowSpeakerLabels must be set. If + ShowSpeakerLabels is set, MaxSpeakerLabels must also be set. Use Settings to specify a + vocabulary or vocabulary filter or both using VocabularyName, VocabularyFilterName. + VocabularyFilterMethod must be specified if VocabularyFilterName is set. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChannelDefinitions"`: Makes it possible to specify which speaker is on which channel. + For example, if the clinician is the first participant to speak, you would set ChannelId of + the first ChannelDefinition in the list to 0 (to indicate the first channel) and + ParticipantRole to CLINICIAN (to indicate that it's the clinician speaking). Then you would + set the ChannelId of the second ChannelDefinition in the list to 1 (to indicate the second + channel) and ParticipantRole to PATIENT (to indicate that it's the patient speaking). +- `"KMSEncryptionContext"`: A map of plain text, non-secret key:value pairs, known as + encryption context pairs, that provide an added layer of security for your data. For more + information, see KMS encryption context and Asymmetric keys in KMS. +- `"OutputEncryptionKMSKeyId"`: The KMS key you want to use to encrypt your Medical Scribe + output. If using a key located in the current Amazon Web Services account, you can specify + your KMS key in one of four ways: Use the KMS key ID itself. For example, + 1234abcd-12ab-34cd-56ef-1234567890ab. Use an alias for the KMS key ID. For example, + alias/ExampleAlias. Use the Amazon Resource Name (ARN) for the KMS key ID. For example, + arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for + the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. 
If + using a key located in a different Amazon Web Services account than the current Amazon Web + Services account, you can specify your KMS key in one of two ways: Use the ARN for the + KMS key ID. For example, + arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for + the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. If you + do not specify an encryption key, your output is encrypted with the default Amazon S3 key + (SSE-S3). Note that the role specified in the DataAccessRoleArn request parameter must have + permission to use the specified KMS key. +- `"Tags"`: Adds one or more custom tags, each in the form of a key:value pair, to the + Medica Scribe job. To learn more about using tags with Amazon Transcribe, refer to Tagging + resources. +""" +function start_medical_scribe_job( + DataAccessRoleArn, + Media, + MedicalScribeJobName, + OutputBucketName, + Settings; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return transcribe( + "StartMedicalScribeJob", + Dict{String,Any}( + "DataAccessRoleArn" => DataAccessRoleArn, + "Media" => Media, + "MedicalScribeJobName" => MedicalScribeJobName, + "OutputBucketName" => OutputBucketName, + "Settings" => Settings, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_medical_scribe_job( + DataAccessRoleArn, + Media, + MedicalScribeJobName, + OutputBucketName, + Settings, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return transcribe( + "StartMedicalScribeJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DataAccessRoleArn" => DataAccessRoleArn, + "Media" => Media, + "MedicalScribeJobName" => MedicalScribeJobName, + "OutputBucketName" => OutputBucketName, + "Settings" => Settings, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_medical_transcription_job(language_code, media, medical_transcription_job_name, output_bucket_name, specialty, type) start_medical_transcription_job(language_code, media, medical_transcription_job_name, output_bucket_name, specialty, type, params::Dict{String,<:Any}) @@ -1523,8 +1777,8 @@ transcription features, Amazon Transcribe Medical provides you with a robust med vocabulary and, optionally, content identification, which adds flags to personal health information (PHI). To learn more about these features, refer to How Amazon Transcribe Medical works. To make a StartMedicalTranscriptionJob request, you must first upload your -media file into an Amazon S3 bucket; you can then specify the S3 location of the file using -the Media parameter. You must include the following parameters in your +media file into an Amazon S3 bucket; you can then specify the Amazon S3 location of the +file using the Media parameter. You must include the following parameters in your StartMedicalTranscriptionJob request: region: The Amazon Web Services Region where you are making your request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and quotas. @@ -1574,7 +1828,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see KMS encryption context and Asymmetric keys in KMS. - `"MediaFormat"`: Specify the format of your input media file. - `"MediaSampleRateHertz"`: The sample rate, in hertz, of the audio track in your input - media file. 
If you don't specify the media sample rate, Amazon Transcribe Medical + media file. If you do not specify the media sample rate, Amazon Transcribe Medical determines it for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe Medical; if there's a mismatch between the value that you specify and the value detected, your job fails. Therefore, in most cases, it's advised to omit @@ -1591,7 +1845,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. If you - don't specify an encryption key, your output is encrypted with the default Amazon S3 key + do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3). If you specify a KMS key to encrypt your output, you must also specify an output location using the OutputLocation parameter. Note that the role making the request must have permission to use the specified KMS key. @@ -1690,7 +1944,7 @@ custom name you create for your transcription job that is unique within your Ama Services account. Media (MediaFileUri): The Amazon S3 location of your media file. One of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages: If you know the language of your media file, specify it using the LanguageCode parameter; you can find all valid -language codes in the Supported languages table. If you don't know the languages spoken in +language codes in the Supported languages table. If you do not know the languages spoken in your media, use either IdentifyLanguage or IdentifyMultipleLanguages and let Amazon Transcribe identify the languages for you. @@ -1708,7 +1962,9 @@ Transcribe identify the languages for you. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ContentRedaction"`: Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If you use ContentRedaction, you must - also include the sub-parameters: PiiEntityTypes, RedactionOutput, and RedactionType. + also include the sub-parameters: RedactionOutput and RedactionType. You can optionally + include PiiEntityTypes to choose which types of PII you want to redact. If you do not + include PiiEntityTypes in your request, all PII is redacted. - `"IdentifyLanguage"`: Enables automatic language identification in your transcription job request. Use this parameter if your media file contains only one language. If your media contains multiple languages, use IdentifyMultipleLanguages instead. If you include @@ -1780,11 +2036,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Arabic (ar-SA), your media file must be encoded at a sample rate of 16,000 Hz or higher. - `"MediaFormat"`: Specify the format of your input media file. - `"MediaSampleRateHertz"`: The sample rate, in hertz, of the audio track in your input - media file. If you don't specify the media sample rate, Amazon Transcribe determines it for - you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe. - If there's a mismatch between the value that you specify and the value detected, your job - fails. In most cases, you can omit MediaSampleRateHertz and let Amazon Transcribe determine - the sample rate. + media file. 
If you do not specify the media sample rate, Amazon Transcribe determines it + for you. If you specify the sample rate, it must match the rate detected by Amazon + Transcribe. If there's a mismatch between the value that you specify and the value + detected, your job fails. In most cases, you can omit MediaSampleRateHertz and let Amazon + Transcribe determine the sample rate. - `"ModelSettings"`: Specify the custom language model you want to include with your transcription job. If you include ModelSettings in your request, you must include the LanguageModelName sub-parameter. For more information, see Custom language models. @@ -1797,7 +2053,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DOC-EXAMPLE-BUCKET and OutputKey to test-files/. Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3 permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles. - If you don't specify OutputBucketName, your transcript is placed in a service-managed + If you do not specify OutputBucketName, your transcript is placed in a service-managed Amazon S3 bucket and you are provided with a URI to access your transcript. - `"OutputEncryptionKMSKeyId"`: The KMS key you want to use to encrypt your transcription output. If using a key located in the current Amazon Web Services account, you can specify @@ -1811,7 +2067,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. If you - don't specify an encryption key, your output is encrypted with the default Amazon S3 key + do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3). If you specify a KMS key to encrypt your output, you must also specify an output location using the OutputLocation parameter. Note that the role making the request must have permission to use the specified KMS key. @@ -1847,6 +2103,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Tags"`: Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you start this new job. To learn more about using tags with Amazon Transcribe, refer to Tagging resources. +- `"ToxicityDetection"`: Enables toxic speech detection in your transcript. If you include + ToxicityDetection in your request, you must also include ToxicityCategories. For + information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic + speech. """ function start_transcription_job( Media, TranscriptionJobName; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/transfer.jl b/src/services/transfer.jl index b2d3226404..8c21b3aeaa 100644 --- a/src/services/transfer.jl +++ b/src/services/transfer.jl @@ -36,7 +36,8 @@ the access to the correct set of users who need this ability. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"HomeDirectory"`: The landing directory (folder) for a user when they log in to the - server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. + server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. 
The + HomeDirectory parameter is only used if HomeDirectoryType is set to PATH. - `"HomeDirectoryMappings"`: Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made @@ -51,9 +52,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys chroot. [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] - `"HomeDirectoryType"`: The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will - see the absolute Amazon S3 bucket or EFS paths as is in their file transfer protocol - clients. If you set it LOGICAL, you need to provide mappings in the HomeDirectoryMappings - for how you want to make Amazon S3 or Amazon EFS paths visible to your users. + see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol + clients. If you set it to LOGICAL, you need to provide mappings in the + HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to + your users. If HomeDirectoryType is LOGICAL, you must provide mappings, using the + HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you + provide an absolute path using the HomeDirectory parameter. You cannot have both + HomeDirectory and HomeDirectoryMappings in your template. - `"Policy"`: A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy @@ -112,15 +117,24 @@ certificate, and other attributes. The partner is identified with the PartnerPro the AS2 process is identified with the LocalProfileId. # Arguments -- `access_role`: With AS2, you can send files by calling StartFileTransfer and specifying - the file paths in the request parameter, SendFilePaths. We use the file’s parent - directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is - /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we - receive them from the partner, and write a final JSON file containing relevant metadata of - the transmission. So, the AccessRole needs to provide read and write access to the parent - directory of the file location used in the StartFileTransfer request. Additionally, you - need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. +- `access_role`: Connectors are used to send files using either the AS2 or SFTP protocol. + For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access + Management role to use. For AS2 connectors With AS2, you can send files by calling + StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We + use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, + parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store + the MDN when we receive them from the partner, and write a final JSON file containing + relevant metadata of the transmission. So, the AccessRole needs to provide read and write + access to the parent directory of the file location used in the StartFileTransfer request. 
+ Additionally, you need to provide read and write access to the parent directory of the + files that you intend to send with StartFileTransfer. If you are using Basic authentication + for your AS2 connector, the access role requires the secretsmanager:GetSecretValue + permission for the secret. If the secret is encrypted using a customer-managed key instead + of the Amazon Web Services managed key in Secrets Manager, then the role also needs the + kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role + provides read and write access to the parent directory of the file location that's used in + the StartFileTransfer request. Additionally, make sure that the role provides + secretsmanager:GetSecretValue permission to Secrets Manager. - `base_directory`: The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /DOC-EXAMPLE-BUCKET/home/mydirectory. - `local_profile_id`: A unique identifier for the AS2 local profile. @@ -185,49 +199,60 @@ function create_agreement( end """ - create_connector(access_role, as2_config, url) - create_connector(access_role, as2_config, url, params::Dict{String,<:Any}) + create_connector(access_role, url) + create_connector(access_role, url, params::Dict{String,<:Any}) -Creates the connector, which captures the parameters for an outbound connection for the AS2 -protocol. The connector is required for sending files to an externally hosted AS2 server. -For more details about connectors, see Create AS2 connectors. +Creates the connector, which captures the parameters for a connection for the AS2 or SFTP +protocol. For AS2, the connector is required for sending files to an externally hosted AS2 +server. For SFTP, the connector is required when sending files to an SFTP server or +receiving files from an SFTP server. For more details about connectors, see Configure AS2 +connectors and Create SFTP connectors. You must specify exactly one configuration object: +either for AS2 (As2Config) or SFTP (SftpConfig). # Arguments -- `access_role`: With AS2, you can send files by calling StartFileTransfer and specifying - the file paths in the request parameter, SendFilePaths. We use the file’s parent - directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is - /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we - receive them from the partner, and write a final JSON file containing relevant metadata of - the transmission. So, the AccessRole needs to provide read and write access to the parent - directory of the file location used in the StartFileTransfer request. Additionally, you - need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. -- `as2_config`: A structure that contains the parameters for a connector object. -- `url`: The URL of the partner's AS2 endpoint. +- `access_role`: Connectors are used to send files using either the AS2 or SFTP protocol. + For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access + Management role to use. For AS2 connectors With AS2, you can send files by calling + StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. 
We + use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, + parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store + the MDN when we receive them from the partner, and write a final JSON file containing + relevant metadata of the transmission. So, the AccessRole needs to provide read and write + access to the parent directory of the file location used in the StartFileTransfer request. + Additionally, you need to provide read and write access to the parent directory of the + files that you intend to send with StartFileTransfer. If you are using Basic authentication + for your AS2 connector, the access role requires the secretsmanager:GetSecretValue + permission for the secret. If the secret is encrypted using a customer-managed key instead + of the Amazon Web Services managed key in Secrets Manager, then the role also needs the + kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role + provides read and write access to the parent directory of the file location that's used in + the StartFileTransfer request. Additionally, make sure that the role provides + secretsmanager:GetSecretValue permission to Secrets Manager. +- `url`: The URL of the partner's AS2 or SFTP endpoint. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"As2Config"`: A structure that contains the parameters for an AS2 connector object. - `"LoggingRole"`: The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs. +- `"SecurityPolicyName"`: Specifies the name of the security policy for the connector. +- `"SftpConfig"`: A structure that contains the parameters for an SFTP connector object. - `"Tags"`: Key-value pairs that can be used to group and search for connectors. Tags are metadata attached to connectors for any purpose. """ function create_connector( - AccessRole, As2Config, Url; aws_config::AbstractAWSConfig=global_aws_config() + AccessRole, Url; aws_config::AbstractAWSConfig=global_aws_config() ) return transfer( "CreateConnector", - Dict{String,Any}( - "AccessRole" => AccessRole, "As2Config" => As2Config, "Url" => Url - ); + Dict{String,Any}("AccessRole" => AccessRole, "Url" => Url); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_connector( AccessRole, - As2Config, Url, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -236,11 +261,7 @@ function create_connector( "CreateConnector", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "AccessRole" => AccessRole, "As2Config" => As2Config, "Url" => Url - ), - params, + _merge, Dict{String,Any}("AccessRole" => AccessRole, "Url" => Url), params ), ); aws_config=aws_config, @@ -415,8 +436,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the IdentityProviderType can be set any of the supported identity types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY. If Protocol includes AS2, then the EndpointType must be VPC, and domain must be Amazon S3. -- `"SecurityPolicyName"`: Specifies the name of the security policy that is attached to the - server. +- `"S3StorageOptions"`: Specifies whether or not performance for your Amazon S3 directories + is optimized. This is disabled by default. 
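The CreateConnector change above makes As2Config optional and adds SftpConfig, so an SFTP connector is created roughly as sketched below; this is a hedged example, and the endpoint URL, role ARN, secret ARN, and host key are placeholders.

```julia
using AWS
@service Transfer

# Placeholders: your partner's SFTP endpoint, your access role, and the
# Secrets Manager secret that holds the SFTP user's credentials.
sftp_config = Dict(
    "UserSecretId"    => "arn:aws:secretsmanager:us-east-1:111122223333:secret:sftp-user-abc123",
    "TrustedHostKeys" => ["ssh-rsa AAAAB3Nza-example-key-body"],
)

Transfer.create_connector(
    "arn:aws:iam::111122223333:role/transfer-connector-access",  # AccessRole
    "sftp://sftp.partner.example.com",                           # Url
    Dict{String,Any}("SftpConfig" => sftp_config),               # optional parameters
)
```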
By default, home directory mappings have a TYPE + of DIRECTORY. If you enable this option, you would then need to explicitly set the + HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target. +- `"SecurityPolicyName"`: Specifies the name of the security policy for the server. +- `"StructuredLogDestinations"`: Specifies the log groups to which your server logs are + sent. To specify a log group, you must provide the ARN for an existing log group. In this + case, the format of the log group is as follows: + arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* For example, + arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* If you have previously + specified a log group for a server, you can clear it, and in effect turn off structured + logging, by providing an empty value for this parameter in an update-server call. For + example: update-server --server-id s-1234567890abcdef0 --structured-log-destinations - `"Tags"`: Key-value pairs that can be used to group and search for servers. - `"WorkflowDetails"`: Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow. In addition to a workflow to execute @@ -463,7 +495,8 @@ with tags that can be used to group and search for users. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"HomeDirectory"`: The landing directory (folder) for a user when they log in to the - server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. + server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. The + HomeDirectory parameter is only used if HomeDirectoryType is set to PATH. - `"HomeDirectoryMappings"`: Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made @@ -474,13 +507,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target - to the HomeDirectory parameter value. The following is an Entry and Target pair example for - chroot. [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] + to the value the user should see for their home directory when they log in. The following + is an Entry and Target pair example for chroot. [ { \"Entry\": \"/\", \"Target\": + \"/bucket_name/home/mydirectory\" } ] - `"HomeDirectoryType"`: The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will - see the absolute Amazon S3 bucket or EFS paths as is in their file transfer protocol - clients. If you set it LOGICAL, you need to provide mappings in the HomeDirectoryMappings - for how you want to make Amazon S3 or Amazon EFS paths visible to your users. + see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol + clients. If you set it to LOGICAL, you need to provide mappings in the + HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to + your users. 
If HomeDirectoryType is LOGICAL, you must provide mappings, using the + HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you + provide an absolute path using the HomeDirectory parameter. You cannot have both + HomeDirectory and HomeDirectoryMappings in your template. - `"Policy"`: A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy @@ -716,7 +754,7 @@ end delete_connector(connector_id) delete_connector(connector_id, params::Dict{String,<:Any}) -Deletes the agreement that's specified in the provided ConnectorId. +Deletes the connector that's specified in the provided ConnectorId. # Arguments - `connector_id`: The unique identifier for the connector. @@ -1268,13 +1306,14 @@ end describe_security_policy(security_policy_name) describe_security_policy(security_policy_name, params::Dict{String,<:Any}) -Describes the security policy that is attached to your file transfer protocol-enabled -server. The response contains a description of the security policy's properties. For more -information about security policies, see Working with security policies. +Describes the security policy that is attached to your server or SFTP connector. The +response contains a description of the security policy's properties. For more information +about security policies, see Working with security policies for servers or Working with +security policies for SFTP connectors. # Arguments -- `security_policy_name`: Specifies the name of the security policy that is attached to the - server. +- `security_policy_name`: Specify the text name of the security policy for which you want + the details. """ function describe_security_policy( @@ -1428,7 +1467,9 @@ profiles and partner profiles. example, --certificate file://encryption-cert.pem. Alternatively, you can provide the raw content. For the SDK, specify the raw content of a certificate file. For example, --certificate \"`cat encryption-cert.pem`\". -- `usage`: Specifies whether this certificate is used for signing or encryption. +- `usage`: Specifies how this certificate is used. It can be used in the following ways: + SIGNING: For signing AS2 messages ENCRYPTION: For encrypting AS2 messages TLS: For + securing AS2 communications sent over HTTPS # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1833,8 +1874,9 @@ end list_security_policies() list_security_policies(params::Dict{String,<:Any}) -Lists the security policies that are attached to your file transfer protocol-enabled -servers. +Lists the security policies that are attached to your servers and SFTP connectors. For more +information about security policies, see Working with security policies for servers or +Working with security policies for SFTP connectors. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1940,9 +1982,9 @@ ServerId parameter. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: Specifies the number of users to return as a response to the ListUsers request. -- `"NextToken"`: When you can get additional results from the ListUsers call, a NextToken - parameter is returned in the output. You can then pass in a subsequent command to the - NextToken parameter to continue listing additional users. 
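Both the old and the revised wording of this parameter describe standard token pagination; a minimal sketch of that loop follows, assuming a placeholder server ID and the usual AWS.jl `@service` pattern.

```julia
using AWS
@service Transfer

server_id = "s-1234567890abcdef0"  # placeholder Transfer Family server ID
users  = Any[]
params = Dict{String,Any}("MaxResults" => 100)

while true
    resp = Transfer.list_users(server_id, params)
    append!(users, resp["Users"])
    # Keep paging while the service returns a NextToken.
    token = get(resp, "NextToken", nothing)
    token === nothing && break
    params["NextToken"] = token
end

@show length(users)
```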
+- `"NextToken"`: If there are additional results from the ListUsers call, a NextToken + parameter is returned in the output. You can then pass the NextToken to a subsequent + ListUsers command, to continue listing additional users. """ function list_users(ServerId; aws_config::AbstractAWSConfig=global_aws_config()) return transfer( @@ -2055,41 +2097,76 @@ function send_workflow_step_state( end """ - start_file_transfer(connector_id, send_file_paths) - start_file_transfer(connector_id, send_file_paths, params::Dict{String,<:Any}) + start_directory_listing(connector_id, output_directory_path, remote_directory_path) + start_directory_listing(connector_id, output_directory_path, remote_directory_path, params::Dict{String,<:Any}) -Begins an outbound file transfer to a remote AS2 server. You specify the ConnectorId and -the file paths for where to send the files. +Retrieves a list of the contents of a directory from a remote SFTP server. You specify the +connector ID, the output path, and the remote directory path. You can also specify the +optional MaxItems value to control the maximum number of items that are listed from the +remote directory. This API returns a list of all files and directories in the remote +directory (up to the maximum value), but does not return files or folders in +sub-directories. That is, it only returns a list of files and directories one-level deep. +After you receive the listing file, you can provide the files that you want to transfer to +the RetrieveFilePaths parameter of the StartFileTransfer API call. The naming convention +for the output file is connector-ID-listing-ID.json. The output file contains the +following information: filePath: the complete path of a remote file, relative to the +directory of the listing request for your SFTP connector on the remote server. +modifiedTimestamp: the last time the file was modified, in UTC time format. This field is +optional. If the remote file attributes don't contain a timestamp, it is omitted from the +file listing. size: the size of the file, in bytes. This field is optional. If the +remote file attributes don't contain a file size, it is omitted from the file listing. +path: the complete path of a remote directory, relative to the directory of the listing +request for your SFTP connector on the remote server. truncated: a flag indicating +whether the list output contains all of the items contained in the remote directory or not. +If your Truncated output value is true, you can increase the value provided in the optional +max-items input attribute to be able to list more items (up to the maximum allowed list +size of 10,000 items). # Arguments - `connector_id`: The unique identifier for the connector. -- `send_file_paths`: An array of strings. Each string represents the absolute path for one - outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt . +- `output_directory_path`: Specifies the path (bucket and prefix) in Amazon S3 storage to + store the results of the directory listing. +- `remote_directory_path`: Specifies the directory on the remote SFTP server for which you + want to list its contents. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxItems"`: An optional parameter where you can specify the maximum number of + file/directory names to retrieve. The default value is 1,000. 
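To see how the listing output feeds a transfer, here is a hedged sketch that lists a remote directory and then moves files in both directions with the StartFileTransfer wrapper that appears later in this file; the connector ID, bucket names, and paths are placeholders, and the usual AWS.jl `@service` loading pattern is assumed.

```julia
using AWS
@service Transfer

connector_id = "c-1234567890abcdef0"  # placeholder SFTP connector ID

# 1. List the remote directory; the listing JSON is written to the S3 prefix below.
Transfer.start_directory_listing(
    connector_id,
    "/amzn-s3-demo-bucket/listings/",   # OutputDirectoryPath (S3 bucket and prefix)
    "/outbox",                          # RemoteDirectoryPath on the partner's server
    Dict{String,Any}("MaxItems" => 500),
)

# 2. Pull selected files (paths taken from the listing) into Amazon S3.
Transfer.start_file_transfer(
    connector_id,
    Dict{String,Any}(
        "RetrieveFilePaths"  => ["/outbox/statement.csv"],
        "LocalDirectoryPath" => "/amzn-s3-demo-bucket/inbound",
    ),
)

# 3. Push a file from Amazon S3 to the partner's server.
Transfer.start_file_transfer(
    connector_id,
    Dict{String,Any}(
        "SendFilePaths"       => ["/amzn-s3-demo-bucket/outbound/invoice.csv"],
        "RemoteDirectoryPath" => "/inbox",
    ),
)
```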
""" -function start_file_transfer( - ConnectorId, SendFilePaths; aws_config::AbstractAWSConfig=global_aws_config() +function start_directory_listing( + ConnectorId, + OutputDirectoryPath, + RemoteDirectoryPath; + aws_config::AbstractAWSConfig=global_aws_config(), ) return transfer( - "StartFileTransfer", - Dict{String,Any}("ConnectorId" => ConnectorId, "SendFilePaths" => SendFilePaths); + "StartDirectoryListing", + Dict{String,Any}( + "ConnectorId" => ConnectorId, + "OutputDirectoryPath" => OutputDirectoryPath, + "RemoteDirectoryPath" => RemoteDirectoryPath, + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function start_file_transfer( +function start_directory_listing( ConnectorId, - SendFilePaths, + OutputDirectoryPath, + RemoteDirectoryPath, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return transfer( - "StartFileTransfer", + "StartDirectoryListing", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( - "ConnectorId" => ConnectorId, "SendFilePaths" => SendFilePaths + "ConnectorId" => ConnectorId, + "OutputDirectoryPath" => OutputDirectoryPath, + "RemoteDirectoryPath" => RemoteDirectoryPath, ), params, ), @@ -2099,6 +2176,62 @@ function start_file_transfer( ) end +""" + start_file_transfer(connector_id) + start_file_transfer(connector_id, params::Dict{String,<:Any}) + +Begins a file transfer between local Amazon Web Services storage and a remote AS2 or SFTP +server. For an AS2 connector, you specify the ConnectorId and one or more SendFilePaths +to identify the files you want to transfer. For an SFTP connector, the file transfer can +be either outbound or inbound. In both cases, you specify the ConnectorId. Depending on the +direction of the transfer, you also specify the following items: If you are transferring +file from a partner's SFTP server to Amazon Web Services storage, you specify one or more +RetrieveFilePaths to identify the files you want to transfer, and a LocalDirectoryPath to +specify the destination folder. If you are transferring file to a partner's SFTP server +from Amazon Web Services storage, you specify one or more SendFilePaths to identify the +files you want to transfer, and a RemoteDirectoryPath to specify the destination folder. + +# Arguments +- `connector_id`: The unique identifier for the connector. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LocalDirectoryPath"`: For an inbound transfer, the LocaDirectoryPath specifies the + destination for one or more files that are transferred from the partner's SFTP server. +- `"RemoteDirectoryPath"`: For an outbound transfer, the RemoteDirectoryPath specifies the + destination for one or more files that are transferred to the partner's SFTP server. If you + don't specify a RemoteDirectoryPath, the destination for transferred files is the SFTP + user's home directory. +- `"RetrieveFilePaths"`: One or more source paths for the partner's SFTP server. Each + string represents a source file path for one inbound file transfer. +- `"SendFilePaths"`: One or more source paths for the Amazon S3 storage. Each string + represents a source file path for one outbound file transfer. For example, + DOC-EXAMPLE-BUCKET/myfile.txt . Replace DOC-EXAMPLE-BUCKET with one of your actual + buckets. 
+""" +function start_file_transfer(ConnectorId; aws_config::AbstractAWSConfig=global_aws_config()) + return transfer( + "StartFileTransfer", + Dict{String,Any}("ConnectorId" => ConnectorId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_file_transfer( + ConnectorId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return transfer( + "StartFileTransfer", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectorId" => ConnectorId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_server(server_id) start_server(server_id, params::Dict{String,<:Any}) @@ -2216,6 +2349,41 @@ function tag_resource( ) end +""" + test_connection(connector_id) + test_connection(connector_id, params::Dict{String,<:Any}) + +Tests whether your SFTP connector is set up successfully. We highly recommend that you call +this operation to test your ability to transfer files between local Amazon Web Services +storage and a trading partner's SFTP server. + +# Arguments +- `connector_id`: The unique identifier for the connector. + +""" +function test_connection(ConnectorId; aws_config::AbstractAWSConfig=global_aws_config()) + return transfer( + "TestConnection", + Dict{String,Any}("ConnectorId" => ConnectorId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function test_connection( + ConnectorId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return transfer( + "TestConnection", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectorId" => ConnectorId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ test_identity_provider(server_id, user_name) test_identity_provider(server_id, user_name, params::Dict{String,<:Any}) @@ -2346,7 +2514,8 @@ parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"HomeDirectory"`: The landing directory (folder) for a user when they log in to the - server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. + server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. The + HomeDirectory parameter is only used if HomeDirectoryType is set to PATH. - `"HomeDirectoryMappings"`: Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made @@ -2361,9 +2530,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys chroot. [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] - `"HomeDirectoryType"`: The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will - see the absolute Amazon S3 bucket or EFS paths as is in their file transfer protocol - clients. If you set it LOGICAL, you need to provide mappings in the HomeDirectoryMappings - for how you want to make Amazon S3 or Amazon EFS paths visible to your users. + see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol + clients. If you set it to LOGICAL, you need to provide mappings in the + HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to + your users. 
If HomeDirectoryType is LOGICAL, you must provide mappings, using the + HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you + provide an absolute path using the HomeDirectory parameter. You cannot have both + HomeDirectory and HomeDirectoryMappings in your template. - `"Policy"`: A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy @@ -2428,15 +2601,24 @@ parameters to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AccessRole"`: With AS2, you can send files by calling StartFileTransfer and specifying - the file paths in the request parameter, SendFilePaths. We use the file’s parent - directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is - /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we - receive them from the partner, and write a final JSON file containing relevant metadata of - the transmission. So, the AccessRole needs to provide read and write access to the parent - directory of the file location used in the StartFileTransfer request. Additionally, you - need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. +- `"AccessRole"`: Connectors are used to send files using either the AS2 or SFTP protocol. + For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access + Management role to use. For AS2 connectors With AS2, you can send files by calling + StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We + use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, + parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store + the MDN when we receive them from the partner, and write a final JSON file containing + relevant metadata of the transmission. So, the AccessRole needs to provide read and write + access to the parent directory of the file location used in the StartFileTransfer request. + Additionally, you need to provide read and write access to the parent directory of the + files that you intend to send with StartFileTransfer. If you are using Basic authentication + for your AS2 connector, the access role requires the secretsmanager:GetSecretValue + permission for the secret. If the secret is encrypted using a customer-managed key instead + of the Amazon Web Services managed key in Secrets Manager, then the role also needs the + kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role + provides read and write access to the parent directory of the file location that's used in + the StartFileTransfer request. Additionally, make sure that the role provides + secretsmanager:GetSecretValue permission to Secrets Manager. - `"BaseDirectory"`: To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /DOC-EXAMPLE-BUCKET/home/mydirectory . @@ -2531,20 +2713,31 @@ connector that you want to update, along with the new values for the parameters # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"AccessRole"`: With AS2, you can send files by calling StartFileTransfer and specifying - the file paths in the request parameter, SendFilePaths. We use the file’s parent - directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is - /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we - receive them from the partner, and write a final JSON file containing relevant metadata of - the transmission. So, the AccessRole needs to provide read and write access to the parent - directory of the file location used in the StartFileTransfer request. Additionally, you - need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. -- `"As2Config"`: A structure that contains the parameters for a connector object. +- `"AccessRole"`: Connectors are used to send files using either the AS2 or SFTP protocol. + For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access + Management role to use. For AS2 connectors With AS2, you can send files by calling + StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We + use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, + parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store + the MDN when we receive them from the partner, and write a final JSON file containing + relevant metadata of the transmission. So, the AccessRole needs to provide read and write + access to the parent directory of the file location used in the StartFileTransfer request. + Additionally, you need to provide read and write access to the parent directory of the + files that you intend to send with StartFileTransfer. If you are using Basic authentication + for your AS2 connector, the access role requires the secretsmanager:GetSecretValue + permission for the secret. If the secret is encrypted using a customer-managed key instead + of the Amazon Web Services managed key in Secrets Manager, then the role also needs the + kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role + provides read and write access to the parent directory of the file location that's used in + the StartFileTransfer request. Additionally, make sure that the role provides + secretsmanager:GetSecretValue permission to Secrets Manager. +- `"As2Config"`: A structure that contains the parameters for an AS2 connector object. - `"LoggingRole"`: The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs. -- `"Url"`: The URL of the partner's AS2 endpoint. +- `"SecurityPolicyName"`: Specifies the name of the security policy for the connector. +- `"SftpConfig"`: A structure that contains the parameters for an SFTP connector object. +- `"Url"`: The URL of the partner's AS2 or SFTP endpoint. """ function update_connector(ConnectorId; aws_config::AbstractAWSConfig=global_aws_config()) return transfer( @@ -2757,8 +2950,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the IdentityProviderType can be set any of the supported identity types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY. If Protocol includes AS2, then the EndpointType must be VPC, and domain must be Amazon S3. 
-- `"SecurityPolicyName"`: Specifies the name of the security policy that is attached to the - server. +- `"S3StorageOptions"`: Specifies whether or not performance for your Amazon S3 directories + is optimized. This is disabled by default. By default, home directory mappings have a TYPE + of DIRECTORY. If you enable this option, you would then need to explicitly set the + HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target. +- `"SecurityPolicyName"`: Specifies the name of the security policy for the server. +- `"StructuredLogDestinations"`: Specifies the log groups to which your server logs are + sent. To specify a log group, you must provide the ARN for an existing log group. In this + case, the format of the log group is as follows: + arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* For example, + arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* If you have previously + specified a log group for a server, you can clear it, and in effect turn off structured + logging, by providing an empty value for this parameter in an update-server call. For + example: update-server --server-id s-1234567890abcdef0 --structured-log-destinations - `"WorkflowDetails"`: Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow. In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and @@ -2797,7 +3001,16 @@ end Assigns new properties to a user. Parameters you pass modify any or all of the following: the home directory, role, and policy for the UserName and ServerId you specify. The -response returns the ServerId and the UserName for the updated user. +response returns the ServerId and the UserName for the updated user. In the console, you +can select Restricted when you create or update a user. This ensures that the user can't +access anything outside of their home directory. The programmatic way to configure this +behavior is to update the user. Set their HomeDirectoryType to LOGICAL, and specify +HomeDirectoryMappings with Entry as root (/) and Target as their home directory. For +example, if the user's home directory is /test/admin-user, the following command updates +the user so that their configuration in the console shows the Restricted flag as selected. + aws transfer update-user --server-id <server-id> --user-name admin-user +--home-directory-type LOGICAL --home-directory-mappings \"[{\"Entry\":\"/\", +\"Target\":\"/test/admin-user\"}]\" # Arguments - `server_id`: A system-assigned unique identifier for a Transfer Family server instance @@ -2811,7 +3024,8 @@ response returns the ServerId and the UserName for the updated user. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"HomeDirectory"`: The landing directory (folder) for a user when they log in to the - server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. + server using the client. A HomeDirectory example is /bucket_name/home/mydirectory. The + HomeDirectory parameter is only used if HomeDirectoryType is set to PATH. - `"HomeDirectoryMappings"`: Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made @@ -2826,9 +3040,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys chroot. [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] - `"HomeDirectoryType"`: The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will - see the absolute Amazon S3 bucket or EFS paths as is in their file transfer protocol - clients. If you set it LOGICAL, you need to provide mappings in the HomeDirectoryMappings - for how you want to make Amazon S3 or Amazon EFS paths visible to your users. + see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol + clients. If you set it to LOGICAL, you need to provide mappings in the + HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to + your users. If HomeDirectoryType is LOGICAL, you must provide mappings, using the + HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you + provide an absolute path using the HomeDirectory parameter. You cannot have both + HomeDirectory and HomeDirectoryMappings in your template. - `"Policy"`: A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy diff --git a/src/services/translate.jl b/src/services/translate.jl index e02e88d61d..fe68449c7d 100644 --- a/src/services/translate.jl +++ b/src/services/translate.jl @@ -477,8 +477,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys than other jobs that don't use parallel data. For more information, see Amazon Translate pricing. For a list of available parallel data resources, use the ListParallelData operation. For more information, see Customizing your translations with parallel data. -- `"Settings"`: Settings to configure your translation output, including the option to set - the formality level of the output text and the option to mask profane words and phrases. +- `"Settings"`: Settings to configure your translation output. You can configure the + following options: Brevity: not supported. Formality: sets the formality level of the + output text. Profanity: masks profane words and phrases in your translation output. - `"TerminologyNames"`: The name of a custom terminology resource to add to the translation job. This resource lists examples source terms and the desired translation for each term. This parameter accepts only one custom terminology resource. If you specify multiple target @@ -623,26 +624,31 @@ end translate_document(document, source_language_code, target_language_code, params::Dict{String,<:Any}) Translates the input document from the source language to the target language. This -synchronous operation supports plain text or HTML for the input document. TranslateDocument -supports translations from English to any supported language, and from any supported -language to English. Therefore, specify either the source language code or the target -language code as “en” (English). TranslateDocument does not support language -auto-detection. If you set the Formality parameter, the request will fail if the target -language does not support formality. For a list of target languages that support formality, -see Setting formality. +synchronous operation supports text, HTML, or Word documents as the input document. 
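For the Settings options described in the hunks above, a hedged sketch using the real-time TranslateText wrapper; the text and language codes are placeholders, and Brevity applies only to real-time translation, as noted further below.

```julia
using AWS
@service Translate

resp = Translate.translate_text(
    "en",   # SourceLanguageCode ("auto" is also accepted for real-time translation)
    "es",   # TargetLanguageCode
    "Please review the attached report before Friday.",
    Dict{String,Any}(
        "Settings" => Dict(
            "Formality" => "FORMAL",  # honoured only for target languages that support formality
            "Profanity" => "MASK",    # masks profane words and phrases
            "Brevity"   => "ON",      # real-time only; the batch job does not support it
        ),
    ),
)

# The parsed response includes the translated string.
println(resp["TranslatedText"])
```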
+TranslateDocument supports translations from English to any supported language, and from +any supported language to English. Therefore, specify either the source language code or +the target language code as “en” (English). If you set the Formality parameter, the +request will fail if the target language does not support formality. For a list of target +languages that support formality, see Setting formality. # Arguments - `document`: The content and content type for the document to be translated. The document size must not exceed 100 KB. -- `source_language_code`: The language code for the language of the source text. Do not use - auto, because TranslateDocument does not support language auto-detection. For a list of - supported language codes, see Supported languages. +- `source_language_code`: The language code for the language of the source text. For a list + of supported language codes, see Supported languages. To have Amazon Translate determine + the source language of your text, you can specify auto in the SourceLanguageCode field. If + you specify auto, Amazon Translate will call Amazon Comprehend to determine the source + language. If you specify auto, you must send the TranslateDocument request in a region + that supports Amazon Comprehend. Otherwise, the request returns an error indicating that + autodetect is not supported. - `target_language_code`: The language code requested for the translated document. For a list of supported language codes, see Supported languages. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Settings"`: +- `"Settings"`: Settings to configure your translation output. You can configure the + following options: Brevity: not supported. Formality: sets the formality level of the + output text. Profanity: masks profane words and phrases in your translation output. - `"TerminologyNames"`: The name of a terminology list file to add to the translation job. This file provides source terms and the desired translation for each term. A terminology list can contain a maximum of 256 terms. You can use one custom terminology resource in @@ -714,8 +720,10 @@ available languages and language codes, see Supported languages. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Settings"`: Settings to configure your translation output, including the option to set - the formality level of the output text and the option to mask profane words and phrases. +- `"Settings"`: Settings to configure your translation output. You can configure the + following options: Brevity: reduces the length of the translated output for most + translations. Formality: sets the formality level of the output text. Profanity: masks + profane words and phrases in your translation output. - `"TerminologyNames"`: The name of a terminology list file to add to the translation job. This file provides source terms and the desired translation for each term. A terminology list can contain a maximum of 256 terms. 
You can use one custom terminology resource in diff --git a/src/services/trustedadvisor.jl b/src/services/trustedadvisor.jl new file mode 100644 index 0000000000..4a48afe304 --- /dev/null +++ b/src/services/trustedadvisor.jl @@ -0,0 +1,450 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: trustedadvisor +using AWS.Compat +using AWS.UUIDs + +""" + batch_update_recommendation_resource_exclusion(recommendation_resource_exclusions) + batch_update_recommendation_resource_exclusion(recommendation_resource_exclusions, params::Dict{String,<:Any}) + +Update one or more exclusion status for a list of recommendation resources + +# Arguments +- `recommendation_resource_exclusions`: A list of recommendation resource ARNs and + exclusion status to update + +""" +function batch_update_recommendation_resource_exclusion( + recommendationResourceExclusions; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "PUT", + "/v1/batch-update-recommendation-resource-exclusion", + Dict{String,Any}( + "recommendationResourceExclusions" => recommendationResourceExclusions + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_update_recommendation_resource_exclusion( + recommendationResourceExclusions, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "PUT", + "/v1/batch-update-recommendation-resource-exclusion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "recommendationResourceExclusions" => recommendationResourceExclusions + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_organization_recommendation(organization_recommendation_identifier) + get_organization_recommendation(organization_recommendation_identifier, params::Dict{String,<:Any}) + +Get a specific recommendation within an AWS Organizations organization. This API supports +only prioritized recommendations. 
+ +# Arguments +- `organization_recommendation_identifier`: The Recommendation identifier + +""" +function get_organization_recommendation( + organizationRecommendationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_organization_recommendation( + organizationRecommendationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_recommendation(recommendation_identifier) + get_recommendation(recommendation_identifier, params::Dict{String,<:Any}) + +Get a specific Recommendation + +# Arguments +- `recommendation_identifier`: The Recommendation identifier + +""" +function get_recommendation( + recommendationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/recommendations/$(recommendationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_recommendation( + recommendationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "GET", + "/v1/recommendations/$(recommendationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_checks() + list_checks(params::Dict{String,<:Any}) + +List a filterable set of Checks + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"awsService"`: The aws service associated with the check +- `"language"`: The ISO 639-1 code for the language that you want your checks to appear in. +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"pillar"`: The pillar of the check +- `"source"`: The source of the check +""" +function list_checks(; aws_config::AbstractAWSConfig=global_aws_config()) + return trustedadvisor( + "GET", "/v1/checks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_checks( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", "/v1/checks", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_organization_recommendation_accounts(organization_recommendation_identifier) + list_organization_recommendation_accounts(organization_recommendation_identifier, params::Dict{String,<:Any}) + +Lists the accounts that own the resources for an organization aggregate recommendation. +This API only supports prioritized recommendations. + +# Arguments +- `organization_recommendation_identifier`: The Recommendation identifier + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"affectedAccountId"`: An account affected by this organization recommendation +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. 
Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_organization_recommendation_accounts( + organizationRecommendationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)/accounts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_organization_recommendation_accounts( + organizationRecommendationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)/accounts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_organization_recommendation_resources(organization_recommendation_identifier) + list_organization_recommendation_resources(organization_recommendation_identifier, params::Dict{String,<:Any}) + +List Resources of a Recommendation within an Organization. This API only supports +prioritized recommendations. + +# Arguments +- `organization_recommendation_identifier`: The AWS Organization organization's + Recommendation identifier + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"affectedAccountId"`: An account affected by this organization recommendation +- `"exclusionStatus"`: The exclusion status of the resource +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"regionCode"`: The AWS Region code of the resource +- `"status"`: The status of the resource +""" +function list_organization_recommendation_resources( + organizationRecommendationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)/resources"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_organization_recommendation_resources( + organizationRecommendationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)/resources", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_organization_recommendations() + list_organization_recommendations(params::Dict{String,<:Any}) + +List a filterable set of Recommendations within an Organization. This API only supports +prioritized recommendations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"afterLastUpdatedAt"`: After the last update of the Recommendation +- `"awsService"`: The aws service associated with the Recommendation +- `"beforeLastUpdatedAt"`: Before the last update of the Recommendation +- `"checkIdentifier"`: The check identifier of the Recommendation +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+- `"pillar"`: The pillar of the Recommendation +- `"source"`: The source of the Recommendation +- `"status"`: The status of the Recommendation +- `"type"`: The type of the Recommendation +""" +function list_organization_recommendations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_organization_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/organization-recommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_recommendation_resources(recommendation_identifier) + list_recommendation_resources(recommendation_identifier, params::Dict{String,<:Any}) + +List Resources of a Recommendation + +# Arguments +- `recommendation_identifier`: The Recommendation identifier + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"exclusionStatus"`: The exclusion status of the resource +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"regionCode"`: The AWS Region code of the resource +- `"status"`: The status of the resource +""" +function list_recommendation_resources( + recommendationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/recommendations/$(recommendationIdentifier)/resources"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_recommendation_resources( + recommendationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "GET", + "/v1/recommendations/$(recommendationIdentifier)/resources", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_recommendations() + list_recommendations(params::Dict{String,<:Any}) + +List a filterable set of Recommendations + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"afterLastUpdatedAt"`: After the last update of the Recommendation +- `"awsService"`: The aws service associated with the Recommendation +- `"beforeLastUpdatedAt"`: Before the last update of the Recommendation +- `"checkIdentifier"`: The check identifier of the Recommendation +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+- `"pillar"`: The pillar of the Recommendation +- `"source"`: The source of the Recommendation +- `"status"`: The status of the Recommendation +- `"type"`: The type of the Recommendation +""" +function list_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return trustedadvisor( + "GET", "/v1/recommendations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return trustedadvisor( + "GET", + "/v1/recommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_organization_recommendation_lifecycle(lifecycle_stage, organization_recommendation_identifier) + update_organization_recommendation_lifecycle(lifecycle_stage, organization_recommendation_identifier, params::Dict{String,<:Any}) + +Update the lifecycle of a Recommendation within an Organization. This API only supports +prioritized recommendations. + +# Arguments +- `lifecycle_stage`: The new lifecycle stage +- `organization_recommendation_identifier`: The Recommendation identifier for AWS Trusted + Advisor Priority recommendations + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"updateReason"`: Reason for the lifecycle stage change +- `"updateReasonCode"`: Reason code for the lifecycle state change +""" +function update_organization_recommendation_lifecycle( + lifecycleStage, + organizationRecommendationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "PUT", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)/lifecycle", + Dict{String,Any}("lifecycleStage" => lifecycleStage); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_organization_recommendation_lifecycle( + lifecycleStage, + organizationRecommendationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "PUT", + "/v1/organization-recommendations/$(organizationRecommendationIdentifier)/lifecycle", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("lifecycleStage" => lifecycleStage), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_recommendation_lifecycle(lifecycle_stage, recommendation_identifier) + update_recommendation_lifecycle(lifecycle_stage, recommendation_identifier, params::Dict{String,<:Any}) + +Update the lifecyle of a Recommendation. This API only supports prioritized recommendations. + +# Arguments +- `lifecycle_stage`: The new lifecycle stage +- `recommendation_identifier`: The Recommendation identifier for AWS Trusted Advisor + Priority recommendations + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"updateReason"`: Reason for the lifecycle stage change +- `"updateReasonCode"`: Reason code for the lifecycle state change +""" +function update_recommendation_lifecycle( + lifecycleStage, + recommendationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "PUT", + "/v1/recommendations/$(recommendationIdentifier)/lifecycle", + Dict{String,Any}("lifecycleStage" => lifecycleStage); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_recommendation_lifecycle( + lifecycleStage, + recommendationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return trustedadvisor( + "PUT", + "/v1/recommendations/$(recommendationIdentifier)/lifecycle", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("lifecycleStage" => lifecycleStage), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/verifiedpermissions.jl b/src/services/verifiedpermissions.jl index af1304036f..33d1f77ac6 100644 --- a/src/services/verifiedpermissions.jl +++ b/src/services/verifiedpermissions.jl @@ -4,34 +4,167 @@ using AWS.AWSServices: verifiedpermissions using AWS.Compat using AWS.UUIDs +""" + batch_is_authorized(policy_store_id, requests) + batch_is_authorized(policy_store_id, requests, params::Dict{String,<:Any}) + +Makes a series of decisions about multiple authorization requests for one principal or +resource. Each request contains the equivalent content of an IsAuthorized request: +principal, action, resource, and context. Either the principal or the resource parameter +must be identical across all requests. For example, Verified Permissions won't evaluate a +pair of requests where bob views photo1 and alice views photo2. Authorization of bob to +view photo1 and photo2, or bob and alice to view photo1, are valid batches. The request is +evaluated against all policies in the specified policy store that match the entities that +you declare. The result of the decisions is a series of Allow or Deny responses, along with +the IDs of the policies that produced each decision. The entities of a BatchIsAuthorized +API request can contain up to 100 principals and up to 100 resources. The requests of a +BatchIsAuthorized API request can contain up to 30 requests. The BatchIsAuthorized +operation doesn't have its own IAM permission. To authorize this operation for Amazon Web +Services principals, include the permission verifiedpermissions:IsAuthorized in their IAM +policies. + +# Arguments +- `policy_store_id`: Specifies the ID of the policy store. Policies in this policy store + will be used to make the authorization decisions for the input. +- `requests`: An array of up to 30 requests that you want Verified Permissions to evaluate. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"entities"`: Specifies the list of resources and principals and their associated + attributes that Verified Permissions can examine when evaluating the policies. You can + include only principal and resource entities in this parameter; you can't include actions. + You must specify actions in the schema. 
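A minimal sketch of a valid batch for one principal, using a hypothetical policy store ID and entity values; the `entityType`/`entityId` and `actionType`/`actionId` keys are assumed from the Verified Permissions request shapes and should be checked against the API reference:

    # Two requests that share the same principal, so the batch is valid.
    requests = [
        Dict(
            "principal" => Dict("entityType" => "PhotoApp::User", "entityId" => "bob"),
            "action" => Dict("actionType" => "PhotoApp::Action", "actionId" => "ViewPhoto"),
            "resource" => Dict("entityType" => "PhotoApp::Photo", "entityId" => "photo1")
        ),
        Dict(
            "principal" => Dict("entityType" => "PhotoApp::User", "entityId" => "bob"),
            "action" => Dict("actionType" => "PhotoApp::Action", "actionId" => "ViewPhoto"),
            "resource" => Dict("entityType" => "PhotoApp::Photo", "entityId" => "photo2")
        )
    ]
    decisions = batch_is_authorized("PSEXAMPLEabcdefg111111", requests)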
+""" +function batch_is_authorized( + policyStoreId, requests; aws_config::AbstractAWSConfig=global_aws_config() +) + return verifiedpermissions( + "BatchIsAuthorized", + Dict{String,Any}("policyStoreId" => policyStoreId, "requests" => requests); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_is_authorized( + policyStoreId, + requests, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return verifiedpermissions( + "BatchIsAuthorized", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("policyStoreId" => policyStoreId, "requests" => requests), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_is_authorized_with_token(policy_store_id, requests) + batch_is_authorized_with_token(policy_store_id, requests, params::Dict{String,<:Any}) + +Makes a series of decisions about multiple authorization requests for one token. The +principal in this request comes from an external identity source in the form of an identity +or access token, formatted as a JSON web token (JWT). The information in the parameters can +also define additional context that Verified Permissions can include in the evaluations. +The request is evaluated against all policies in the specified policy store that match the +entities that you provide in the entities declaration and in the token. The result of the +decisions is a series of Allow or Deny responses, along with the IDs of the policies that +produced each decision. The entities of a BatchIsAuthorizedWithToken API request can +contain up to 100 resources and up to 99 user groups. The requests of a +BatchIsAuthorizedWithToken API request can contain up to 30 requests. The +BatchIsAuthorizedWithToken operation doesn't have its own IAM permission. To authorize this +operation for Amazon Web Services principals, include the permission +verifiedpermissions:IsAuthorizedWithToken in their IAM policies. + +# Arguments +- `policy_store_id`: Specifies the ID of the policy store. Policies in this policy store + will be used to make an authorization decision for the input. +- `requests`: An array of up to 30 requests that you want Verified Permissions to evaluate. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accessToken"`: Specifies an access token for the principal that you want to authorize + in each request. This token is provided to you by the identity provider (IdP) associated + with the specified identity source. You must specify either an accessToken, an + identityToken, or both. Must be an access token. Verified Permissions returns an error if + the token_use claim in the submitted token isn't access. +- `"entities"`: Specifies the list of resources and their associated attributes that + Verified Permissions can examine when evaluating the policies. You can't include + principals in this parameter, only resource and action entities. This parameter can't + include any entities of a type that matches the user or group entity types that you defined + in your identity source. The BatchIsAuthorizedWithToken operation takes principal + attributes from only the identityToken or accessToken passed to the operation. For + action entities, you can include only their Identifier and EntityType. +- `"identityToken"`: Specifies an identity (ID) token for the principal that you want to + authorize in each request. 
This token is provided to you by the identity provider (IdP) + associated with the specified identity source. You must specify either an accessToken, an + identityToken, or both. Must be an ID token. Verified Permissions returns an error if the + token_use claim in the submitted token isn't id. +""" +function batch_is_authorized_with_token( + policyStoreId, requests; aws_config::AbstractAWSConfig=global_aws_config() +) + return verifiedpermissions( + "BatchIsAuthorizedWithToken", + Dict{String,Any}("policyStoreId" => policyStoreId, "requests" => requests); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_is_authorized_with_token( + policyStoreId, + requests, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return verifiedpermissions( + "BatchIsAuthorizedWithToken", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("policyStoreId" => policyStoreId, "requests" => requests), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_identity_source(configuration, policy_store_id) create_identity_source(configuration, policy_store_id, params::Dict{String,<:Any}) -Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP). -After you create an identity source, you can use the identities provided by the IdP as -proxies for the principal in authorization queries that use the IsAuthorizedWithToken -operation. These identities take the form of tokens that contain claims about the user, -such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens -and access tokens, and Verified Permissions can use either or both. Any combination of -identity and access tokens results in the same Cedar principal. Verified Permissions -automatically translates the information about the identities into the standard Cedar -attributes that can be evaluated by your policies. Because the Amazon Cognito identity and -access tokens can contain different information, the tokens you choose to use determine -which principal attributes are available to access when evaluating Cedar policies. If you -delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted -user continue to be usable until they expire. To reference a user from this identity -source in your Cedar policies, use the following syntax. -IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId> Where -IdentityType is the string that you provide to the PrincipalEntityType parameter for this -operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user -pool. +Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect +(OIDC) identity provider (IdP). After you create an identity source, you can use the +identities provided by the IdP as proxies for the principal in authorization queries that +use the IsAuthorizedWithToken or BatchIsAuthorizedWithToken API operations. These +identities take the form of tokens that contain claims about the user, such as IDs, +attributes and group memberships. Identity sources provide identity (ID) tokens and access +tokens. Verified Permissions derives information about your user and session from token +claims. Access tokens provide action context to your policies, and ID tokens provide +principal Attributes. Tokens from an identity source user continue to be usable until they +expire. 
Token revocation and resource deletion have no effect on the validity of a token in +your policy store To reference a user from this identity source in your Cedar policies, +refer to the following syntax examples. Amazon Cognito user pool: Namespace::[Entity +type]::[User pool ID]|[user principal attribute], for example +MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. OpenID Connect +(OIDC) provider: Namespace::[Entity type]::[principalIdClaim]|[user principal attribute], +for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222. Verified +Permissions is eventually consistent . It can take a few seconds for a new or changed +element to propagate through the service and be visible in the results of other Verified +Permissions operations. # Arguments - `configuration`: Specifies the details required to communicate with the identity provider - (IdP) associated with this identity source. At this time, the only valid member of this - structure is a Amazon Cognito user pool configuration. You must specify a UserPoolArn, and - optionally, a ClientId. + (IdP) associated with this identity source. - `policy_store_id`: Specifies the ID of the policy store in which you want to store this identity source. Only policies and requests made using this policy store can reference identities from the identity provider configured in the new identity source. @@ -44,8 +177,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.. If you don't provide this value, then Amazon Web Services generates a random one for you. If you retry the operation with the same - ClientToken, but with different parameters, the retry fails with an - IdempotentParameterMismatch error. + ClientToken, but with different parameters, the retry fails with an ConflictException + error. Verified Permissions recognizes a ClientToken for eight hours. After eight hours, + the next request with the same parameters performs the operation again regardless of the + value of ClientToken. - `"principalEntityType"`: Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source. """ @@ -99,7 +234,9 @@ the principal and resource to associate with this policy in the templateLinked s the PolicyDefinition. If the policy template is ever updated, any policies linked to the policy template automatically use the updated template. Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, -the operation fails and the policy isn't stored. +the operation fails and the policy isn't stored. Verified Permissions is eventually +consistent . It can take a few seconds for a new or changed element to propagate through +the service and be visible in the results of other Verified Permissions operations. # Arguments - `definition`: A structure that specifies the policy type and content to use for the new @@ -116,8 +253,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.. If you don't provide this value, then Amazon Web Services generates a random one for you. 
If you retry the operation with the same - ClientToken, but with different parameters, the retry fails with an - IdempotentParameterMismatch error. + ClientToken, but with different parameters, the retry fails with an ConflictException + error. Verified Permissions recognizes a ClientToken for eight hours. After eight hours, + the next request with the same parameters performs the operation again regardless of the + value of ClientToken. """ function create_policy( definition, policyStoreId; aws_config::AbstractAWSConfig=global_aws_config() @@ -161,7 +300,11 @@ end create_policy_store(validation_settings) create_policy_store(validation_settings, params::Dict{String,<:Any}) -Creates a policy store. A policy store is a container for policy resources. +Creates a policy store. A policy store is a container for policy resources. Although Cedar +supports multiple namespaces, Verified Permissions currently supports only one namespace +per policy store. Verified Permissions is eventually consistent . It can take a few +seconds for a new or changed element to propagate through the service and be visible in the +results of other Verified Permissions operations. # Arguments - `validation_settings`: Specifies the validation setting for this policy store. Currently, @@ -179,8 +322,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.. If you don't provide this value, then Amazon Web Services generates a random one for you. If you retry the operation with the same - ClientToken, but with different parameters, the retry fails with an - IdempotentParameterMismatch error. + ClientToken, but with different parameters, the retry fails with an ConflictException + error. Verified Permissions recognizes a ClientToken for eight hours. After eight hours, + the next request with the same parameters performs the operation again regardless of the + value of ClientToken. +- `"description"`: Descriptive text that you can provide to help with identification of the + current policy store. """ function create_policy_store( validationSettings; aws_config::AbstractAWSConfig=global_aws_config() @@ -225,7 +372,10 @@ A template must be instantiated into a policy by associating it with specific pr and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any -policies that are linked to that template are immediately updated as well. +policies that are linked to that template are immediately updated as well. Verified +Permissions is eventually consistent . It can take a few seconds for a new or changed +element to propagate through the service and be visible in the results of other Verified +Permissions operations. # Arguments - `policy_store_id`: The ID of the policy store in which to create the policy template. @@ -240,8 +390,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.. If you don't provide this value, then Amazon Web Services generates a random one for you. 
If you retry the operation with the same - ClientToken, but with different parameters, the retry fails with an - IdempotentParameterMismatch error. + ClientToken, but with different parameters, the retry fails with an ConflictException + error. Verified Permissions recognizes a ClientToken for eight hours. After eight hours, + the next request with the same parameters performs the operation again regardless of the + value of ClientToken. - `"description"`: Specifies a description for the policy template. """ function create_policy_template( @@ -682,8 +834,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys principal authorized to perform this action on the resource? - `"context"`: Specifies additional context that can be used to make more granular authorization decisions. -- `"entities"`: Specifies the list of entities and their associated attributes that - Verified Permissions can examine when evaluating the policies. +- `"entities"`: Specifies the list of resources and principals and their associated + attributes that Verified Permissions can examine when evaluating the policies. You can + include only principal and resource entities in this parameter; you can't include actions. + You must specify actions in the schema. - `"principal"`: Specifies the principal for which the authorization decision is to be made. - `"resource"`: Specifies the resource for which the authorization decision is to be made. """ @@ -715,12 +869,16 @@ end is_authorized_with_token(policy_store_id, params::Dict{String,<:Any}) Makes an authorization decision about a service request described in the parameters. The -principal in this request comes from an external identity source. The information in the -parameters can also define additional context that Verified Permissions can include in the -evaluation. The request is evaluated against all matching policies in the specified policy -store. The result of the decision is either Allow or Deny, along with a list of the -policies that resulted in the decision. If you delete a Amazon Cognito user pool or user, -tokens from that deleted pool or that deleted user continue to be usable until they expire. +principal in this request comes from an external identity source in the form of an identity +token formatted as a JSON web token (JWT). The information in the parameters can also +define additional context that Verified Permissions can include in the evaluation. The +request is evaluated against all matching policies in the specified policy store. The +result of the decision is either Allow or Deny, along with a list of the policies that +resulted in the decision. At this time, Verified Permissions accepts tokens from only +Amazon Cognito. Verified Permissions validates each token that is specified in a request by +checking its expiration date and its signature. Tokens from an identity source user +continue to be usable until they expire. Token revocation and resource deletion have no +effect on the validity of a token in your policy store # Arguments - `policy_store_id`: Specifies the ID of the policy store. Policies in this policy store @@ -730,16 +888,25 @@ tokens from that deleted pool or that deleted user continue to be usable until t Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"accessToken"`: Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity - source. 
You must specify either an AccessToken or an IdentityToken, but not both. + source. You must specify either an accessToken, an identityToken, or both. Must be an + access token. Verified Permissions returns an error if the token_use claim in the submitted + token isn't access. - `"action"`: Specifies the requested action to be authorized. Is the specified principal authorized to perform this action on the specified resource. - `"context"`: Specifies additional context that can be used to make more granular authorization decisions. -- `"entities"`: Specifies the list of entities and their associated attributes that - Verified Permissions can examine when evaluating the policies. +- `"entities"`: Specifies the list of resources and their associated attributes that + Verified Permissions can examine when evaluating the policies. You can't include + principals in this parameter, only resource and action entities. This parameter can't + include any entities of a type that matches the user or group entity types that you defined + in your identity source. The IsAuthorizedWithToken operation takes principal attributes + from only the identityToken or accessToken passed to the operation. For action + entities, you can include only their Identifier and EntityType. - `"identityToken"`: Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified - identity source. You must specify either an AccessToken or an IdentityToken, but not both. + identity source. You must specify either an accessToken, an identityToken, or both. Must be + an ID token. Verified Permissions returns an error if the token_use claim in the submitted + token isn't id. - `"resource"`: Specifies the resource for which the authorization decision is made. For example, is the principal allowed to perform the action on the resource? """ @@ -783,14 +950,15 @@ store. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filters"`: Specifies characteristics of an identity source that you can use to limit the output to matching identity sources. -- `"maxResults"`: Specifies the total number of results that you want included on each page - of the response. If you do not include this parameter, it defaults to a value that is - specific to the operation. If additional items exist beyond the number you specify, the - NextToken response element is returned with a value (not null). Include the specified value - as the NextToken request parameter in the next call to the operation to get the next part - of the results. Note that the service might return fewer results than the maximum even when - there are more results available. You should check NextToken after every operation to - ensure that you receive all of the results. +- `"maxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. If you do not specify this parameter, the operation defaults to + 10 identity sources per response. 
You can specify a maximum of 50 identity sources per + response. - `"nextToken"`: Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's @@ -835,14 +1003,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filter"`: Specifies a filter that limits the response to only policies that match the specified criteria. For example, you list only the policies that reference a specified principal. -- `"maxResults"`: Specifies the total number of results that you want included on each page - of the response. If you do not include this parameter, it defaults to a value that is - specific to the operation. If additional items exist beyond the number you specify, the - NextToken response element is returned with a value (not null). Include the specified value - as the NextToken request parameter in the next call to the operation to get the next part - of the results. Note that the service might return fewer results than the maximum even when - there are more results available. You should check NextToken after every operation to - ensure that you receive all of the results. +- `"maxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. If you do not specify this parameter, the operation defaults to + 10 policies per response. You can specify a maximum of 50 policies per response. - `"nextToken"`: Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's @@ -879,14 +1047,14 @@ Returns a paginated list of all policy stores in the calling Amazon Web Services # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Specifies the total number of results that you want included on each page - of the response. If you do not include this parameter, it defaults to a value that is - specific to the operation. If additional items exist beyond the number you specify, the - NextToken response element is returned with a value (not null). Include the specified value - as the NextToken request parameter in the next call to the operation to get the next part - of the results. Note that the service might return fewer results than the maximum even when - there are more results available. You should check NextToken after every operation to - ensure that you receive all of the results. +- `"maxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. 
Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. If you do not specify this parameter, the operation defaults to + 10 policy stores per response. You can specify a maximum of 50 policy stores per response. - `"nextToken"`: Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's @@ -917,14 +1085,15 @@ Returns a paginated list of all policy templates in the specified policy store. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Specifies the total number of results that you want included on each page - of the response. If you do not include this parameter, it defaults to a value that is - specific to the operation. If additional items exist beyond the number you specify, the - NextToken response element is returned with a value (not null). Include the specified value - as the NextToken request parameter in the next call to the operation to get the next part - of the results. Note that the service might return fewer results than the maximum even when - there are more results available. You should check NextToken after every operation to - ensure that you receive all of the results. +- `"maxResults"`: Specifies the total number of results that you want included in each + response. If additional items exist beyond the number you specify, the NextToken response + element is returned with a value (not null). Include the specified value as the NextToken + request parameter in the next call to the operation to get the next set of results. Note + that the service might return fewer results than the maximum even when there are more + results available. You should check NextToken after every operation to ensure that you + receive all of the results. If you do not specify this parameter, the operation defaults to + 10 policy templates per response. You can specify a maximum of 50 policy templates per + response. - `"nextToken"`: Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's @@ -963,7 +1132,10 @@ Creates or updates the policy schema in the specified policy store. The schema i validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you -later update a policy, then it is evaluated against the new schema at that time. +later update a policy, then it is evaluated against the new schema at that time. Verified +Permissions is eventually consistent . It can take a few seconds for a new or changed +element to propagate through the service and be visible in the results of other Verified +Permissions operations. # Arguments - `definition`: Specifies the definition of the schema to be stored. 
The schema definition @@ -1007,8 +1179,11 @@ end update_identity_source(identity_source_id, policy_store_id, update_configuration) update_identity_source(identity_source_id, policy_store_id, update_configuration, params::Dict{String,<:Any}) -Updates the specified identity source to use a new identity provider (IdP) source, or to -change the mapping of identities from the IdP to a different principal entity type. +Updates the specified identity source to use a new identity provider (IdP), or to change +the mapping of identities from the IdP to a different principal entity type. Verified +Permissions is eventually consistent . It can take a few seconds for a new or changed +element to propagate through the service and be visible in the results of other Verified +Permissions operations. # Arguments - `identity_source_id`: Specifies the ID of the identity source that you want to update. @@ -1073,10 +1248,18 @@ end Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using -UpdatePolicyTemplate. If policy validation is enabled in the policy store, then updating a -static policy causes Verified Permissions to validate the policy against the schema in the -policy store. If the updated static policy doesn't pass validation, the operation fails and -the update isn't stored. +UpdatePolicyTemplate. If policy validation is enabled in the policy store, then updating +a static policy causes Verified Permissions to validate the policy against the schema in +the policy store. If the updated static policy doesn't pass validation, the operation fails +and the update isn't stored. When you edit a static policy, you can change only certain +elements of a static policy: The action referenced by the policy. A condition clause, +such as when and unless. You can't change these elements of a static policy: Changing +a policy from a static policy to a template-linked policy. Changing the effect of a +static policy from permit or forbid. The principal referenced by a static policy. The +resource referenced by a static policy. To update a template-linked policy, you must +update the template instead. Verified Permissions is eventually consistent . It can +take a few seconds for a new or changed element to propagate through the service and be +visible in the results of other Verified Permissions operations. # Arguments - `definition`: Specifies the updated policy content that you want to replace on the @@ -1135,13 +1318,19 @@ end update_policy_store(policy_store_id, validation_settings) update_policy_store(policy_store_id, validation_settings, params::Dict{String,<:Any}) -Modifies the validation setting for a policy store. +Modifies the validation setting for a policy store. Verified Permissions is eventually +consistent . It can take a few seconds for a new or changed element to propagate through +the service and be visible in the results of other Verified Permissions operations. # Arguments - `policy_store_id`: Specifies the ID of the policy store that you want to update - `validation_settings`: A structure that defines the validation settings that want to enable for the policy store. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: Descriptive text that you can provide to help with identification of the + current policy store. 
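A minimal usage sketch with a placeholder policy store ID; the `mode` key is assumed to follow the ValidationSettings structure (OFF or STRICT), and the new description is passed through the optional params Dict:

    update_policy_store(
        "PSEXAMPLEabcdefg111111",          # hypothetical policy store ID
        Dict("mode" => "STRICT"),          # assumed ValidationSettings shape
        Dict("description" => "Policy store for the photo-sharing app")
    )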
""" function update_policy_store( policyStoreId, validationSettings; aws_config::AbstractAWSConfig=global_aws_config() @@ -1184,8 +1373,11 @@ end Updates the specified policy template. You can update only the description and the some elements of the policyBody. Changes you make to the policy template content are -immediately reflected in authorization decisions that involve all template-linked policies -instantiated from this template. +immediately (within the constraints of eventual consistency) reflected in authorization +decisions that involve all template-linked policies instantiated from this template. +Verified Permissions is eventually consistent . It can take a few seconds for a new or +changed element to propagate through the service and be visible in the results of other +Verified Permissions operations. # Arguments - `policy_store_id`: Specifies the ID of the policy store that contains the policy template diff --git a/src/services/vpc_lattice.jl b/src/services/vpc_lattice.jl index fa3315868e..b07912bf04 100644 --- a/src/services/vpc_lattice.jl +++ b/src/services/vpc_lattice.jl @@ -9,7 +9,9 @@ using AWS.UUIDs batch_update_rule(listener_identifier, rules, service_identifier, params::Dict{String,<:Any}) Updates the listener rules in a batch. You can use this operation to change the priority of -listener rules. This can be useful when bulk updating or swapping rule priority. +listener rules. This can be useful when bulk updating or swapping rule priority. Required +permissions: vpc-lattice:UpdateRule For more information, see How Amazon VPC Lattice works +with IAM in the Amazon VPC Lattice User Guide. # Arguments - `listener_identifier`: The ID or Amazon Resource Name (ARN) of the listener. @@ -53,7 +55,7 @@ end Enables access logs to be sent to Amazon CloudWatch, Amazon S3, and Amazon Kinesis Data Firehose. The service network owner can use the access logs to audit the services in the -network. The service network owner will only see access logs from clients and services that +network. The service network owner can only see access logs from clients and services that are associated with their service network. Access log entries represent traffic originated from VPCs associated with that network. For more information, see Access logs in the Amazon VPC Lattice User Guide. @@ -123,15 +125,12 @@ requests to your services. For more information, see Listeners in the Amazon VPC User Guide. # Arguments -- `default_action`: The action for the default rule. Each listener has a default rule. Each - rule consists of a priority, one or more actions, and one or more conditions. The default - rule is the rule that's used if no other rules match. Each rule must include exactly one of - the following types of actions: forward or fixed-response, and it must be the last action - to be performed. +- `default_action`: The action for the default rule. Each listener has a default rule. The + default rule is used if no other rules match. - `name`: The name of the listener. A listener name must be unique within a service. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. -- `protocol`: The listener protocol HTTP or HTTPS. +- `protocol`: The listener protocol. - `service_identifier`: The ID or Amazon Resource Name (ARN) of the service. 
# Optional Parameters @@ -382,12 +381,13 @@ end create_service_network_service_association(service_identifier, service_network_identifier) create_service_network_service_association(service_identifier, service_network_identifier, params::Dict{String,<:Any}) -Associates a service with a service network. You can't use this operation if the service -and service network are already associated or if there is a disassociation or deletion in -progress. If the association fails, you can retry the operation by deleting the association -and recreating it. You cannot associate a service and service network that are shared with -a caller. The caller must own either the service or the service network. As a result of -this operation, the association is created in the service network account and the +Associates a service with a service network. For more information, see Manage service +associations in the Amazon VPC Lattice User Guide. You can't use this operation if the +service and service network are already associated or if there is a disassociation or +deletion in progress. If the association fails, you can retry the operation by deleting the +association and recreating it. You cannot associate a service and service network that are +shared with a caller. The caller must own either the service or the service network. As a +result of this operation, the association is created in the service network account and the association owner account. # Arguments @@ -542,8 +542,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. If you retry a request that completed successfully using the same client token and parameters, the retry succeeds without performing any actions. If the parameters aren't identical, the retry fails. -- `"config"`: The target group configuration. If type is set to LAMBDA, this parameter - doesn't apply. +- `"config"`: The target group configuration. - `"tags"`: The tags for the target group. """ function create_target_group(name, type; aws_config::AbstractAWSConfig=global_aws_config()) @@ -618,9 +617,9 @@ end delete_auth_policy(resource_identifier, params::Dict{String,<:Any}) Deletes the specified auth policy. If an auth is set to AWS_IAM and the auth policy is -deleted, all requests will be denied by default. If you are trying to remove the auth -policy completely, you must set the auth_type to NONE. If auth is enabled on the resource, -but no auth policy is set, all requests will be denied. +deleted, all requests are denied. If you are trying to remove the auth policy completely, +you must set the auth type to NONE. If auth is enabled on the resource, but no auth policy +is set, all requests are denied. # Arguments - `resource_identifier`: The ID or Amazon Resource Name (ARN) of the resource. @@ -845,7 +844,7 @@ end delete_service_network_service_association(service_network_service_association_identifier, params::Dict{String,<:Any}) Deletes the association between a specified service and the specific service network. This -request will fail if an association is still in progress. +operation fails if an association is still in progress. # Arguments - `service_network_service_association_identifier`: The ID or Amazon Resource Name (ARN) of @@ -1472,8 +1471,8 @@ either by service or service network. You must provide either the service networ identifier or the service identifier. 
Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a service network is associated with a VPC or when a service is associated with a service network. If the association is for a -resource that is shared with another account, the association will include the local -account ID as the prefix in the ARN for each account the resource is shared with. +resource that is shared with another account, the association includes the local account ID +as the prefix in the ARN for each account the resource is shared with. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1640,7 +1639,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"maxResults"`: The maximum number of results to return. - `"nextToken"`: A pagination token for the next page of results. - `"targetGroupType"`: The target group type. -- `"vpcIdentifier"`: The ID or Amazon Resource Name (ARN) of the service. +- `"vpcIdentifier"`: The ID or Amazon Resource Name (ARN) of the VPC. """ function list_target_groups(; aws_config::AbstractAWSConfig=global_aws_config()) return vpc_lattice( @@ -1673,7 +1672,7 @@ this API to check the health status of targets. You can also filter the results Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of results to return. - `"nextToken"`: A pagination token for the next page of results. -- `"targets"`: The targets to list. +- `"targets"`: The targets. """ function list_targets( targetGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -1704,7 +1703,7 @@ end put_auth_policy(policy, resource_identifier, params::Dict{String,<:Any}) Creates or updates the auth policy. The policy string in JSON must not contain newlines or -blank lines. +blank lines. For more information, see Auth policies in the Amazon VPC Lattice User Guide. # Arguments - `policy`: The auth policy. The policy string in JSON must not contain newlines or blank diff --git a/src/services/wafv2.jl b/src/services/wafv2.jl index b9f73089d0..22acbfee95 100644 --- a/src/services/wafv2.jl +++ b/src/services/wafv2.jl @@ -15,16 +15,19 @@ Amazon Web Services Verified Access instance. For Amazon CloudFront, don't use Instead, use your CloudFront distribution configuration. To associate a web ACL, in the CloudFront call UpdateDistribution, set the web ACL ID to the Amazon Resource Name (ARN) of the web ACL. For information, see UpdateDistribution in the Amazon CloudFront Developer -Guide. When you make changes to web ACLs or web ACL components, like rules and rule -groups, WAF propagates the changes everywhere that the web ACL and its components are -stored and used. Your changes are applied within seconds, but there might be a brief period -of inconsistency when the changes have arrived in some places and not in others. So, for -example, if you change a rule action setting, the action might be the old action in one -area and the new action in another area. Or if you add an IP address to an IP set used in a -blocking rule, the new address might briefly be blocked in one area while still allowed in -another. This temporary inconsistency can occur when you first associate a web ACL with an -Amazon Web Services resource and when you change a web ACL that is already associated with -a resource. Generally, any inconsistencies of this type last only a few seconds. +Guide. 
Required permissions for customer-managed IAM policies This call requires +permissions that are specific to the protected resource type. For details, see Permissions +for AssociateWebACL in the WAF Developer Guide. Temporary inconsistencies during updates +When you create or change a web ACL or other WAF resources, the changes take a small amount +of time to propagate to all areas where the resources are stored. The propagation time can +be from a few seconds to a number of minutes. The following are examples of the temporary +inconsistencies that you might notice during change propagation: After you create a web +ACL, if you try to associate it with a resource, you might get an exception indicating that +the web ACL is unavailable. After you add a rule group to a web ACL, the new rule group +rules might be in effect in one area where the web ACL is used and not in another. After +you change a rule action setting, you might see the old action in some places and the new +action in others. After you add an IP address to an IP set that is in use in a blocking +rule, the new address might be blocked in one area while still allowed in another. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource to associate with the web @@ -142,7 +145,7 @@ you generate a key, you can copy it for use in your JavaScript integration. --region=us-east-1. API and SDKs - For all calls, use the Region endpoint us-east-1. - `token_domains`: The client application domains that you want to use this API key for. Example JSON: \"TokenDomains\": [\"abc.com\", \"store.abc.com\"] Public suffixes aren't - allowed. For example, you can't use usa.gov or co.uk as token domains. + allowed. For example, you can't use gov.au or co.uk as token domains. """ function create_apikey( @@ -186,15 +189,14 @@ lists those IP addresses. # Arguments - `addresses`: Contains an array of strings that specifies zero or more IP addresses or - blocks of IP addresses. All addresses must be specified using Classless Inter-Domain - Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0. Example - address strings: To configure WAF to allow, block, or count requests that originated - from the IP address 192.0.2.44, specify 192.0.2.44/32. To configure WAF to allow, block, - or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify - 192.0.2.0/24. To configure WAF to allow, block, or count requests that originated from - the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify - 1111:0000:0000:0000:0000:0000:0000:0111/128. To configure WAF to allow, block, or count - requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to + blocks of IP addresses that you want WAF to inspect for in incoming requests. All addresses + must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all + IPv4 and IPv6 CIDR ranges except for /0. Example address strings: For requests that + originated from the IP address 192.0.2.44, specify 192.0.2.44/32. For requests that + originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24. For + requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, + specify 1111:0000:0000:0000:0000:0000:0000:0111/128. For requests that originated from IP + addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64. 
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing. Example JSON Addresses specifications: @@ -369,9 +371,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys on count and size for custom request and response settings, see WAF quotas in the WAF Developer Guide. - `"Description"`: A description of the rule group that helps with identification. -- `"Rules"`: The Rule statements used to identify the web requests that you want to allow, - block, or count. Each rule includes one top-level statement that WAF uses to identify - matching web requests, and parameters that govern how WAF handles them. +- `"Rules"`: The Rule statements used to identify the web requests that you want to manage. + Each rule includes one top-level statement that WAF uses to identify matching web requests, + and parameters that govern how WAF handles them. - `"Tags"`: An array of key:value pairs to associate with the resource. """ function create_rule_group( @@ -425,14 +427,15 @@ end create_web_acl(default_action, name, scope, visibility_config, params::Dict{String,<:Any}) Creates a WebACL per the specifications provided. A web ACL defines a collection of rules -to use to inspect and control web requests. Each rule has an action defined (allow, block, -or count) for requests that match the statement of the rule. In the web ACL, you assign a -default action to take (allow, block) for any request that does not match any of the rules. -The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule -group. You can associate a web ACL with one or more Amazon Web Services resources to -protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST -API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an -App Runner service, or an Amazon Web Services Verified Access instance. +to use to inspect and control web requests. Each rule has a statement that defines what to +look for in web requests and an action that WAF applies to requests that match the +statement. In the web ACL, you assign a default action to take (allow, block) for any +request that does not match any of the rules. The rules in a web ACL can be a combination +of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one +or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront +distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync +GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services +Verified Access instance. # Arguments - `default_action`: The action to perform if none of the Rules contained in the WebACL @@ -453,10 +456,12 @@ App Runner service, or an Amazon Web Services Verified Access instance. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AssociationConfig"`: Specifies custom configurations for the associations between the web ACL and protected resources. Use this to customize the maximum size of the request - body that your protected CloudFront distributions forward to WAF for inspection. The - default is 16 KB (16,384 kilobytes). You are charged additional fees when your protected - resources forward body sizes that are larger than the default. For more information, see - WAF Pricing. + body that your protected resources forward to WAF for inspection. 
You can customize this + setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access + resources. The default setting is 16 KB (16,384 bytes). You are charged additional fees + when your protected resources forward body sizes that are larger than the default. For more + information, see WAF Pricing. For Application Load Balancer and AppSync, the limit is + fixed at 8 KB (8,192 bytes). - `"CaptchaConfig"`: Specifies how WAF should handle CAPTCHA evaluations for rules that don't have their own CaptchaConfig settings. If you don't specify this, WAF uses its default settings for CaptchaConfig. @@ -471,9 +476,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys about the limits on count and size for custom request and response settings, see WAF quotas in the WAF Developer Guide. - `"Description"`: A description of the web ACL that helps with identification. -- `"Rules"`: The Rule statements used to identify the web requests that you want to allow, - block, or count. Each rule includes one top-level statement that WAF uses to identify - matching web requests, and parameters that govern how WAF handles them. +- `"Rules"`: The Rule statements used to identify the web requests that you want to manage. + Each rule includes one top-level statement that WAF uses to identify matching web requests, + and parameters that govern how WAF handles them. - `"Tags"`: An array of key:value pairs to associate with the resource. - `"TokenDomains"`: Specifies the domains that WAF should accept in a web request token. This enables the use of tokens across multiple protected websites. When WAF provides a @@ -482,7 +487,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys domain of the protected resource. With a token domain list, WAF accepts the resource's host domain plus all domains in the token domain list, including their prefixed subdomains. Example JSON: \"TokenDomains\": { \"mywebsite.com\", \"myotherwebsite.com\" } Public - suffixes aren't allowed. For example, you can't use usa.gov or co.uk as token domains. + suffixes aren't allowed. For example, you can't use gov.au or co.uk as token domains. """ function create_web_acl( DefaultAction, @@ -530,6 +535,50 @@ function create_web_acl( ) end +""" + delete_apikey(apikey, scope) + delete_apikey(apikey, scope, params::Dict{String,<:Any}) + +Deletes the specified API key. After you delete a key, it can take up to 24 hours for WAF +to disallow use of the key in all regions. + +# Arguments +- `apikey`: The encrypted API key that you want to delete. +- `scope`: Specifies whether this is for an Amazon CloudFront distribution or for a + regional application. A regional application can be an Application Load Balancer (ALB), an + Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App + Runner service, or an Amazon Web Services Verified Access instance. To work with + CloudFront, you must also specify the Region US East (N. Virginia) as follows: CLI - + Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + --region=us-east-1. API and SDKs - For all calls, use the Region endpoint us-east-1. 
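The new DeleteAPIKey operation takes the encrypted key string and the scope; as the scope description above notes, CLOUDFRONT-scoped calls must go through us-east-1. A sketch with a placeholder key value, assuming `AWSConfig(; region=...)` as the way to pin the Region for that case:

using AWS
@service WAFV2

api_key = "v1EXAMPLEencryptedkeyvalue"   # placeholder encrypted key from CreateAPIKey

# Regional scope uses whatever Region the active configuration points at.
WAFV2.delete_apikey(api_key, "REGIONAL")

# CloudFront scope must be called against us-east-1.
WAFV2.delete_apikey(api_key, "CLOUDFRONT"; aws_config=AWSConfig(; region="us-east-1"))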
+ +""" +function delete_apikey(APIKey, Scope; aws_config::AbstractAWSConfig=global_aws_config()) + return wafv2( + "DeleteAPIKey", + Dict{String,Any}("APIKey" => APIKey, "Scope" => Scope); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_apikey( + APIKey, + Scope, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wafv2( + "DeleteAPIKey", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("APIKey" => APIKey, "Scope" => Scope), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_firewall_manager_rule_groups(web_aclarn, web_acllock_token) delete_firewall_manager_rule_groups(web_aclarn, web_acllock_token, params::Dict{String,<:Any}) @@ -653,6 +702,16 @@ Deletes the LoggingConfiguration from the specified web ACL. - `resource_arn`: The Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LogScope"`: The owner of the logging configuration, which must be set to CUSTOMER for + the configurations that you manage. The log scope SECURITY_LAKE indicates a configuration + that is managed through Amazon Security Lake. You can use Security Lake to collect log and + event data from various sources for normalization, analysis, and management. For + information, see Collecting data from Amazon Web Services services in the Amazon Security + Lake user guide. Default: CUSTOMER +- `"LogType"`: Used to distinguish between various logging options. Currently, there is one + option. Default: WAF_LOGS """ function delete_logging_configuration( ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1068,7 +1127,9 @@ GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon We Verified Access instance. For Amazon CloudFront, don't use this call. Instead, use your CloudFront distribution configuration. To disassociate a web ACL, provide an empty web ACL ID in the CloudFront call UpdateDistribution. For information, see UpdateDistribution in -the Amazon CloudFront API Reference. +the Amazon CloudFront API Reference. Required permissions for customer-managed IAM +policies This call requires permissions that are specific to the protected resource type. +For details, see Permissions for DisassociateWebACL in the WAF Developer Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource to disassociate from the @@ -1265,6 +1326,16 @@ Returns the LoggingConfiguration for the specified web ACL. - `resource_arn`: The Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LogScope"`: The owner of the logging configuration, which must be set to CUSTOMER for + the configurations that you manage. The log scope SECURITY_LAKE indicates a configuration + that is managed through Amazon Security Lake. You can use Security Lake to collect log and + event data from various sources for normalization, analysis, and management. For + information, see Collecting data from Amazon Web Services services in the Amazon Security + Lake user guide. Default: CUSTOMER +- `"LogType"`: Used to distinguish between various logging options. Currently, there is one + option. 
Default: WAF_LOGS """ function get_logging_configuration( ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1735,7 +1806,15 @@ end get_web_aclfor_resource(resource_arn) get_web_aclfor_resource(resource_arn, params::Dict{String,<:Any}) -Retrieves the WebACL for the specified resource. +Retrieves the WebACL for the specified resource. This call uses GetWebACL, to verify that +your account has permission to access the retrieved web ACL. If you get an error that +indicates that your account isn't authorized to perform wafv2:GetWebACL on the resource, +that error won't be included in your CloudTrail event history. For Amazon CloudFront, +don't use this call. Instead, call the CloudFront action GetDistributionConfig. For +information, see GetDistributionConfig in the Amazon CloudFront API Reference. Required +permissions for customer-managed IAM policies This call requires permissions that are +specific to the protected resource type. For details, see Permissions for +GetWebACLForResource in the WAF Developer Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource whose web ACL you want to @@ -1999,6 +2078,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Limit"`: The maximum number of objects that you want WAF to return for this request. If more objects are available, in the response, WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects. +- `"LogScope"`: The owner of the logging configuration, which must be set to CUSTOMER for + the configurations that you manage. The log scope SECURITY_LAKE indicates a configuration + that is managed through Amazon Security Lake. You can use Security Lake to collect log and + event data from various sources for normalization, analysis, and management. For + information, see Collecting data from Amazon Web Services services in the Amazon Security + Lake user guide. Default: CUSTOMER - `"NextMarker"`: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the @@ -2172,8 +2257,12 @@ end list_resources_for_web_acl(web_aclarn, params::Dict{String,<:Any}) Retrieves an array of the Amazon Resource Names (ARNs) for the regional resources that are -associated with the specified web ACL. If you want the list of Amazon CloudFront resources, -use the CloudFront call ListDistributionsByWebACLId. +associated with the specified web ACL. For Amazon CloudFront, don't use this call. +Instead, use the CloudFront call ListDistributionsByWebACLId. For information, see +ListDistributionsByWebACLId in the Amazon CloudFront API Reference. Required permissions +for customer-managed IAM policies This call requires permissions that are specific to the +protected resource type. For details, see Permissions for ListResourcesForWebACL in the WAF +Developer Guide. # Arguments - `web_aclarn`: The Amazon Resource Name (ARN) of the web ACL. @@ -2639,29 +2728,29 @@ end Updates the specified IPSet. This operation completely replaces the mutable specifications that you already have for the IP set with the ones that you provide to this call. 
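Because UpdateIPSet replaces the whole mutable specification, the call is naturally a read-modify-write against GetIPSet, which the next sentences of this docstring spell out. A sketch with placeholder names and IDs; the `get_ipset` call shape and the `IPSet`/`LockToken` response fields are assumptions based on the alphabetical positional convention used throughout these generated functions and on the WAFV2 API shapes:

using AWS
@service WAFV2

name, id, scope = "blocked-ips", "11111111-2222-3333-4444-555555555555", "REGIONAL"

resp      = WAFV2.get_ipset(id, name, scope)        # retrieve the current specification
addresses = copy(resp["IPSet"]["Addresses"])
push!(addresses, "192.0.2.44/32")                   # modify: add one more CIDR block

# Provide the complete specification back, together with the current lock token.
WAFV2.update_ipset(addresses, id, resp["LockToken"], name, scope)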
To modify an IP set, do the following: Retrieve it by calling GetIPSet Update -its settings as needed Provide the complete IP set specification to this call When you -make changes to web ACLs or web ACL components, like rules and rule groups, WAF propagates -the changes everywhere that the web ACL and its components are stored and used. Your -changes are applied within seconds, but there might be a brief period of inconsistency when -the changes have arrived in some places and not in others. So, for example, if you change a -rule action setting, the action might be the old action in one area and the new action in -another area. Or if you add an IP address to an IP set used in a blocking rule, the new -address might briefly be blocked in one area while still allowed in another. This temporary -inconsistency can occur when you first associate a web ACL with an Amazon Web Services -resource and when you change a web ACL that is already associated with a resource. -Generally, any inconsistencies of this type last only a few seconds. +its settings as needed Provide the complete IP set specification to this call +Temporary inconsistencies during updates When you create or change a web ACL or other WAF +resources, the changes take a small amount of time to propagate to all areas where the +resources are stored. The propagation time can be from a few seconds to a number of +minutes. The following are examples of the temporary inconsistencies that you might notice +during change propagation: After you create a web ACL, if you try to associate it with a +resource, you might get an exception indicating that the web ACL is unavailable. After +you add a rule group to a web ACL, the new rule group rules might be in effect in one area +where the web ACL is used and not in another. After you change a rule action setting, you +might see the old action in some places and the new action in others. After you add an +IP address to an IP set that is in use in a blocking rule, the new address might be blocked +in one area while still allowed in another. # Arguments - `addresses`: Contains an array of strings that specifies zero or more IP addresses or - blocks of IP addresses. All addresses must be specified using Classless Inter-Domain - Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0. Example - address strings: To configure WAF to allow, block, or count requests that originated - from the IP address 192.0.2.44, specify 192.0.2.44/32. To configure WAF to allow, block, - or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify - 192.0.2.0/24. To configure WAF to allow, block, or count requests that originated from - the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify - 1111:0000:0000:0000:0000:0000:0000:0111/128. To configure WAF to allow, block, or count - requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to + blocks of IP addresses that you want WAF to inspect for in incoming requests. All addresses + must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all + IPv4 and IPv6 CIDR ranges except for /0. Example address strings: For requests that + originated from the IP address 192.0.2.44, specify 192.0.2.44/32. For requests that + originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24. For + requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, + specify 1111:0000:0000:0000:0000:0000:0000:0111/128. 
For requests that originated from IP + addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing. Example JSON Addresses specifications: @@ -2839,17 +2928,17 @@ Updates the specified RegexPatternSet. This operation completely replaces the m specifications that you already have for the regex pattern set with the ones that you provide to this call. To modify a regex pattern set, do the following: Retrieve it by calling GetRegexPatternSet Update its settings as needed Provide the complete regex -pattern set specification to this call When you make changes to web ACLs or web ACL -components, like rules and rule groups, WAF propagates the changes everywhere that the web -ACL and its components are stored and used. Your changes are applied within seconds, but -there might be a brief period of inconsistency when the changes have arrived in some places -and not in others. So, for example, if you change a rule action setting, the action might -be the old action in one area and the new action in another area. Or if you add an IP -address to an IP set used in a blocking rule, the new address might briefly be blocked in -one area while still allowed in another. This temporary inconsistency can occur when you -first associate a web ACL with an Amazon Web Services resource and when you change a web -ACL that is already associated with a resource. Generally, any inconsistencies of this type -last only a few seconds. +pattern set specification to this call Temporary inconsistencies during updates When +you create or change a web ACL or other WAF resources, the changes take a small amount of +time to propagate to all areas where the resources are stored. The propagation time can be +from a few seconds to a number of minutes. The following are examples of the temporary +inconsistencies that you might notice during change propagation: After you create a web +ACL, if you try to associate it with a resource, you might get an exception indicating that +the web ACL is unavailable. After you add a rule group to a web ACL, the new rule group +rules might be in effect in one area where the web ACL is used and not in another. After +you change a rule action setting, you might see the old action in some places and the new +action in others. After you add an IP address to an IP set that is in use in a blocking +rule, the new address might be blocked in one area while still allowed in another. # Arguments - `id`: A unique identifier for the set. This ID is returned in the responses to create and @@ -2933,20 +3022,21 @@ Updates the specified RuleGroup. This operation completely replaces the mutable specifications that you already have for the rule group with the ones that you provide to this call. To modify a rule group, do the following: Retrieve it by calling GetRuleGroup Update its settings as needed Provide the complete rule group -specification to this call When you make changes to web ACLs or web ACL components, like -rules and rule groups, WAF propagates the changes everywhere that the web ACL and its -components are stored and used. Your changes are applied within seconds, but there might be -a brief period of inconsistency when the changes have arrived in some places and not in -others. So, for example, if you change a rule action setting, the action might be the old -action in one area and the new action in another area. 
Or if you add an IP address to an IP -set used in a blocking rule, the new address might briefly be blocked in one area while -still allowed in another. This temporary inconsistency can occur when you first associate a -web ACL with an Amazon Web Services resource and when you change a web ACL that is already -associated with a resource. Generally, any inconsistencies of this type last only a few -seconds. A rule group defines a collection of rules to inspect and control web requests -that you can use in a WebACL. When you create a rule group, you define an immutable -capacity limit. If you update a rule group, you must stay within the capacity. This allows -others to reuse the rule group with confidence in its capacity requirements. +specification to this call A rule group defines a collection of rules to inspect and +control web requests that you can use in a WebACL. When you create a rule group, you define +an immutable capacity limit. If you update a rule group, you must stay within the capacity. +This allows others to reuse the rule group with confidence in its capacity requirements. +Temporary inconsistencies during updates When you create or change a web ACL or other WAF +resources, the changes take a small amount of time to propagate to all areas where the +resources are stored. The propagation time can be from a few seconds to a number of +minutes. The following are examples of the temporary inconsistencies that you might notice +during change propagation: After you create a web ACL, if you try to associate it with a +resource, you might get an exception indicating that the web ACL is unavailable. After +you add a rule group to a web ACL, the new rule group rules might be in effect in one area +where the web ACL is used and not in another. After you change a rule action setting, you +might see the old action in some places and the new action in others. After you add an +IP address to an IP set that is in use in a blocking rule, the new address might be blocked +in one area while still allowed in another. # Arguments - `id`: A unique identifier for the rule group. This ID is returned in the responses to @@ -2980,9 +3070,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys on count and size for custom request and response settings, see WAF quotas in the WAF Developer Guide. - `"Description"`: A description of the rule group that helps with identification. -- `"Rules"`: The Rule statements used to identify the web requests that you want to allow, - block, or count. Each rule includes one top-level statement that WAF uses to identify - matching web requests, and parameters that govern how WAF handles them. +- `"Rules"`: The Rule statements used to identify the web requests that you want to manage. + Each rule includes one top-level statement that WAF uses to identify matching web requests, + and parameters that govern how WAF handles them. """ function update_rule_group( Id, @@ -3043,25 +3133,26 @@ the resources that you have associated with the web ACL. This operation comple replaces the mutable specifications that you already have for the web ACL with the ones that you provide to this call. To modify a web ACL, do the following: Retrieve it by calling GetWebACL Update its settings as needed Provide the complete web ACL -specification to this call When you make changes to web ACLs or web ACL components, like -rules and rule groups, WAF propagates the changes everywhere that the web ACL and its -components are stored and used. 
Your changes are applied within seconds, but there might be -a brief period of inconsistency when the changes have arrived in some places and not in -others. So, for example, if you change a rule action setting, the action might be the old -action in one area and the new action in another area. Or if you add an IP address to an IP -set used in a blocking rule, the new address might briefly be blocked in one area while -still allowed in another. This temporary inconsistency can occur when you first associate a -web ACL with an Amazon Web Services resource and when you change a web ACL that is already -associated with a resource. Generally, any inconsistencies of this type last only a few -seconds. A web ACL defines a collection of rules to use to inspect and control web -requests. Each rule has an action defined (allow, block, or count) for requests that match -the statement of the rule. In the web ACL, you assign a default action to take (allow, -block) for any request that does not match any of the rules. The rules in a web ACL can be -a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web -ACL with one or more Amazon Web Services resources to protect. The resources can be an -Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load -Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an -Amazon Web Services Verified Access instance. +specification to this call A web ACL defines a collection of rules to use to inspect +and control web requests. Each rule has a statement that defines what to look for in web +requests and an action that WAF applies to requests that match the statement. In the web +ACL, you assign a default action to take (allow, block) for any request that does not match +any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, +and managed rule group. You can associate a web ACL with one or more Amazon Web Services +resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API +Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito +user pool, an App Runner service, or an Amazon Web Services Verified Access instance. +Temporary inconsistencies during updates When you create or change a web ACL or other WAF +resources, the changes take a small amount of time to propagate to all areas where the +resources are stored. The propagation time can be from a few seconds to a number of +minutes. The following are examples of the temporary inconsistencies that you might notice +during change propagation: After you create a web ACL, if you try to associate it with a +resource, you might get an exception indicating that the web ACL is unavailable. After +you add a rule group to a web ACL, the new rule group rules might be in effect in one area +where the web ACL is used and not in another. After you change a rule action setting, you +might see the old action in some places and the new action in others. After you add an +IP address to an IP set that is in use in a blocking rule, the new address might be blocked +in one area while still allowed in another. # Arguments - `default_action`: The action to perform if none of the Rules contained in the WebACL @@ -3091,10 +3182,12 @@ Amazon Web Services Verified Access instance. Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"AssociationConfig"`: Specifies custom configurations for the associations between the web ACL and protected resources. Use this to customize the maximum size of the request - body that your protected CloudFront distributions forward to WAF for inspection. The - default is 16 KB (16,384 kilobytes). You are charged additional fees when your protected - resources forward body sizes that are larger than the default. For more information, see - WAF Pricing. + body that your protected resources forward to WAF for inspection. You can customize this + setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access + resources. The default setting is 16 KB (16,384 bytes). You are charged additional fees + when your protected resources forward body sizes that are larger than the default. For more + information, see WAF Pricing. For Application Load Balancer and AppSync, the limit is + fixed at 8 KB (8,192 bytes). - `"CaptchaConfig"`: Specifies how WAF should handle CAPTCHA evaluations for rules that don't have their own CaptchaConfig settings. If you don't specify this, WAF uses its default settings for CaptchaConfig. @@ -3109,9 +3202,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys about the limits on count and size for custom request and response settings, see WAF quotas in the WAF Developer Guide. - `"Description"`: A description of the web ACL that helps with identification. -- `"Rules"`: The Rule statements used to identify the web requests that you want to allow, - block, or count. Each rule includes one top-level statement that WAF uses to identify - matching web requests, and parameters that govern how WAF handles them. +- `"Rules"`: The Rule statements used to identify the web requests that you want to manage. + Each rule includes one top-level statement that WAF uses to identify matching web requests, + and parameters that govern how WAF handles them. - `"TokenDomains"`: Specifies the domains that WAF should accept in a web request token. This enables the use of tokens across multiple protected websites. When WAF provides a token, it uses the domain of the Amazon Web Services resource that the web ACL is @@ -3119,7 +3212,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys domain of the protected resource. With a token domain list, WAF accepts the resource's host domain plus all domains in the token domain list, including their prefixed subdomains. Example JSON: \"TokenDomains\": { \"mywebsite.com\", \"myotherwebsite.com\" } Public - suffixes aren't allowed. For example, you can't use usa.gov or co.uk as token domains. + suffixes aren't allowed. For example, you can't use gov.au or co.uk as token domains. """ function update_web_acl( DefaultAction, diff --git a/src/services/wellarchitected.jl b/src/services/wellarchitected.jl index df0409f481..572688d456 100644 --- a/src/services/wellarchitected.jl +++ b/src/services/wellarchitected.jl @@ -377,6 +377,135 @@ function create_profile_share( ) end +""" + create_review_template(client_request_token, description, lenses, template_name) + create_review_template(client_request_token, description, lenses, template_name, params::Dict{String,<:Any}) + +Create a review template. Disclaimer Do not include or gather personal identifiable +information (PII) of end users or other identifiable individuals in or via your review +templates. 
If your review template or those shared with you and used in your account do +include or collect PII you are responsible for: ensuring that the included PII is processed +in accordance with applicable law, providing adequate privacy notices, and obtaining +necessary consents for processing such data. + +# Arguments +- `client_request_token`: +- `description`: The review template description. +- `lenses`: Lenses applied to the review template. +- `template_name`: Name of the review template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Notes"`: +- `"Tags"`: The tags assigned to the review template. +""" +function create_review_template( + ClientRequestToken, + Description, + Lenses, + TemplateName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "POST", + "/reviewTemplates", + Dict{String,Any}( + "ClientRequestToken" => ClientRequestToken, + "Description" => Description, + "Lenses" => Lenses, + "TemplateName" => TemplateName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_review_template( + ClientRequestToken, + Description, + Lenses, + TemplateName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "POST", + "/reviewTemplates", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientRequestToken" => ClientRequestToken, + "Description" => Description, + "Lenses" => Lenses, + "TemplateName" => TemplateName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_template_share(client_request_token, shared_with, template_arn) + create_template_share(client_request_token, shared_with, template_arn, params::Dict{String,<:Any}) + +Create a review template share. The owner of a review template can share it with other +Amazon Web Services accounts, users, an organization, and organizational units (OUs) in the +same Amazon Web Services Region. Shared access to a review template is not removed until +the review template share invitation is deleted. If you share a review template with an +organization or OU, all accounts in the organization or OU are granted access to the review +template. Disclaimer By sharing your review template with other Amazon Web Services +accounts, you acknowledge that Amazon Web Services will make your review template available +to those other accounts. + +# Arguments +- `client_request_token`: +- `shared_with`: +- `template_arn`: The review template ARN. 
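A combined sketch of the two new write operations documented above: create a review template, then share it with another account. The account ID and lens alias are placeholders, and treating `TemplateArn` as the field returned by the create call is an assumption inferred from the template ARN arguments used throughout this file:

using AWS, UUIDs
@service WellArchitected

resp = WellArchitected.create_review_template(
    string(uuid4()),                 # ClientRequestToken
    "Baseline review template",      # Description
    ["wellarchitected"],             # Lenses
    "baseline-template",             # TemplateName
)
template_arn = resp["TemplateArn"]   # assumed response field name

WellArchitected.create_template_share(string(uuid4()), "123456789012", template_arn)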
+ +""" +function create_template_share( + ClientRequestToken, + SharedWith, + TemplateArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "POST", + "/templates/shares/$(TemplateArn)", + Dict{String,Any}( + "ClientRequestToken" => ClientRequestToken, "SharedWith" => SharedWith + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_template_share( + ClientRequestToken, + SharedWith, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "POST", + "/templates/shares/$(TemplateArn)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientRequestToken" => ClientRequestToken, "SharedWith" => SharedWith + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_workload(client_request_token, description, environment, lenses, workload_name) create_workload(client_request_token, description, environment, lenses, workload_name, params::Dict{String,<:Any}) @@ -387,7 +516,10 @@ Amazon Web Services Region. Only the owner of a workload can delete it. For more information, see Defining a Workload in the Well-Architected Tool User Guide. Either AwsRegions, NonAwsRegions, or both must be specified when creating a workload. You also must specify ReviewOwner, even though the parameter is listed as not being required in the -following section. +following section. When creating a workload using a review template, you must have the +following IAM permissions: wellarchitected:GetReviewTemplate +wellarchitected:GetReviewTemplateAnswer wellarchitected:ListReviewTemplateAnswers +wellarchitected:GetReviewTemplateLensReview # Arguments - `client_request_token`: @@ -406,11 +538,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys workload. - `"Industry"`: - `"IndustryType"`: +- `"JiraConfiguration"`: Jira configuration settings when creating a workload. - `"NonAwsRegions"`: - `"Notes"`: - `"PillarPriorities"`: - `"ProfileArns"`: The list of profile ARNs associated with the workload. - `"ReviewOwner"`: +- `"ReviewTemplateArns"`: The list of review template ARNs to associate with the workload. - `"Tags"`: The tags to be associated with the workload. """ function create_workload( @@ -732,6 +866,98 @@ function delete_profile_share( ) end +""" + delete_review_template(client_request_token, template_arn) + delete_review_template(client_request_token, template_arn, params::Dict{String,<:Any}) + +Delete a review template. Only the owner of a review template can delete it. After the +review template is deleted, Amazon Web Services accounts, users, organizations, and +organizational units (OUs) that you shared the review template with will no longer be able +to apply it to new workloads. + +# Arguments +- `client_request_token`: +- `template_arn`: The review template ARN. 
+ +""" +function delete_review_template( + ClientRequestToken, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "DELETE", + "/reviewTemplates/$(TemplateArn)", + Dict{String,Any}("ClientRequestToken" => ClientRequestToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_review_template( + ClientRequestToken, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "DELETE", + "/reviewTemplates/$(TemplateArn)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ClientRequestToken" => ClientRequestToken), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_template_share(client_request_token, share_id, template_arn) + delete_template_share(client_request_token, share_id, template_arn, params::Dict{String,<:Any}) + +Delete a review template share. After the review template share is deleted, Amazon Web +Services accounts, users, organizations, and organizational units (OUs) that you shared the +review template with will no longer be able to apply it to new workloads. + +# Arguments +- `client_request_token`: +- `share_id`: +- `template_arn`: The review template ARN. + +""" +function delete_template_share( + ClientRequestToken, + ShareId, + TemplateArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "DELETE", + "/templates/shares/$(TemplateArn)/$(ShareId)", + Dict{String,Any}("ClientRequestToken" => ClientRequestToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_template_share( + ClientRequestToken, + ShareId, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "DELETE", + "/templates/shares/$(TemplateArn)/$(ShareId)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ClientRequestToken" => ClientRequestToken), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_workload(client_request_token, workload_id) delete_workload(client_request_token, workload_id, params::Dict{String,<:Any}) @@ -1021,6 +1247,30 @@ function get_consolidated_report( ) end +""" + get_global_settings() + get_global_settings(params::Dict{String,<:Any}) + +Global settings for all workloads. + +""" +function get_global_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return wellarchitected( + "GET", "/global-settings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_global_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "GET", + "/global-settings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_lens(lens_alias) get_lens(lens_alias, params::Dict{String,<:Any}) @@ -1267,6 +1517,112 @@ function get_profile_template( ) end +""" + get_review_template(template_arn) + get_review_template(template_arn, params::Dict{String,<:Any}) + +Get review template. + +# Arguments +- `template_arn`: The review template ARN. 
+ +""" +function get_review_template(TemplateArn; aws_config::AbstractAWSConfig=global_aws_config()) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_review_template( + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_review_template_answer(lens_alias, question_id, template_arn) + get_review_template_answer(lens_alias, question_id, template_arn, params::Dict{String,<:Any}) + +Get review template answer. + +# Arguments +- `lens_alias`: +- `question_id`: +- `template_arn`: The review template ARN. + +""" +function get_review_template_answer( + LensAlias, QuestionId, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/answers/$(QuestionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_review_template_answer( + LensAlias, + QuestionId, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/answers/$(QuestionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_review_template_lens_review(lens_alias, template_arn) + get_review_template_lens_review(lens_alias, template_arn, params::Dict{String,<:Any}) + +Get a lens review associated with a review template. + +# Arguments +- `lens_alias`: +- `template_arn`: The review template ARN. + +""" +function get_review_template_lens_review( + LensAlias, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_review_template_lens_review( + LensAlias, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_workload(workload_id) get_workload(workload_id, params::Dict{String,<:Any}) @@ -1544,7 +1900,7 @@ end list_lens_review_improvements(lens_alias, workload_id) list_lens_review_improvements(lens_alias, workload_id, params::Dict{String,<:Any}) -List lens review improvements. +List the improvements of a particular lens review. # Arguments - `lens_alias`: @@ -1633,7 +1989,7 @@ List the lens shares associated with the lens. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of results to return for this request. - `"NextToken"`: -- `"SharedWithPrefix"`: The Amazon Web Services account ID, IAM role, organization ID, or +- `"SharedWithPrefix"`: The Amazon Web Services account ID, organization ID, or organizational unit (OU) ID with which the lens is shared. - `"Status"`: """ @@ -1732,6 +2088,8 @@ List lens notifications. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of results to return for this request. 
- `"NextToken"`: +- `"ResourceArn"`: The ARN for the related resource for the notification. Only one of + WorkloadID or ResourceARN should be specified. - `"WorkloadId"`: """ function list_notifications(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1796,7 +2154,7 @@ List profile shares. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of results to return for this request. - `"NextToken"`: -- `"SharedWithPrefix"`: The Amazon Web Services account ID, IAM role, organization ID, or +- `"SharedWithPrefix"`: The Amazon Web Services account ID, organization ID, or organizational unit (OU) ID with which the profile is shared. - `"Status"`: """ @@ -1832,7 +2190,8 @@ List profiles. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: - `"NextToken"`: -- `"ProfileNamePrefix"`: Prefix for profile name. +- `"ProfileNamePrefix"`: An optional string added to the beginning of each profile name + returned in the results. - `"ProfileOwnerType"`: Profile owner type. """ function list_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1852,11 +2211,82 @@ function list_profiles( ) end +""" + list_review_template_answers(lens_alias, template_arn) + list_review_template_answers(lens_alias, template_arn, params::Dict{String,<:Any}) + +List the answers of a review template. + +# Arguments +- `lens_alias`: +- `template_arn`: The ARN of the review template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return for this request. +- `"NextToken"`: +- `"PillarId"`: +""" +function list_review_template_answers( + LensAlias, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/answers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_review_template_answers( + LensAlias, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "GET", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/answers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_review_templates() + list_review_templates(params::Dict{String,<:Any}) + +List review templates. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: +- `"NextToken"`: +""" +function list_review_templates(; aws_config::AbstractAWSConfig=global_aws_config()) + return wellarchitected( + "GET", "/reviewTemplates"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_review_templates( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "GET", + "/reviewTemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_share_invitations() list_share_invitations(params::Dict{String,<:Any}) -List the workload invitations. +List the share invitations. WorkloadNamePrefix, LensNamePrefix, ProfileNamePrefix, and +TemplateNamePrefix are mutually exclusive. Use the parameter that matches your +ShareResourceType. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1864,8 +2294,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys in the results. - `"MaxResults"`: The maximum number of results to return for this request. - `"NextToken"`: -- `"ProfileNamePrefix"`: Profile name prefix. +- `"ProfileNamePrefix"`: An optional string added to the beginning of each profile name + returned in the results. - `"ShareResourceType"`: The type of share invitations to be returned. +- `"TemplateNamePrefix"`: An optional string added to the beginning of each review template + name returned in the results. - `"WorkloadNamePrefix"`: """ function list_share_invitations(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1890,7 +2323,7 @@ end list_tags_for_resource(workload_arn, params::Dict{String,<:Any}) List the tags for a resource. The WorkloadArn parameter can be a workload ARN, a custom -lens ARN, or a profile ARN. +lens ARN, a profile ARN, or review template ARN. # Arguments - `workload_arn`: @@ -1920,6 +2353,47 @@ function list_tags_for_resource( ) end +""" + list_template_shares(template_arn) + list_template_shares(template_arn, params::Dict{String,<:Any}) + +List review template shares. + +# Arguments +- `template_arn`: The review template ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return for this request. +- `"NextToken"`: +- `"SharedWithPrefix"`: The Amazon Web Services account ID, organization ID, or + organizational unit (OU) ID with which the profile is shared. +- `"Status"`: +""" +function list_template_shares( + TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "GET", + "/templates/shares/$(TemplateArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_template_shares( + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "GET", + "/templates/shares/$(TemplateArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_workload_shares(workload_id) list_workload_shares(workload_id, params::Dict{String,<:Any}) @@ -1933,7 +2407,7 @@ List the workload shares associated with the workload. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of results to return for this request. - `"NextToken"`: -- `"SharedWithPrefix"`: The Amazon Web Services account ID, IAM role, organization ID, or +- `"SharedWithPrefix"`: The Amazon Web Services account ID, organization ID, or organizational unit (OU) ID with which the workload is shared. - `"Status"`: """ @@ -1996,7 +2470,7 @@ end tag_resource(tags, workload_arn, params::Dict{String,<:Any}) Adds one or more tags to the specified resource. The WorkloadArn parameter can be a -workload ARN, a custom lens ARN, or a profile ARN. +workload ARN, a custom lens ARN, a profile ARN, or review template ARN. # Arguments - `tags`: The tags for the resource. @@ -2032,8 +2506,9 @@ end untag_resource(workload_arn, tag_keys, params::Dict{String,<:Any}) Deletes specified tags from a resource. The WorkloadArn parameter can be a workload ARN, a -custom lens ARN, or a profile ARN. To specify multiple tags, use separate tagKeys -parameters, for example: DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2 +custom lens ARN, a profile ARN, or review template ARN. 
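Tagging now accepts review template ARNs alongside workload, lens, and profile ARNs. A sketch with a placeholder ARN, following the argument order shown in the TagResource and UntagResource docstrings here; passing the tag keys as a vector is an assumption about how the generated function serializes the tagKeys query parameter:

using AWS
@service WellArchitected

arn = "arn:aws:wellarchitected:us-east-1:123456789012:review-template/abc123EXAMPLE"

WellArchitected.tag_resource(Dict("team" => "platform"), arn)   # (tags, workload_arn)
WellArchitected.untag_resource(arn, ["team"])                   # (workload_arn, tag_keys)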
To specify multiple tags, use +separate tagKeys parameters, for example: DELETE +/tags/WorkloadArn?tagKeys=key1&tagKeys=key2 # Arguments - `workload_arn`: @@ -2117,12 +2592,13 @@ end update_global_settings() update_global_settings(params::Dict{String,<:Any}) -Updates whether the Amazon Web Services account is opted into organization sharing and +Update whether the Amazon Web Services account is opted into organization sharing and discovery integration features. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DiscoveryIntegrationStatus"`: The status of discovery support settings. +- `"JiraConfiguration"`: The status of Jira integration settings. - `"OrganizationSharingStatus"`: The status of organization sharing settings. """ function update_global_settings(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2142,6 +2618,60 @@ function update_global_settings( ) end +""" + update_integration(client_request_token, integrating_service, workload_id) + update_integration(client_request_token, integrating_service, workload_id, params::Dict{String,<:Any}) + +Update integration features. + +# Arguments +- `client_request_token`: +- `integrating_service`: Which integrated service to update. +- `workload_id`: + +""" +function update_integration( + ClientRequestToken, + IntegratingService, + WorkloadId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "POST", + "/workloads/$(WorkloadId)/updateIntegration", + Dict{String,Any}( + "ClientRequestToken" => ClientRequestToken, + "IntegratingService" => IntegratingService, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_integration( + ClientRequestToken, + IntegratingService, + WorkloadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "POST", + "/workloads/$(WorkloadId)/updateIntegration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClientRequestToken" => ClientRequestToken, + "IntegratingService" => IntegratingService, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_lens_review(lens_alias, workload_id) update_lens_review(lens_alias, workload_id, params::Dict{String,<:Any}) @@ -2154,6 +2684,7 @@ Update lens review for a particular workload. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"JiraConfiguration"`: Configuration of the Jira integration. - `"LensNotes"`: - `"PillarNotes"`: """ @@ -2218,6 +2749,133 @@ function update_profile( ) end +""" + update_review_template(template_arn) + update_review_template(template_arn, params::Dict{String,<:Any}) + +Update a review template. + +# Arguments +- `template_arn`: The review template ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: The review template description. +- `"LensesToAssociate"`: A list of lens aliases or ARNs to apply to the review template. +- `"LensesToDisassociate"`: A list of lens aliases or ARNs to unapply to the review + template. The wellarchitected lens cannot be unapplied. +- `"Notes"`: +- `"TemplateName"`: The review template name. 
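UpdateReviewTemplate takes only the ARN positionally; a rename, a new description, and lens changes all ride in the params Dict using the keys listed above. A sketch with placeholder values (the serverless lens alias is illustrative):

using AWS
@service WellArchitected

WellArchitected.update_review_template(
    "arn:aws:wellarchitected:us-east-1:123456789012:review-template/abc123EXAMPLE",
    Dict(
        "TemplateName"      => "baseline-template-v2",
        "Description"       => "Updated baseline review template",
        "LensesToAssociate" => ["serverless"],
    ),
)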
+""" +function update_review_template( + TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "PATCH", + "/reviewTemplates/$(TemplateArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_review_template( + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "PATCH", + "/reviewTemplates/$(TemplateArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_review_template_answer(lens_alias, question_id, template_arn) + update_review_template_answer(lens_alias, question_id, template_arn, params::Dict{String,<:Any}) + +Update a review template answer. + +# Arguments +- `lens_alias`: +- `question_id`: +- `template_arn`: The review template ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChoiceUpdates"`: A list of choices to be updated. +- `"IsApplicable"`: +- `"Notes"`: +- `"Reason"`: The update reason. +- `"SelectedChoices"`: +""" +function update_review_template_answer( + LensAlias, QuestionId, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "PATCH", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/answers/$(QuestionId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_review_template_answer( + LensAlias, + QuestionId, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "PATCH", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/answers/$(QuestionId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_review_template_lens_review(lens_alias, template_arn) + update_review_template_lens_review(lens_alias, template_arn, params::Dict{String,<:Any}) + +Update a lens review associated with a review template. + +# Arguments +- `lens_alias`: +- `template_arn`: The review template ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LensNotes"`: +- `"PillarNotes"`: +""" +function update_review_template_lens_review( + LensAlias, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "PATCH", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_review_template_lens_review( + LensAlias, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "PATCH", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_share_invitation(share_invitation_action, share_invitation_id) update_share_invitation(share_invitation_action, share_invitation_id, params::Dict{String,<:Any}) @@ -2291,6 +2949,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys acknowledged that the Review owner field is required. If a Review owner is not added to the workload within 60 days of acknowledgement, access to the workload is restricted until an owner is added. +- `"JiraConfiguration"`: Configuration of the Jira integration. 
- `"NonAwsRegions"`: - `"Notes"`: - `"PillarPriorities"`: @@ -2448,3 +3107,42 @@ function upgrade_profile_version( feature_set=SERVICE_FEATURE_SET, ) end + +""" + upgrade_review_template_lens_review(lens_alias, template_arn) + upgrade_review_template_lens_review(lens_alias, template_arn, params::Dict{String,<:Any}) + +Upgrade the lens review of a review template. + +# Arguments +- `lens_alias`: +- `template_arn`: The ARN of the review template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientRequestToken"`: +""" +function upgrade_review_template_lens_review( + LensAlias, TemplateArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return wellarchitected( + "PUT", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/upgrade"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function upgrade_review_template_lens_review( + LensAlias, + TemplateArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wellarchitected( + "PUT", + "/reviewTemplates/$(TemplateArn)/lensReviews/$(LensAlias)/upgrade", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/wisdom.jl b/src/services/wisdom.jl index b328d4f2bb..cd15ff4974 100644 --- a/src/services/wisdom.jl +++ b/src/services/wisdom.jl @@ -20,7 +20,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. - `"description"`: The description of the assistant. -- `"serverSideEncryptionConfiguration"`: The KMS key used for encryption. +- `"serverSideEncryptionConfiguration"`: The configuration information for the customer + managed key used for encryption. The customer managed key must have a policy that allows + kms:CreateGrant, kms:DescribeKey, and kms:Decrypt/kms:GenerateDataKey permissions to the + IAM identity using the key to invoke Wisdom. To use Wisdom with chat, the key policy must + also allow kms:Decrypt, kms:GenerateDataKey*, and kms:DescribeKey permissions to the + connect.amazonaws.com service principal. For more information about setting up a customer + managed key for Wisdom, see Enable Amazon Connect Wisdom for your instance. - `"tags"`: The tags used to organize, track, or control access for this resource. """ function create_assistant(name, type; aws_config::AbstractAWSConfig=global_aws_config()) @@ -128,8 +134,9 @@ Creates Wisdom content. Before to calling this API, use StartContentUpload to up asset. # Arguments -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. - `name`: The name of the content. Each piece of content in a knowledge base must have a unique name. You can retrieve a piece of content using only its knowledge base and its name with the SearchContent API. @@ -212,7 +219,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys field. For more information about idempotency, see Making retries safe with idempotent APIs. - `"description"`: The description. 
- `"renderingConfiguration"`: Information about how to render the content. -- `"serverSideEncryptionConfiguration"`: The KMS key used for encryption. +- `"serverSideEncryptionConfiguration"`: The configuration information for the customer + managed key used for encryption. This KMS key must have a policy that allows + kms:CreateGrant, kms:DescribeKey, and kms:Decrypt/kms:GenerateDataKey permissions to the + IAM identity using the key to invoke Wisdom. For more information about setting up a + customer managed key for Wisdom, see Enable Amazon Connect Wisdom for your instance. - `"sourceConfiguration"`: The source of the knowledge base content. Only set this argument for EXTERNAL knowledge bases. - `"tags"`: The tags used to organize, track, or control access for this resource. @@ -257,6 +268,76 @@ function create_knowledge_base( ) end +""" + create_quick_response(content, knowledge_base_id, name) + create_quick_response(content, knowledge_base_id, name, params::Dict{String,<:Any}) + +Creates a Wisdom quick response. + +# Arguments +- `content`: The content of the quick response. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. +- `name`: The name of the quick response. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"channels"`: The Amazon Connect channels this quick response applies to. +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"contentType"`: The media type of the quick response content. Use + application/x.quickresponse;format=plain for a quick response written in plain text. Use + application/x.quickresponse;format=markdown for a quick response written in richtext. +- `"description"`: The description of the quick response. +- `"groupingConfiguration"`: The configuration information of the user groups that the + quick response is accessible to. +- `"isActive"`: Whether the quick response is active. +- `"language"`: The language code value for the language in which the quick response is + written. The supported language codes include de_DE, en_US, es_ES, fr_FR, id_ID, it_IT, + ja_JP, ko_KR, pt_BR, zh_CN, zh_TW +- `"shortcutKey"`: The shortcut key of the quick response. The value should be unique + across the knowledge base. +- `"tags"`: The tags used to organize, track, or control access for this resource. 
+""" +function create_quick_response( + content, knowledgeBaseId, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses", + Dict{String,Any}( + "content" => content, "name" => name, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_quick_response( + content, + knowledgeBaseId, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "content" => content, "name" => name, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_session(assistant_id, name) create_session(assistant_id, name, params::Dict{String,<:Any}) @@ -389,8 +470,9 @@ Deletes the content. # Arguments - `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN. -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. """ function delete_content( @@ -418,6 +500,43 @@ function delete_content( ) end +""" + delete_import_job(import_job_id, knowledge_base_id) + delete_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the quick response import job. + +# Arguments +- `import_job_id`: The identifier of the import job to be deleted. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. + +""" +function delete_import_job( + importJobId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_import_job( + importJobId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_knowledge_base(knowledge_base_id) delete_knowledge_base(knowledge_base_id, params::Dict{String,<:Any}) @@ -458,6 +577,44 @@ function delete_knowledge_base( ) end +""" + delete_quick_response(knowledge_base_id, quick_response_id) + delete_quick_response(knowledge_base_id, quick_response_id, params::Dict{String,<:Any}) + +Deletes a quick response. + +# Arguments +- `knowledge_base_id`: The knowledge base from which the quick response is deleted. The + identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base + if you're storing Wisdom Content resource to it. +- `quick_response_id`: The identifier of the quick response to delete. 
+ +""" +function delete_quick_response( + knowledgeBaseId, quickResponseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_quick_response( + knowledgeBaseId, + quickResponseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_assistant(assistant_id) get_assistant(assistant_id, params::Dict{String,<:Any}) @@ -538,8 +695,9 @@ Retrieves content, including a pre-signed URL to download the content. # Arguments - `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN. -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. """ function get_content( @@ -576,8 +734,9 @@ Retrieves summary information about the content. # Arguments - `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN. -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. """ function get_content_summary( @@ -605,6 +764,42 @@ function get_content_summary( ) end +""" + get_import_job(import_job_id, knowledge_base_id) + get_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) + +Retrieves the started import job. + +# Arguments +- `import_job_id`: The identifier of the import job to retrieve. +- `knowledge_base_id`: The identifier of the knowledge base that the import job belongs to. + +""" +function get_import_job( + importJobId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_import_job( + importJobId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs/$(importJobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_knowledge_base(knowledge_base_id) get_knowledge_base(knowledge_base_id, params::Dict{String,<:Any}) @@ -612,8 +807,9 @@ end Retrieves information about the knowledge base. # Arguments -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. 
""" function get_knowledge_base( @@ -640,6 +836,43 @@ function get_knowledge_base( ) end +""" + get_quick_response(knowledge_base_id, quick_response_id) + get_quick_response(knowledge_base_id, quick_response_id, params::Dict{String,<:Any}) + +Retrieves the quick response. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should be a + QUICK_RESPONSES type knowledge base. +- `quick_response_id`: The identifier of the quick response. + +""" +function get_quick_response( + knowledgeBaseId, quickResponseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_quick_response( + knowledgeBaseId, + quickResponseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_recommendations(assistant_id, session_id) get_recommendations(assistant_id, session_id, params::Dict{String,<:Any}) @@ -799,8 +1032,9 @@ end Lists the content. # Arguments -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -830,6 +1064,47 @@ function list_contents( ) end +""" + list_import_jobs(knowledge_base_id) + list_import_jobs(knowledge_base_id, params::Dict{String,<:Any}) + +Lists information about import jobs. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_import_jobs( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_import_jobs( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/importJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_knowledge_bases() list_knowledge_bases(params::Dict{String,<:Any}) @@ -859,6 +1134,47 @@ function list_knowledge_bases( ) end +""" + list_quick_responses(knowledge_base_id) + list_quick_responses(knowledge_base_id, params::Dict{String,<:Any}) + +Lists information about quick response. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. 
This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_quick_responses( + knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_quick_responses( + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -996,8 +1312,9 @@ end Removes a URI template from a knowledge base. # Arguments -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. """ function remove_knowledge_base_template_uri( @@ -1032,8 +1349,9 @@ Searches for content in a specified knowledge base. Can be used to get a specifi resource by its name. # Arguments -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. - `search_expression`: The search expression to filter results. # Optional Parameters @@ -1072,6 +1390,56 @@ function search_content( ) end +""" + search_quick_responses(knowledge_base_id, search_expression) + search_quick_responses(knowledge_base_id, search_expression, params::Dict{String,<:Any}) + +Searches existing Wisdom quick responses in a Wisdom knowledge base. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should be a + QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain + the ARN. +- `search_expression`: The search expression for querying the quick response. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attributes"`: The user-defined Amazon Connect contact attributes to be resolved when + search results are returned. +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
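+# Example
+An illustrative sketch only: the knowledge base ID is a placeholder, the `@service`
+import assumes the package's usual high-level pattern, and the exact shape of the search
+expression is defined by the service, so the query below is indicative rather than
+authoritative.
+
+    using AWS: @service
+    @service Wisdom
+
+    kb_id = "<quick-responses-knowledge-base-id>"  # placeholder identifier
+    # Indicative query only; the real query fields and operators are defined by the
+    # Wisdom API reference.
+    search_expression = Dict(
+        "queries" => [
+            Dict("name" => "name", "values" => ["greeting"], "operator" => "CONTAINS"),
+        ],
+    )
+    Wisdom.search_quick_responses(kb_id, search_expression)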
+""" +function search_quick_responses( + knowledgeBaseId, searchExpression; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/search/quickResponses", + Dict{String,Any}("searchExpression" => searchExpression); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_quick_responses( + knowledgeBaseId, + searchExpression, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/search/quickResponses", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("searchExpression" => searchExpression), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_sessions(assistant_id, search_expression) search_sessions(assistant_id, search_expression, params::Dict{String,<:Any}) @@ -1130,9 +1498,14 @@ an existing resource. You can only upload content to a knowledge base of type CU # Arguments - `content_type`: The type of content to upload. -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"presignedUrlTimeToLive"`: The expected expiration time of the generated presigned URL, + specified in minutes. """ function start_content_upload( contentType, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1162,6 +1535,77 @@ function start_content_upload( ) end +""" + start_import_job(import_job_type, knowledge_base_id, upload_id) + start_import_job(import_job_type, knowledge_base_id, upload_id, params::Dict{String,<:Any}) + +Start an asynchronous job to import Wisdom resources from an uploaded source file. Before +calling this API, use StartContentUpload to upload an asset that contains the resource +data. For importing Wisdom quick responses, you need to upload a csv file including the +quick responses. For information about how to format the csv file for importing quick +responses, see Import quick responses. + +# Arguments +- `import_job_type`: The type of the import job. For importing quick response resource, + set the value to QUICK_RESPONSES. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. For importing Wisdom quick + responses, this should be a QUICK_RESPONSES type knowledge base. +- `upload_id`: A pointer to the uploaded asset. This value is returned by + StartContentUpload. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: The tags used to organize, track, or control access for this resource. +- `"externalSourceConfiguration"`: The configuration information of the external source + that the resource data are imported from. +- `"metadata"`: The metadata fields of the imported Wisdom resources. 
+""" +function start_import_job( + importJobType, + knowledgeBaseId, + uploadId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/importJobs", + Dict{String,Any}( + "importJobType" => importJobType, + "uploadId" => uploadId, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_import_job( + importJobType, + knowledgeBaseId, + uploadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/importJobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "importJobType" => importJobType, + "uploadId" => uploadId, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -1243,7 +1687,9 @@ Updates information about the content. # Arguments - `content_id`: The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN. -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the ARN +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1299,8 +1745,9 @@ value, and you can set the template URI to https://myInstanceName.lightning.force.com/lightning/r/Knowledge__kav/*{Id}*/view. # Arguments -- `knowledge_base_id`: The identifier of the knowledge base. Can be either the ID or the - ARN. URLs cannot contain the ARN. +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. - `template_uri`: The template URI to update. """ @@ -1331,3 +1778,62 @@ function update_knowledge_base_template_uri( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_quick_response(knowledge_base_id, quick_response_id) + update_quick_response(knowledge_base_id, quick_response_id, params::Dict{String,<:Any}) + +Updates an existing Wisdom quick response. + +# Arguments +- `knowledge_base_id`: The identifier of the knowledge base. This should not be a + QUICK_RESPONSES type knowledge base if you're storing Wisdom Content resource to it. Can be + either the ID or the ARN. URLs cannot contain the ARN. +- `quick_response_id`: The identifier of the quick response. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"channels"`: The Amazon Connect contact channels this quick response applies to. The + supported contact channel types include Chat. +- `"content"`: The updated content of the quick response. +- `"contentType"`: The media type of the quick response content. Use + application/x.quickresponse;format=plain for quick response written in plain text. Use + application/x.quickresponse;format=markdown for quick response written in richtext. +- `"description"`: The updated description of the quick response. +- `"groupingConfiguration"`: The updated grouping configuration of the quick response. +- `"isActive"`: Whether the quick response is active. 
+- `"language"`: The language code value for the language in which the quick response is + written. The supported language codes include de_DE, en_US, es_ES, fr_FR, id_ID, it_IT, + ja_JP, ko_KR, pt_BR, zh_CN, zh_TW +- `"name"`: The name of the quick response. +- `"removeDescription"`: Whether to remove the description from the quick response. +- `"removeGroupingConfiguration"`: Whether to remove the grouping configuration of the + quick response. +- `"removeShortcutKey"`: Whether to remove the shortcut key of the quick response. +- `"shortcutKey"`: The shortcut key of the quick response. The value should be unique + across the knowledge base. +""" +function update_quick_response( + knowledgeBaseId, quickResponseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_quick_response( + knowledgeBaseId, + quickResponseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return wisdom( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/quickResponses/$(quickResponseId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/workmail.jl b/src/services/workmail.jl index cf9e44cc99..5cdf416b5d 100644 --- a/src/services/workmail.jl +++ b/src/services/workmail.jl @@ -11,9 +11,15 @@ using AWS.UUIDs Adds a member (user or group) to the resource's set of delegates. # Arguments -- `entity_id`: The member (user or group) to associate to the resource. +- `entity_id`: The member (user or group) to associate to the resource. The entity ID can + accept UserId or GroupID, Username or Groupname, or email. Entity: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Email address: entity@domain.tld Entity: entity - `organization_id`: The organization under which the resource exists. -- `resource_id`: The resource for which members (users or groups) are associated. +- `resource_id`: The resource for which members (users or groups) are associated. The + identifier can accept ResourceId, Resourcename, or email. The following identity formats + are available: Resource ID: r-0123456789a0123456789b0123456789 Email address: + resource@domain.tld Resource name: resource """ function associate_delegate_to_resource( @@ -62,8 +68,15 @@ end Adds a member (user or group) to the group's set. # Arguments -- `group_id`: The group to which the member (user or group) is associated. -- `member_id`: The member (user or group) to associate to the group. +- `group_id`: The group to which the member (user or group) is associated. The identifier + can accept GroupId, Groupname, or email. The following identity formats are available: + Group ID: 12345678-1234-1234-1234-123456789012 or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: group@domain.tld Group + name: group +- `member_id`: The member (user or group) to associate to the group. The member ID can + accept UserID or GroupId, Username or Groupname, or email. Member: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Email address: member@domain.tld Member name: member - `organization_id`: The organization under which the group exists. """ @@ -321,6 +334,10 @@ Creates a group that can be used in WorkMail by calling the RegisterToWorkMail o - `name`: The name of the group. 
- `organization_id`: The organization under which the group is to be created. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"HiddenFromGlobalAddressList"`: If this parameter is enabled, the group will be hidden + from the address book. """ function create_group( Name, OrganizationId; aws_config::AbstractAWSConfig=global_aws_config() @@ -553,6 +570,11 @@ Creates a new WorkMail resource. is created. - `type`: The type of the new resource. The available types are equipment and room. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: Resource description. +- `"HiddenFromGlobalAddressList"`: If this parameter is enabled, the resource will be + hidden from the address book. """ function create_resource( Name, OrganizationId, Type; aws_config::AbstractAWSConfig=global_aws_config() @@ -590,8 +612,8 @@ function create_resource( end """ - create_user(display_name, name, organization_id, password) - create_user(display_name, name, organization_id, password, params::Dict{String,<:Any}) + create_user(display_name, name, organization_id) + create_user(display_name, name, organization_id, params::Dict{String,<:Any}) Creates a user who can be used in WorkMail by calling the RegisterToWorkMail operation. @@ -600,23 +622,24 @@ Creates a user who can be used in WorkMail by calling the RegisterToWorkMail ope - `name`: The name for the new user. WorkMail directory user names have a maximum length of 64. All others have a maximum length of 20. - `organization_id`: The identifier of the organization for which the user is created. -- `password`: The password for the new user. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"FirstName"`: The first name of the new user. +- `"HiddenFromGlobalAddressList"`: If this parameter is enabled, the user will be hidden + from the address book. +- `"LastName"`: The last name of the new user. +- `"Password"`: The password for the new user. +- `"Role"`: The role of the new user. You cannot pass SYSTEM_USER or RESOURCE role in a + single request. When a user role is not selected, the default role of USER is selected. """ function create_user( - DisplayName, - Name, - OrganizationId, - Password; - aws_config::AbstractAWSConfig=global_aws_config(), + DisplayName, Name, OrganizationId; aws_config::AbstractAWSConfig=global_aws_config() ) return workmail( "CreateUser", Dict{String,Any}( - "DisplayName" => DisplayName, - "Name" => Name, - "OrganizationId" => OrganizationId, - "Password" => Password, + "DisplayName" => DisplayName, "Name" => Name, "OrganizationId" => OrganizationId ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -626,7 +649,6 @@ function create_user( DisplayName, Name, OrganizationId, - Password, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -639,7 +661,6 @@ function create_user( "DisplayName" => DisplayName, "Name" => Name, "OrganizationId" => OrganizationId, - "Password" => Password, ), params, ), @@ -830,7 +851,10 @@ end Deletes a group from WorkMail. # Arguments -- `group_id`: The identifier of the group to be deleted. +- `group_id`: The identifier of the group to be deleted. The identifier can be the GroupId, + or Groupname. 
The following identity formats are available: Group ID: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Group name: group - `organization_id`: The organization that contains the group. """ @@ -917,9 +941,16 @@ end Deletes permissions granted to a member (user or group). # Arguments -- `entity_id`: The identifier of the member (user or group) that owns the mailbox. -- `grantee_id`: The identifier of the member (user or group) for which to delete granted - permissions. +- `entity_id`: The identifier of the entity that owns the mailbox. The identifier can be + UserId or Group Id, Username or Groupname, or email. Entity ID: + 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: entity@domain.tld Entity + name: entity +- `grantee_id`: The identifier of the entity for which to delete granted permissions. The + identifier can be UserId, ResourceID, or Group Id, Username or Groupname, or email. + Grantee ID: 12345678-1234-1234-1234-123456789012,r-0123456789a0123456789b0123456789, or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: grantee@domain.tld Grantee + name: grantee - `organization_id`: The identifier of the organization under which the member (user or group) exists. @@ -1086,6 +1117,8 @@ more information, see Removing an organization in the WorkMail Administrator Gui # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: The idempotency token associated with the request. +- `"ForceDelete"`: Deletes a WorkMail organization even if the organization has enabled + users. """ function delete_organization( DeleteDirectory, OrganizationId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1134,7 +1167,9 @@ Deletes the specified resource. # Arguments - `organization_id`: The identifier associated with the organization from which the resource is deleted. -- `resource_id`: The identifier of the resource to be deleted. +- `resource_id`: The identifier of the resource to be deleted. The identifier can accept + ResourceId, or Resourcename. The following identity formats are available: Resource ID: + r-0123456789a0123456789b0123456789 Resource name: resource """ function delete_resource( @@ -1221,7 +1256,10 @@ days before they are permanently removed. # Arguments - `organization_id`: The organization that contains the user to be deleted. -- `user_id`: The identifier of the user to be deleted. +- `user_id`: The identifier of the user to be deleted. The identifier can be the UserId or + Username. The following identity formats are available: User ID: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 User + name: user """ function delete_user( @@ -1263,7 +1301,11 @@ the mailbox and schedules it for clean-up. WorkMail keeps mailboxes for 30 days are permanently removed. The functionality in the console is Disable. # Arguments -- `entity_id`: The identifier for the member (user or group) to be updated. +- `entity_id`: The identifier for the member to be updated. The identifier can be UserId, + ResourceId, or Group Id, Username, Resourcename, or Groupname, or email. Entity ID: + 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: entity@domain.tld Entity + name: entity - `organization_id`: The identifier for the organization under which the WorkMail entity exists. 
@@ -1382,6 +1424,47 @@ function describe_email_monitoring_configuration( ) end +""" + describe_entity(email, organization_id) + describe_entity(email, organization_id, params::Dict{String,<:Any}) + +Returns basic details about an entity in WorkMail. + +# Arguments +- `email`: The email under which the entity exists. +- `organization_id`: The identifier for the organization under which the entity exists. + +""" +function describe_entity( + Email, OrganizationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workmail( + "DescribeEntity", + Dict{String,Any}("Email" => Email, "OrganizationId" => OrganizationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_entity( + Email, + OrganizationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workmail( + "DescribeEntity", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Email" => Email, "OrganizationId" => OrganizationId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_group(group_id, organization_id) describe_group(group_id, organization_id, params::Dict{String,<:Any}) @@ -1389,7 +1472,10 @@ end Returns the data available for the group. # Arguments -- `group_id`: The identifier for the group to be described. +- `group_id`: The identifier for the group to be described. The identifier can accept + GroupId, Groupname, or email. The following identity formats are available: Group ID: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Email address: group@domain.tld Group name: group - `organization_id`: The identifier for the organization under which the group exists. """ @@ -1543,7 +1629,10 @@ Returns the data available for the resource. # Arguments - `organization_id`: The identifier associated with the organization for which the resource is described. -- `resource_id`: The identifier of the resource to be described. +- `resource_id`: The identifier of the resource to be described. The identifier can accept + ResourceId, Resourcename, or email. The following identity formats are available: + Resource ID: r-0123456789a0123456789b0123456789 Email address: resource@domain.tld + Resource name: resource """ function describe_resource( @@ -1586,7 +1675,10 @@ Provides information regarding the user. # Arguments - `organization_id`: The identifier for the organization under which the user exists. -- `user_id`: The identifier for the user to be described. +- `user_id`: The identifier for the user to be described. The identifier can be the UserId, + Username, or email. The following identity formats are available: User ID: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Email address: user@domain.tld User name: user """ function describe_user( @@ -1627,10 +1719,15 @@ Removes a member from the resource's set of delegates. # Arguments - `entity_id`: The identifier for the member (user, group) to be removed from the - resource's delegates. + resource's delegates. The entity ID can accept UserId or GroupID, Username or Groupname, or + email. Entity: 12345678-1234-1234-1234-123456789012 or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: entity@domain.tld Entity: + entity - `organization_id`: The identifier for the organization under which the resource exists. - `resource_id`: The identifier of the resource from which delegates' set members are - removed. + removed. 
The identifier can accept ResourceId, Resourcename, or email. The following + identity formats are available: Resource ID: r-0123456789a0123456789b0123456789 Email + address: resource@domain.tld Resource name: resource """ function disassociate_delegate_from_resource( @@ -1679,8 +1776,15 @@ end Removes a member from a group. # Arguments -- `group_id`: The identifier for the group from which members are removed. -- `member_id`: The identifier for the member to be removed to the group. +- `group_id`: The identifier for the group from which members are removed. The identifier + can accept GroupId, Groupname, or email. The following identity formats are available: + Group ID: 12345678-1234-1234-1234-123456789012 or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: group@domain.tld Group + name: group +- `member_id`: The identifier for the member to be removed from the group. The member ID + can accept UserID or GroupId, Username or Groupname, or email. Member ID: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Email address: member@domain.tld Member name: member - `organization_id`: The identifier for the organization under which the group exists. """ @@ -1970,7 +2074,11 @@ Requests a user's mailbox details for a specified organization and user. # Arguments - `organization_id`: The identifier for the organization that contains the user whose mailbox details are being requested. -- `user_id`: The identifier for the user whose mailbox details are being requested. +- `user_id`: The identifier for the user whose mailbox details are being requested. The + identifier can be the UserId, Username, or email. The following identity formats are + available: User ID: 12345678-1234-1234-1234-123456789012 or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: user@domain.tld User name: + user """ function get_mailbox_details( @@ -2232,7 +2340,10 @@ Returns an overview of the members of a group. Users and groups can be members o # Arguments - `group_id`: The identifier for the group to which the members (users or groups) are - associated. + associated. The identifier can accept GroupId, Groupname, or email. The following identity + formats are available: Group ID: 12345678-1234-1234-1234-123456789012 or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: group@domain.tld Group + name: group - `organization_id`: The identifier for the organization under which the group exists. # Optional Parameters @@ -2282,6 +2393,8 @@ Returns summaries of the organization's groups. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Limit the search results based on the filter criteria. Only one filter per + request is supported. - `"MaxResults"`: The maximum number of results to return in a single call. - `"NextToken"`: The token to use to retrieve the next page of results. The first call does not contain any tokens. @@ -2309,6 +2422,58 @@ function list_groups( ) end +""" + list_groups_for_entity(entity_id, organization_id) + list_groups_for_entity(entity_id, organization_id, params::Dict{String,<:Any}) + +Returns all the groups to which an entity belongs. + +# Arguments +- `entity_id`: The identifier for the entity. The entity ID can accept UserId or GroupID, + Username or Groupname, or email. 
Entity ID: 12345678-1234-1234-1234-123456789012 or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: entity@domain.tld Entity + name: entity +- `organization_id`: The identifier for the organization under which the entity exists. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Limit the search results based on the filter criteria. +- `"MaxResults"`: The maximum number of results to return in a single call. +- `"NextToken"`: The token to use to retrieve the next page of results. The first call does + not contain any tokens. +""" +function list_groups_for_entity( + EntityId, OrganizationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workmail( + "ListGroupsForEntity", + Dict{String,Any}("EntityId" => EntityId, "OrganizationId" => OrganizationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_groups_for_entity( + EntityId, + OrganizationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workmail( + "ListGroupsForEntity", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "EntityId" => EntityId, "OrganizationId" => OrganizationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_impersonation_roles(organization_id) list_impersonation_roles(organization_id, params::Dict{String,<:Any}) @@ -2437,8 +2602,11 @@ end Lists the mailbox permissions associated with a user, group, or resource mailbox. # Arguments -- `entity_id`: The identifier of the user, group, or resource for which to list mailbox - permissions. +- `entity_id`: The identifier of the user, or resource for which to list mailbox + permissions. The entity ID can accept UserId or ResourceId, Username or Resourcename, or + email. Entity ID: 12345678-1234-1234-1234-123456789012, or + r-0123456789a0123456789b0123456789 Email address: entity@domain.tld Entity name: entity + - `organization_id`: The identifier of the organization under which the user, group, or resource exists. @@ -2597,7 +2765,10 @@ and answer requests on behalf of the resource. # Arguments - `organization_id`: The identifier for the organization that contains the resource for which delegates are listed. -- `resource_id`: The identifier for the resource whose delegates are listed. +- `resource_id`: The identifier for the resource whose delegates are listed. The identifier + can accept ResourceId, Resourcename, or email. The following identity formats are + available: Resource ID: r-0123456789a0123456789b0123456789 Email address: + resource@domain.tld Resource name: resource # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2648,6 +2819,8 @@ Returns summaries of the organization's resources. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Limit the resource search results based on the filter criteria. You can only + use one filter per request. - `"MaxResults"`: The maximum number of results to return in a single call. - `"NextToken"`: The token to use to retrieve the next page of results. The first call does not contain any tokens. @@ -2721,6 +2894,8 @@ Returns summaries of the organization's users. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Limit the user search results based on the filter criteria. 
You can only use + one filter per request. - `"MaxResults"`: The maximum number of results to return in a single call. - `"NextToken"`: The token to use to retrieve the next page of results. The first call does not contain any tokens. @@ -2926,10 +3101,16 @@ end Sets permissions for a user, group, or resource. This replaces any pre-existing permissions. # Arguments -- `entity_id`: The identifier of the user, group, or resource for which to update mailbox - permissions. +- `entity_id`: The identifier of the user or resource for which to update mailbox + permissions. The identifier can be UserId, ResourceID, or Group Id, Username, Resourcename, + or Groupname, or email. Entity ID: 12345678-1234-1234-1234-123456789012, + r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234 Email + address: entity@domain.tld Entity name: entity - `grantee_id`: The identifier of the user, group, or resource to which to grant the - permissions. + permissions. The identifier can be UserId, ResourceID, or Group Id, Username, Resourcename, + or Groupname, or email. Grantee ID: 12345678-1234-1234-1234-123456789012, + r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234 Email + address: grantee@domain.tld Grantee name: grantee - `organization_id`: The identifier of the organization under which the user, group, or resource exists. - `permission_values`: The permissions granted to the grantee. SEND_AS allows the grantee @@ -3180,7 +3361,11 @@ information, see DeregisterFromWorkMail. # Arguments - `email`: The email for the user, group, or resource to be updated. -- `entity_id`: The identifier for the user, group, or resource to be updated. +- `entity_id`: The identifier for the user, group, or resource to be updated. The + identifier can accept UserId, ResourceId, or GroupId, or Username, Resourcename, or + Groupname. The following identity formats are available: Entity ID: + 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or + S-1-1-12-1234567890-123456789-123456789-1234 Entity name: entity - `organization_id`: The identifier for the organization under which the user, group, or resource exists. @@ -3282,7 +3467,12 @@ For more information, see Exporting mailbox content in the WorkMail Administrato # Arguments - `client_token`: The idempotency token for the client request. -- `entity_id`: The identifier of the user or resource associated with the mailbox. +- `entity_id`: The identifier of the user or resource associated with the mailbox. The + identifier can accept UserId or ResourceId, Username or Resourcename, or email. The + following identity formats are available: Entity ID: + 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789 , or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: entity@domain.tld Entity + name: entity - `kms_key_arn`: The Amazon Resource Name (ARN) of the symmetric AWS Key Management Service (AWS KMS) key that encrypts the exported mailbox content. - `organization_id`: The identifier associated with the organization. @@ -3579,6 +3769,54 @@ function update_default_mail_domain( ) end +""" + update_group(group_id, organization_id) + update_group(group_id, organization_id, params::Dict{String,<:Any}) + +Updates attibutes in a group. + +# Arguments +- `group_id`: The identifier for the group to be updated. The identifier can accept + GroupId, Groupname, or email. 
The following identity formats are available: Group ID: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Email address: group@domain.tld Group name: group +- `organization_id`: The identifier for the organization under which the group exists. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"HiddenFromGlobalAddressList"`: If enabled, the group is hidden from the global address + list. +""" +function update_group( + GroupId, OrganizationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workmail( + "UpdateGroup", + Dict{String,Any}("GroupId" => GroupId, "OrganizationId" => OrganizationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_group( + GroupId, + OrganizationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workmail( + "UpdateGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("GroupId" => GroupId, "OrganizationId" => OrganizationId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_impersonation_role(impersonation_role_id, name, organization_id, rules, type) update_impersonation_role(impersonation_role_id, name, organization_id, rules, type, params::Dict{String,<:Any}) @@ -3657,7 +3895,11 @@ Updates a user's current mailbox quota for a specified organization and user. - `mailbox_quota`: The updated mailbox quota, in MB, for the specified user. - `organization_id`: The identifier for the organization that contains the user for whom to update the mailbox quota. -- `user_id`: The identifer for the user for whom to update the mailbox quota. +- `user_id`: The identifer for the user for whom to update the mailbox quota. The + identifier can be the UserId, Username, or email. The following identity formats are + available: User ID: 12345678-1234-1234-1234-123456789012 or + S-1-1-12-1234567890-123456789-123456789-1234 Email address: user@domain.tld User name: + user """ function update_mailbox_quota( @@ -3783,7 +4025,11 @@ and the email provided in the input is promoted as the primary. # Arguments - `email`: The value of the email to be updated as primary. -- `entity_id`: The user, group, or resource to update. +- `entity_id`: The user, group, or resource to update. The identifier can accept UseriD, + ResourceId, or GroupId, Username, Resourcename, or Groupname, or email. The following + identity formats are available: Entity ID: 12345678-1234-1234-1234-123456789012, + r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234 Email + address: entity@domain.tld Entity name: entity - `organization_id`: The organization that contains the user, group, or resource to update. """ @@ -3835,12 +4081,19 @@ performing another DescribeResource call. # Arguments - `organization_id`: The identifier associated with the organization for which the resource is updated. -- `resource_id`: The identifier of the resource to be updated. +- `resource_id`: The identifier of the resource to be updated. The identifier can accept + ResourceId, Resourcename, or email. The following identity formats are available: + Resource ID: r-0123456789a0123456789b0123456789 Email address: resource@domain.tld + Resource name: resource # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BookingOptions"`: The resource's booking options to be updated. 
+- `"Description"`: Updates the resource description. +- `"HiddenFromGlobalAddressList"`: If enabled, the resource is hidden from the global + address list. - `"Name"`: The name of the resource to be updated. +- `"Type"`: Updates the resource type. """ function update_resource( OrganizationId, ResourceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -3873,3 +4126,67 @@ function update_resource( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_user(organization_id, user_id) + update_user(organization_id, user_id, params::Dict{String,<:Any}) + +Updates data for the user. To have the latest information, it must be preceded by a +DescribeUser call. The dataset in the request should be the one expected when performing +another DescribeUser call. + +# Arguments +- `organization_id`: The identifier for the organization under which the user exists. +- `user_id`: The identifier for the user to be updated. The identifier can be the UserId, + Username, or email. The following identity formats are available: User ID: + 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234 + Email address: user@domain.tld User name: user + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"City"`: Updates the user's city. +- `"Company"`: Updates the user's company. +- `"Country"`: Updates the user's country. +- `"Department"`: Updates the user's department. +- `"DisplayName"`: Updates the display name of the user. +- `"FirstName"`: Updates the user's first name. +- `"HiddenFromGlobalAddressList"`: If enabled, the user is hidden from the global address + list. +- `"Initials"`: Updates the user's initials. +- `"JobTitle"`: Updates the user's job title. +- `"LastName"`: Updates the user's last name. +- `"Office"`: Updates the user's office. +- `"Role"`: Updates the user role. You cannot pass SYSTEM_USER or RESOURCE. +- `"Street"`: Updates the user's street address. +- `"Telephone"`: Updates the user's contact details. +- `"ZipCode"`: Updates the user's zipcode. +""" +function update_user( + OrganizationId, UserId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workmail( + "UpdateUser", + Dict{String,Any}("OrganizationId" => OrganizationId, "UserId" => UserId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_user( + OrganizationId, + UserId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workmail( + "UpdateUser", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OrganizationId" => OrganizationId, "UserId" => UserId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/workspaces.jl b/src/services/workspaces.jl index c1239dca5f..2d5b341c5f 100644 --- a/src/services/workspaces.jl +++ b/src/services/workspaces.jl @@ -4,6 +4,42 @@ using AWS.AWSServices: workspaces using AWS.Compat using AWS.UUIDs +""" + accept_account_link_invitation(link_id) + accept_account_link_invitation(link_id, params::Dict{String,<:Any}) + +Accepts the account link invitation. There's currently no unlinking capability after you +accept the account linking invitation. + +# Arguments +- `link_id`: The identifier of the account link. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A string of up to 64 ASCII characters that Amazon EFS uses to ensure + idempotent creation. 
+""" +function accept_account_link_invitation( + LinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "AcceptAccountLinkInvitation", + Dict{String,Any}("LinkId" => LinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function accept_account_link_invitation( + LinkId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "AcceptAccountLinkInvitation", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("LinkId" => LinkId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_connection_alias(alias_id, resource_id) associate_connection_alias(alias_id, resource_id, params::Dict{String,<:Any}) @@ -89,6 +125,49 @@ function associate_ip_groups( ) end +""" + associate_workspace_application(application_id, workspace_id) + associate_workspace_application(application_id, workspace_id, params::Dict{String,<:Any}) + +Associates the specified application to the specified WorkSpace. + +# Arguments +- `application_id`: The identifier of the application. +- `workspace_id`: The identifier of the WorkSpace. + +""" +function associate_workspace_application( + ApplicationId, WorkspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "AssociateWorkspaceApplication", + Dict{String,Any}("ApplicationId" => ApplicationId, "WorkspaceId" => WorkspaceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_workspace_application( + ApplicationId, + WorkspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "AssociateWorkspaceApplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationId" => ApplicationId, "WorkspaceId" => WorkspaceId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ authorize_ip_rules(group_id, user_rules) authorize_ip_rules(group_id, user_rules, params::Dict{String,<:Any}) @@ -191,6 +270,47 @@ function copy_workspace_image( ) end +""" + create_account_link_invitation(target_account_id) + create_account_link_invitation(target_account_id, params::Dict{String,<:Any}) + +Creates the account link invitation. + +# Arguments +- `target_account_id`: The identifier of the target account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A string of up to 64 ASCII characters that Amazon EFS uses to ensure + idempotent creation. +""" +function create_account_link_invitation( + TargetAccountId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "CreateAccountLinkInvitation", + Dict{String,Any}("TargetAccountId" => TargetAccountId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_account_link_invitation( + TargetAccountId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateAccountLinkInvitation", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("TargetAccountId" => TargetAccountId), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_connect_client_add_in(name, resource_id, url) create_connect_client_add_in(name, resource_id, url, params::Dict{String,<:Any}) @@ -603,9 +723,11 @@ end create_workspaces(workspaces, params::Dict{String,<:Any}) Creates one or more WorkSpaces. 
This operation is asynchronous and returns before the -WorkSpaces are created. The MANUAL running mode value is only supported by Amazon +WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more -information, see Amazon WorkSpaces Core. +information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for +Linux bundles because WSP is the default protocol for those bundles. User-decoupled +WorkSpaces are only supported by Amazon WorkSpaces Core. # Arguments - `workspaces`: The WorkSpaces to create. You can specify up to 25 WorkSpaces. @@ -634,6 +756,41 @@ function create_workspaces( ) end +""" + delete_account_link_invitation(link_id) + delete_account_link_invitation(link_id, params::Dict{String,<:Any}) + +Deletes the account link invitation. + +# Arguments +- `link_id`: The identifier of the account link. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A string of up to 64 ASCII characters that Amazon EFS uses to ensure + idempotent creation. +""" +function delete_account_link_invitation( + LinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DeleteAccountLinkInvitation", + Dict{String,Any}("LinkId" => LinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_account_link_invitation( + LinkId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DeleteAccountLinkInvitation", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("LinkId" => LinkId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_client_branding(platforms, resource_id) delete_client_branding(platforms, resource_id, params::Dict{String,<:Any}) @@ -887,6 +1044,45 @@ function delete_workspace_image( ) end +""" + deploy_workspace_applications(workspace_id) + deploy_workspace_applications(workspace_id, params::Dict{String,<:Any}) + +Deploys associated applications to the specified WorkSpace + +# Arguments +- `workspace_id`: The identifier of the WorkSpace. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Force"`: Indicates whether the force flag is applied for the specified WorkSpace. When + the force flag is enabled, it allows previously failed deployments to be retried. 
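+
+# Example
+
+An illustrative sketch only: the WorkSpace identifier is a placeholder, and the calls
+assume this function is in scope with the default global AWS configuration.
+
+```julia
+# Deploy the applications currently associated with a WorkSpace.
+deploy_workspace_applications("ws-0123456789abcdef0")
+
+# Retry a previously failed deployment using the documented `Force` flag.
+deploy_workspace_applications("ws-0123456789abcdef0", Dict{String,Any}("Force" => true))
+```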
+""" +function deploy_workspace_applications( + WorkspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DeployWorkspaceApplications", + Dict{String,Any}("WorkspaceId" => WorkspaceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function deploy_workspace_applications( + WorkspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "DeployWorkspaceApplications", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("WorkspaceId" => WorkspaceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ deregister_workspace_directory(directory_id) deregister_workspace_directory(directory_id, params::Dict{String,<:Any}) @@ -984,6 +1180,140 @@ function describe_account_modifications( ) end +""" + describe_application_associations(application_id, associated_resource_types) + describe_application_associations(application_id, associated_resource_types, params::Dict{String,<:Any}) + +Describes the associations between the application and the specified associated resources. + +# Arguments +- `application_id`: The identifier of the specified application. +- `associated_resource_types`: The resource type of the associated resources. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of associations to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +""" +function describe_application_associations( + ApplicationId, + AssociatedResourceTypes; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "DescribeApplicationAssociations", + Dict{String,Any}( + "ApplicationId" => ApplicationId, + "AssociatedResourceTypes" => AssociatedResourceTypes, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_application_associations( + ApplicationId, + AssociatedResourceTypes, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "DescribeApplicationAssociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationId" => ApplicationId, + "AssociatedResourceTypes" => AssociatedResourceTypes, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_applications() + describe_applications(params::Dict{String,<:Any}) + +Describes the specified applications by filtering based on their compute types, license +availability, operating systems, and owners. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationIds"`: The identifiers of one or more applications. +- `"ComputeTypeNames"`: The compute types supported by the applications. +- `"LicenseType"`: The license availability for the applications. +- `"MaxResults"`: The maximum number of applications to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"OperatingSystemNames"`: The operating systems supported by the applications. +- `"Owner"`: The owner of the applications. 
+""" +function describe_applications(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "DescribeApplications"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_applications( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeApplications", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_bundle_associations(associated_resource_types, bundle_id) + describe_bundle_associations(associated_resource_types, bundle_id, params::Dict{String,<:Any}) + +Describes the associations between the applications and the specified bundle. + +# Arguments +- `associated_resource_types`: The resource types of the associated resource. +- `bundle_id`: The identifier of the bundle. + +""" +function describe_bundle_associations( + AssociatedResourceTypes, BundleId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeBundleAssociations", + Dict{String,Any}( + "AssociatedResourceTypes" => AssociatedResourceTypes, "BundleId" => BundleId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_bundle_associations( + AssociatedResourceTypes, + BundleId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "DescribeBundleAssociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AssociatedResourceTypes" => AssociatedResourceTypes, + "BundleId" => BundleId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_client_branding(resource_id) describe_client_branding(resource_id, params::Dict{String,<:Any}) @@ -1168,6 +1498,52 @@ function describe_connection_aliases( ) end +""" + describe_image_associations(associated_resource_types, image_id) + describe_image_associations(associated_resource_types, image_id, params::Dict{String,<:Any}) + +Describes the associations between the applications and the specified image. + +# Arguments +- `associated_resource_types`: The resource types of the associated resource. +- `image_id`: The identifier of the image. + +""" +function describe_image_associations( + AssociatedResourceTypes, ImageId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeImageAssociations", + Dict{String,Any}( + "AssociatedResourceTypes" => AssociatedResourceTypes, "ImageId" => ImageId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_image_associations( + AssociatedResourceTypes, + ImageId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "DescribeImageAssociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AssociatedResourceTypes" => AssociatedResourceTypes, + "ImageId" => ImageId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_ip_groups() describe_ip_groups(params::Dict{String,<:Any}) @@ -1229,6 +1605,53 @@ function describe_tags( ) end +""" + describe_workspace_associations(associated_resource_types, workspace_id) + describe_workspace_associations(associated_resource_types, workspace_id, params::Dict{String,<:Any}) + +Describes the associations betweens applications and the specified WorkSpace. + +# Arguments +- `associated_resource_types`: The resource types of the associated resources. 
+- `workspace_id`: The identifier of the WorkSpace. + +""" +function describe_workspace_associations( + AssociatedResourceTypes, WorkspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspaceAssociations", + Dict{String,Any}( + "AssociatedResourceTypes" => AssociatedResourceTypes, + "WorkspaceId" => WorkspaceId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_workspace_associations( + AssociatedResourceTypes, + WorkspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "DescribeWorkspaceAssociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AssociatedResourceTypes" => AssociatedResourceTypes, + "WorkspaceId" => WorkspaceId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_workspace_bundles() describe_workspace_bundles(params::Dict{String,<:Any}) @@ -1420,6 +1843,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys with any other filter. Because the CreateWorkspaces operation is asynchronous, the identifier it returns is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information is returned. +- `"WorkspaceName"`: The name of the user-decoupled WorkSpace. """ function describe_workspaces(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( @@ -1542,6 +1966,73 @@ function disassociate_ip_groups( ) end +""" + disassociate_workspace_application(application_id, workspace_id) + disassociate_workspace_application(application_id, workspace_id, params::Dict{String,<:Any}) + +Disassociates the specified application from a WorkSpace. + +# Arguments +- `application_id`: The identifier of the application. +- `workspace_id`: The identifier of the WorkSpace. + +""" +function disassociate_workspace_application( + ApplicationId, WorkspaceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DisassociateWorkspaceApplication", + Dict{String,Any}("ApplicationId" => ApplicationId, "WorkspaceId" => WorkspaceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_workspace_application( + ApplicationId, + WorkspaceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "DisassociateWorkspaceApplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationId" => ApplicationId, "WorkspaceId" => WorkspaceId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_account_link() + get_account_link(params::Dict{String,<:Any}) + +Retrieves account link information. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LinkId"`: The identifier of the account to link. 
+- `"LinkedAccountId"`: The identifier of the account link +""" +function get_account_link(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "GetAccountLink"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_account_link( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "GetAccountLink", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ import_client_branding(resource_id) import_client_branding(resource_id, params::Dict{String,<:Any}) @@ -1674,6 +2165,32 @@ function import_workspace_image( ) end +""" + list_account_links() + list_account_links(params::Dict{String,<:Any}) + +Lists all account links. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LinkStatusFilter"`: Filters the account based on their link status. +- `"MaxResults"`: The maximum number of accounts to return. +- `"NextToken"`: The token to use to retrieve the next page of results. This value is null + when there are no more results to return. +""" +function list_account_links(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "ListAccountLinks"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_account_links( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "ListAccountLinks", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_available_management_cidr_ranges(management_cidr_range_constraint) list_available_management_cidr_ranges(management_cidr_range_constraint, params::Dict{String,<:Any}) @@ -2083,8 +2600,8 @@ function modify_workspace_creation_properties( end """ - modify_workspace_properties(workspace_id, workspace_properties) - modify_workspace_properties(workspace_id, workspace_properties, params::Dict{String,<:Any}) + modify_workspace_properties(workspace_id) + modify_workspace_properties(workspace_id, params::Dict{String,<:Any}) Modifies the specified WorkSpace properties. For important information about how to modify the size of the root and user volumes, see Modify a WorkSpace. The MANUAL running mode @@ -2093,38 +2610,31 @@ allow-listed to use this value. For more information, see Amazon WorkSpaces Core # Arguments - `workspace_id`: The identifier of the WorkSpace. -- `workspace_properties`: The properties of the WorkSpace. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DataReplication"`: Indicates the data replication status. +- `"WorkspaceProperties"`: The properties of the WorkSpace. 
""" function modify_workspace_properties( - WorkspaceId, WorkspaceProperties; aws_config::AbstractAWSConfig=global_aws_config() + WorkspaceId; aws_config::AbstractAWSConfig=global_aws_config() ) return workspaces( "ModifyWorkspaceProperties", - Dict{String,Any}( - "WorkspaceId" => WorkspaceId, "WorkspaceProperties" => WorkspaceProperties - ); + Dict{String,Any}("WorkspaceId" => WorkspaceId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function modify_workspace_properties( WorkspaceId, - WorkspaceProperties, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return workspaces( "ModifyWorkspaceProperties", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "WorkspaceId" => WorkspaceId, - "WorkspaceProperties" => WorkspaceProperties, - ), - params, - ), + mergewith(_merge, Dict{String,Any}("WorkspaceId" => WorkspaceId), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2182,8 +2692,9 @@ end reboot_workspaces(reboot_workspace_requests, params::Dict{String,<:Any}) Reboots the specified WorkSpaces. You cannot reboot a WorkSpace unless its state is -AVAILABLE or UNHEALTHY. This operation is asynchronous and returns before the WorkSpaces -have rebooted. +AVAILABLE, UNHEALTHY, or REBOOTING. Reboot a WorkSpace in the REBOOTING state only if your +WorkSpace has been stuck in the REBOOTING state for over 20 minutes. This operation is +asynchronous and returns before the WorkSpaces have rebooted. # Arguments - `reboot_workspace_requests`: The WorkSpaces to reboot. You can specify up to 25 @@ -2330,6 +2841,40 @@ function register_workspace_directory( ) end +""" + reject_account_link_invitation(link_id) + reject_account_link_invitation(link_id, params::Dict{String,<:Any}) + +Rejects the account link invitation. + +# Arguments +- `link_id`: The identifier of the account link + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: The client token of the account link invitation to reject. +""" +function reject_account_link_invitation( + LinkId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "RejectAccountLinkInvitation", + Dict{String,Any}("LinkId" => LinkId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reject_account_link_invitation( + LinkId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "RejectAccountLinkInvitation", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("LinkId" => LinkId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ restore_workspace(workspace_id) restore_workspace(workspace_id, params::Dict{String,<:Any}) diff --git a/src/services/workspaces_thin_client.jl b/src/services/workspaces_thin_client.jl new file mode 100644 index 0000000000..23dbb717cd --- /dev/null +++ b/src/services/workspaces_thin_client.jl @@ -0,0 +1,605 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: workspaces_thin_client +using AWS.Compat +using AWS.UUIDs + +""" + create_environment(desktop_arn) + create_environment(desktop_arn, params::Dict{String,<:Any}) + +Creates an environment for your thin client devices. + +# Arguments +- `desktop_arn`: The Amazon Resource Name (ARN) of the desktop to stream from Amazon + WorkSpaces, WorkSpaces Web, or AppStream 2.0. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"desiredSoftwareSetId"`: The ID of the software set to apply. +- `"desktopEndpoint"`: The URL for the identity provider login (only for environments that + use AppStream 2.0). +- `"deviceCreationTags"`: A map of the key-value pairs of the tag or tags to assign to the + newly created devices for this environment. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the Key Management Service key to use to + encrypt the environment. +- `"maintenanceWindow"`: A specification for a time window to apply software updates. +- `"name"`: The name for the environment. +- `"softwareSetUpdateMode"`: An option to define which software updates to apply. +- `"softwareSetUpdateSchedule"`: An option to define if software updates should be applied + within a maintenance window. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. +""" +function create_environment(desktopArn; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "POST", + "/environments", + Dict{String,Any}("desktopArn" => desktopArn, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_environment( + desktopArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces_thin_client( + "POST", + "/environments", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "desktopArn" => desktopArn, "clientToken" => string(uuid4()) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_device(id) + delete_device(id, params::Dict{String,<:Any}) + +Deletes a thin client device. + +# Arguments +- `id`: The ID of the device to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. 
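+
+# Example
+
+An illustrative sketch only: the device identifier is a placeholder, and the call assumes
+this function is in scope with the default global AWS configuration.
+
+```julia
+# Delete a thin client device; a random idempotency `clientToken` is supplied automatically.
+delete_device("tc-0123456789abcdef0")
+```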
+""" +function delete_device(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "DELETE", + "/devices/$(id)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_device( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "DELETE", + "/devices/$(id)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_environment(id) + delete_environment(id, params::Dict{String,<:Any}) + +Deletes an environment. + +# Arguments +- `id`: The ID of the environment to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +""" +function delete_environment(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "DELETE", + "/environments/$(id)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_environment( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "DELETE", + "/environments/$(id)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + deregister_device(id) + deregister_device(id, params::Dict{String,<:Any}) + +Deregisters a thin client device. + +# Arguments +- `id`: The ID of the device to deregister. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"targetDeviceStatus"`: The desired new status for the device. 
+""" +function deregister_device(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "POST", + "/deregister-device/$(id)", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function deregister_device( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "POST", + "/deregister-device/$(id)", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_device(id) + get_device(id, params::Dict{String,<:Any}) + +Returns information for a thin client device. + +# Arguments +- `id`: The ID of the device for which to return information. + +""" +function get_device(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "GET", "/devices/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_device( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "GET", + "/devices/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_environment(id) + get_environment(id, params::Dict{String,<:Any}) + +Returns information for an environment. + +# Arguments +- `id`: The ID of the environment for which to return information. + +""" +function get_environment(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "GET", "/environments/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_environment( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "GET", + "/environments/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_software_set(id) + get_software_set(id, params::Dict{String,<:Any}) + +Returns information for a software set. + +# Arguments +- `id`: The ID of the software set for which to return information. + +""" +function get_software_set(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "GET", "/softwaresets/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_software_set( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "GET", + "/softwaresets/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_devices() + list_devices(params::Dict{String,<:Any}) + +Returns a list of thin client devices. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. 
+""" +function list_devices(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "GET", "/devices"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_devices( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "GET", "/devices", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_environments() + list_environments(params::Dict{String,<:Any}) + +Returns a list of environments. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_environments(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "GET", "/environments"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_environments( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "GET", + "/environments", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_software_sets() + list_software_sets(params::Dict{String,<:Any}) + +Returns a list of software sets. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_software_sets(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "GET", "/softwaresets"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_software_sets( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "GET", + "/softwaresets", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns a list of tags for a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which you want to + retrieve tags. 
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces_thin_client( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns one or more tags (key-value pairs) to the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to tag. +- `tags`: A map of the key-value pairs of the tag or tags to assign to the resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces_thin_client( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes a tag or tags from a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to untag. +- `tag_keys`: The keys of the key-value pairs for the tag or tags you want to remove from + the specified resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces_thin_client( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_device(id) + update_device(id, params::Dict{String,<:Any}) + +Updates a thin client device. + +# Arguments +- `id`: The ID of the device to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"desiredSoftwareSetId"`: The ID of the software set to apply. +- `"name"`: The name of the device to update. +- `"softwareSetUpdateSchedule"`: An option to define if software updates should be applied + within a maintenance window. 
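+
+# Example
+
+An illustrative sketch only: the device identifier, name, and software set ID are
+placeholders, and the call assumes this function is in scope with the default global AWS
+configuration.
+
+```julia
+# Rename a device and pin it to a specific software set via the documented keys.
+update_device(
+    "tc-0123456789abcdef0",
+    Dict{String,Any}("name" => "front-desk-terminal", "desiredSoftwareSetId" => "sw-012345"),
+)
+```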
+""" +function update_device(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "PATCH", "/devices/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_device( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "PATCH", + "/devices/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_environment(id) + update_environment(id, params::Dict{String,<:Any}) + +Updates an environment. + +# Arguments +- `id`: The ID of the environment to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"desiredSoftwareSetId"`: The ID of the software set to apply. +- `"desktopArn"`: The Amazon Resource Name (ARN) of the desktop to stream from Amazon + WorkSpaces, WorkSpaces Web, or AppStream 2.0. +- `"desktopEndpoint"`: The URL for the identity provider login (only for environments that + use AppStream 2.0). +- `"deviceCreationTags"`: A map of the key-value pairs of the tag or tags to assign to the + newly created devices for this environment. +- `"maintenanceWindow"`: A specification for a time window to apply software updates. +- `"name"`: The name of the environment to update. +- `"softwareSetUpdateMode"`: An option to define which software updates to apply. +- `"softwareSetUpdateSchedule"`: An option to define if software updates should be applied + within a maintenance window. +""" +function update_environment(id; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces_thin_client( + "PATCH", + "/environments/$(id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_environment( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "PATCH", + "/environments/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_software_set(id, validation_status) + update_software_set(id, validation_status, params::Dict{String,<:Any}) + +Updates a software set. + +# Arguments +- `id`: The ID of the software set to update. +- `validation_status`: An option to define if the software set has been validated. + +""" +function update_software_set( + id, validationStatus; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces_thin_client( + "PATCH", + "/softwaresets/$(id)", + Dict{String,Any}("validationStatus" => validationStatus); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_software_set( + id, + validationStatus, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces_thin_client( + "PATCH", + "/softwaresets/$(id)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("validationStatus" => validationStatus), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/workspaces_web.jl b/src/services/workspaces_web.jl index 0d9d42949a..986714a74e 100644 --- a/src/services/workspaces_web.jl +++ b/src/services/workspaces_web.jl @@ -275,7 +275,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. 
With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"customerManagedKey"`: The custom managed key of the browser settings. - `"tags"`: The tags to add to the browser settings resource. A tag is a key-value pair. """ @@ -331,7 +332,9 @@ Creates an identity provider resource that is then associated with a web portal. oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML - providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional + providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit + (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 + EncryptedResponses (boolean) optional - `identity_provider_name`: The identity provider name. - `identity_provider_type`: The identity provider type. - `portal_arn`: The ARN of the web portal. @@ -342,7 +345,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. +- `"tags"`: The tags to add to the identity provider resource. A tag is a key-value pair. """ function create_identity_provider( identityProviderDetails, @@ -410,11 +415,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"customerManagedKey"`: The custom managed key of the IP access settings. - `"description"`: The description of the IP access settings. - `"displayName"`: The display name of the IP access settings. -- `"tags"`: The tags to add to the browser settings resource. A tag is a key-value pair. +- `"tags"`: The tags to add to the IP access settings resource. A tag is a key-value pair. """ function create_ip_access_settings( ipRules; aws_config::AbstractAWSConfig=global_aws_config() @@ -467,7 +473,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. 
+ If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"tags"`: The tags to add to the network settings resource. A tag is a key-value pair. """ function create_network_settings( @@ -526,19 +533,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is - controlled through your identity provider. IAM_Identity_Center web portals are - authenticated through AWS IAM Identity Center (successor to AWS Single Sign-On). They - provide additional features, such as IdP-initiated authentication. Identity sources + controlled through your identity provider. IAM Identity Center web portals are + authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. - `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"customerManagedKey"`: The customer managed key of the web portal. - `"displayName"`: The name of the web portal. This is not visible to users who log into the web portal. +- `"instanceType"`: The type and resources of the underlying instance. +- `"maxConcurrentSessions"`: The maximum number of concurrent sessions for the portal. - `"tags"`: The tags to add to the web portal. A tag is a key-value pair. """ function create_portal(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -583,7 +592,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"tags"`: The tags to add to the trust store. A tag is a key-value pair. """ function create_trust_store( @@ -636,7 +646,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"tags"`: The tags to add to the user settings resource. A tag is a key-value pair. 
""" function create_user_access_logging_settings( @@ -695,11 +706,19 @@ session and the their local devices. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"additionalEncryptionContext"`: The additional encryption context of the user settings. - `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. +- `"cookieSynchronizationConfiguration"`: The configuration that specifies which cookies + should be synchronized from the end user's local browser to the remote browser. +- `"customerManagedKey"`: The customer managed key used to encrypt sensitive information in + the user settings. +- `"deepLinkAllowed"`: Specifies whether the user can use deep links that open + automatically when connecting to a session. - `"disconnectTimeoutInMinutes"`: The amount of time that a streaming session remains active after users disconnect. - `"idleDisconnectTimeoutInMinutes"`: The amount of time that users can be idle (inactive) @@ -1908,7 +1927,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. """ function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces_web( @@ -1994,7 +2014,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. """ function update_browser_settings( browserSettingsArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -2038,7 +2059,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"identityProviderDetails"`: The details of the identity provider. The following list describes the provider detail keys for each identity provider type. 
For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: @@ -2050,7 +2072,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) - optional + optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - + Only accepts rsa-sha256 EncryptedResponses (boolean) optional - `"identityProviderName"`: The name of the identity provider. - `"identityProviderType"`: The type of the identity provider. """ @@ -2096,7 +2119,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"description"`: The description of the IP access settings. - `"displayName"`: The display name of the IP access settings. - `"ipRules"`: The updated IP rules of the IP access settings. @@ -2143,7 +2167,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"securityGroupIds"`: One or more security groups used to control access from streaming instances to your VPC. - `"subnetIds"`: The subnets in which network interfaces are created to connect streaming @@ -2193,13 +2218,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is - controlled through your identity provider. IAM_Identity_Center web portals are - authenticated through AWS IAM Identity Center (successor to AWS Single Sign-On). They - provide additional features, such as IdP-initiated authentication. Identity sources + controlled through your identity provider. IAM Identity Center web portals are + authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. - `"displayName"`: The name of the web portal. This is not visible to users who log into the web portal. +- `"instanceType"`: The type and resources of the underlying instance. +- `"maxConcurrentSessions"`: The maximum number of concurrent sessions for the portal. 
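+
+# Example
+
+An illustrative sketch only: the portal ARN and values are placeholders, it assumes the
+usual two-argument `params` method generated for this operation, and it uses the default
+global AWS configuration.
+
+```julia
+# Update the display name and the newly documented concurrent session limit of a portal.
+update_portal(
+    "arn:aws:workspaces-web:us-east-1:111122223333:portal/abcdef01-2345-6789-abcd-ef0123456789",
+    Dict{String,Any}("displayName" => "Finance portal", "maxConcurrentSessions" => 100),
+)
+```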
""" function update_portal(portalArn; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces_web( @@ -2240,7 +2266,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. """ function update_trust_store( trustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -2284,7 +2311,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. - `"kinesisStreamArn"`: The ARN of the Kinesis stream. """ function update_user_access_logging_settings( @@ -2329,9 +2357,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. - If you do not specify a client token, one is automatically generated by the AWS SDK. + If you do not specify a client token, one is automatically generated by the Amazon Web + Services SDK. +- `"cookieSynchronizationConfiguration"`: The configuration that specifies which cookies + should be synchronized from the end user's local browser to the remote browser. If the + allowlist and blocklist are empty, the configuration becomes null. - `"copyAllowed"`: Specifies whether the user can copy text from the streaming session to the local device. +- `"deepLinkAllowed"`: Specifies whether the user can use deep links that open + automatically when connecting to a session. - `"disconnectTimeoutInMinutes"`: The amount of time that a streaming session remains active after users disconnect. - `"downloadAllowed"`: Specifies whether the user can download files from the streaming diff --git a/src/services/xray.jl b/src/services/xray.jl index ee9f5b99c9..fa3f1f44b2 100644 --- a/src/services/xray.jl +++ b/src/services/xray.jl @@ -788,8 +788,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Sampling"`: Set to true to get summaries for only a subset of available traces. - `"SamplingStrategy"`: A parameter to indicate whether to enable sampling on trace summaries. Input parameters are Name and Value. -- `"TimeRangeType"`: A parameter to indicate whether to query trace summaries by TraceId or - Event time. +- `"TimeRangeType"`: A parameter to indicate whether to query trace summaries by TraceId, + Event (trace update time), or Service (segment end time). """ function get_trace_summaries( EndTime, StartTime; aws_config::AbstractAWSConfig=global_aws_config()